From d4a704689a038a8ccca185fbc4a877564ce037cc Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 18 Dec 2024 03:15:01 -0700 Subject: [PATCH 1/9] initial changes --- .../FixedBufferOnnxValue.shared.cs | 21 +++ .../Microsoft.ML.OnnxRuntime.csproj | 4 + .../NamedOnnxValue.shared.cs | 22 +++ .../OrtValue.shared.cs | 169 +++++++++++++++++ .../InferenceTest.netcore.cs | 178 ++++++++++++++++++ 5 files changed, 394 insertions(+) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs index c8b62b145acaf..d54d9b677be60 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs @@ -4,6 +4,11 @@ using Microsoft.ML.OnnxRuntime.Tensors; using System; +#if NET8_0 +using DotnetTensors = System.Numerics.Tensors; +using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; +#endif + namespace Microsoft.ML.OnnxRuntime { /// @@ -39,6 +44,22 @@ public static FixedBufferOnnxValue CreateFromTensor(Tensor value) return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + /// + /// Creates a object from the tensor and pins its underlying buffer. + /// + /// + /// + /// a disposable instance of FixedBufferOnnxValue + public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tensor value) where T : unmanaged + { + var ortValue = OrtValue.CreateTensorValueFromDotnetTensorObject(value); + return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, TensorBase.GetTypeInfo(typeof(T)).ElementType); + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// This is a factory method that creates a disposable instance of FixedBufferOnnxValue /// on top of a buffer. 
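Aside (not part of the patch): a minimal usage sketch for the factory methods added above, as the API stands at this point in the series. It assumes a net8.0 project with the System.Numerics.Tensors preview package referenced; the model path, input name "data_0", and the shape are placeholders, not values taken from the patch.

#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is preview
using System.Collections.Generic;
using Microsoft.ML.OnnxRuntime;
using DotnetTensors = System.Numerics.Tensors;

float[] data = new float[1 * 3 * 224 * 224];                                   // hypothetical input buffer
var input = DotnetTensors.Tensor.Create(data, new nint[] { 1, 3, 224, 224 });  // dense Tensor<float> over the array

using var session = new InferenceSession("model.onnx");                        // placeholder model path
var container = new List<NamedOnnxValue>
{
    NamedOnnxValue.CreateFromDotnetTensor("data_0", input)                     // factory added in this patch
};
using var results = session.Run(container);                                    // same Run() overloads as Tensor<T> inputs
#pragma warning restore SYSLIB5001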
Internally, it will pin managed buffer and will create diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj index 63131d05c03d5..0dbfd947e822c 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj +++ b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj @@ -184,6 +184,10 @@ + + + + diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs index 48a10455588bc..8908e77c3fc50 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs @@ -8,6 +8,11 @@ using System.Diagnostics; using System.Linq; +#if NET8_0 +using DotnetTensors = System.Numerics.Tensors; +using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; +#endif + namespace Microsoft.ML.OnnxRuntime { /// @@ -140,6 +145,23 @@ public static NamedOnnxValue CreateFromTensor(string name, Tensor value) return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + /// + /// This is a factory method that instantiates NamedOnnxValue + /// and associated name with an instance of a Tensor + /// + /// + /// name + /// Tensor + /// + public static NamedOnnxValue CreateFromDotnetTensor(string name, DotnetTensors.Tensor value) + { + return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR); + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// This is a factory method that instantiates NamedOnnxValue. /// It would contain a sequence of elements diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index d38748c2f97cc..5cc1ae6d43fc6 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -6,9 +6,15 @@ using System.Buffers; using System.Collections.Generic; using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; +#if NET8_0 +using DotnetTensors = System.Numerics.Tensors; +using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; +#endif + namespace Microsoft.ML.OnnxRuntime { /// @@ -205,6 +211,38 @@ public ReadOnlySpan GetTensorDataAsSpan() where T : unmanaged return MemoryMarshal.Cast(byteSpan); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + /// + /// Returns a ReadOnlyTensorSpan over tensor native buffer that + /// provides a read-only view. + /// + /// Note, that the memory may be device allocated and, therefore, not accessible from the CPU. + /// To get memory descriptor use GetTensorMemoryInfo(). + /// + /// OrtValue must contain a non-string tensor. + /// The span is valid as long as the OrtValue instance is alive (not disposed). 
+ /// + /// + /// ReadOnlySpan + /// + public DotnetTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where T : unmanaged + { + var byteSpan = GetTensorBufferRawData(typeof(T)); + + var typeSpan = MemoryMarshal.Cast(byteSpan); + var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; + var nArray = new nint[shape.Length]; + for (int i = 0; i < shape.Length; i++) + { + nArray[i] = (nint)shape[i]; + } + + return new DotnetTensors.ReadOnlyTensorSpan(typeSpan, nArray, []); + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// Returns a Span over tensor native buffer. /// This enables you to safely and efficiently modify the underlying @@ -225,6 +263,37 @@ public Span GetTensorMutableDataAsSpan() where T : unmanaged return MemoryMarshal.Cast(byteSpan); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + /// + /// Returns a TensorSpan over tensor native buffer. + /// + /// Note, that the memory may be device allocated and, therefore, not accessible from the CPU. + /// To get memory descriptor use GetTensorMemoryInfo(). + /// + /// OrtValue must contain a non-string tensor. + /// The span is valid as long as the OrtValue instance is alive (not disposed). + /// + /// + /// ReadOnlySpan + /// + public DotnetTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T : unmanaged + { + var byteSpan = GetTensorBufferRawData(typeof(T)); + + var typeSpan = MemoryMarshal.Cast(byteSpan); + var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; + var nArray = new nint[shape.Length]; + for (int i = 0; i < shape.Length; i++) + { + nArray[i] = (nint)shape[i]; + } + + return new DotnetTensors.TensorSpan(typeSpan, nArray, []); + } +#pragma warning restore SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. +#endif + /// /// Provides mutable raw native buffer access. /// @@ -234,6 +303,28 @@ public Span GetTensorMutableRawData() return GetTensorBufferRawData(typeof(byte)); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. + /// + /// Provides mutable raw native buffer access. + /// + /// TensorSpan over the native buffer bytes + public DotnetTensors.TensorSpan GetTensorSpanMutableRawData() where T : unmanaged + { + var byteSpan = GetTensorBufferRawData(typeof(T)); + + var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; + var nArray = new nint[shape.Length]; + for (int i = 0; i < shape.Length; i++) + { + nArray[i] = (nint)shape[i]; + } + + return new DotnetTensors.TensorSpan(byteSpan, nArray, []); + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// Fetch string tensor element buffer pointer at the specified index, /// convert/copy to UTF-16 char[] and return a ReadOnlyMemory{char} instance. 
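Aside (not part of the patch): the accessors above expose the OrtValue's native buffer through the preview ReadOnlyTensorSpan/TensorSpan types instead of a flat Span<T>. A hedged consumption sketch follows; "output" stands for any float tensor OrtValue, and the Rank/Lengths/FlattenedLength members are assumed to mirror the Tensor<T> members the patch already relies on.

#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is preview
using System;
using Microsoft.ML.OnnxRuntime;
using DotnetTensors = System.Numerics.Tensors;

static class OrtValueTensorSpanDemo
{
    public static void InspectOutput(OrtValue output)
    {
        // Read-only, shaped view over the native buffer; valid only while 'output' is not disposed.
        DotnetTensors.ReadOnlyTensorSpan<float> view = output.GetTensorDataAsTensorSpan<float>();
        Console.WriteLine($"rank={view.Rank}, first dim={view.Lengths[0]}, elements={view.FlattenedLength}");

        // The mutable variants wrap the same memory, so writes land directly in the OrtValue buffer.
        DotnetTensors.TensorSpan<float> mutable = output.GetTensorMutableDataAsTensorSpan<float>();
        var rawBytes = output.GetTensorSpanMutableRawData<float>();   // byte-typed view shaped like the tensor
    }
}
#pragma warning restore SYSLIB5001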
@@ -605,6 +696,84 @@ public static OrtValue CreateTensorValueFromMemory(T[] data, long[] shape) wh return OrtValue.CreateTensorValueFromMemory(OrtMemoryInfo.DefaultInstance, new Memory(data), shape); } +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback + /// + /// This is a factory method creates a native Onnxruntime OrtValue containing a tensor. + /// The method will attempt to pin managed memory so no copying occurs when data is passed down + /// to native code. + /// + /// Tensor object + /// discovered tensor element type + /// And instance of OrtValue constructed on top of the object + public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors.Tensor tensor) where T : unmanaged + { + if (!IsContiguousAndDense(tensor)) + { + var newTensor = DotnetTensors.Tensor.Create(tensor.Lengths); + tensor.CopyTo(newTensor); + tensor = newTensor; + } + unsafe + { + GCHandle handle = GCHandle.Alloc(tensor, GCHandleType.Pinned); + var memHandle = new MemoryHandle(Unsafe.AsPointer(ref tensor.GetPinnableReference()), handle); + + try + { + IntPtr dataBufferPointer = IntPtr.Zero; + unsafe + { + dataBufferPointer = (IntPtr)memHandle.Pointer; + } + + var bufferLengthInBytes = tensor.FlattenedLength * sizeof(T); + + var shape = new long[tensor.Rank]; + for (int i = 0; i < shape.Length; i++) + { + shape[i] = tensor.Lengths[i]; + } + + var typeInfo = TensorBase.GetTypeInfo(typeof(T)) ?? + throw new OnnxRuntimeException(ErrorCode.InvalidArgument, $"Tensor of type: {typeof(T)} is not supported"); + + NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateTensorWithDataAsOrtValue( + OrtMemoryInfo.DefaultInstance.Pointer, + dataBufferPointer, + (UIntPtr)(bufferLengthInBytes), + shape, + (UIntPtr)tensor.Rank, + typeInfo.ElementType, + out IntPtr nativeValue)); + + return new OrtValue(nativeValue, memHandle); + } + catch (Exception) + { + memHandle.Dispose(); + throw; + } + } + } + + private static bool IsContiguousAndDense(DotnetTensors.Tensor tensor) + { + // Right most dimension must be 1 for a dense tensor. + if (tensor.Strides[^1] != 1) + return false; + + // For other dimensions, the stride must be equal to the product of the dimensions to the right. + for (int i = tensor.Rank - 2; i >= 0; i--) + { + if (tensor.Strides[i] != TensorPrimitives.Product(tensor.Lengths.Slice(i + 1, tensor.Lengths.Length - i - 1))) + return false; + } + return true; + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// The factory API creates an OrtValue with memory allocated using the given allocator /// according to the specified shape and element type. 
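Aside (not part of the patch): IsContiguousAndDense above encodes the standard row-major density rule — the right-most stride must be 1 and every other stride must equal the product of the lengths to its right; tensors that fail the check (slices, broadcasts) are copied into a fresh dense Tensor<T> before pinning. A small, patch-independent sketch of that arithmetic with a made-up shape:

using System;

static class DenseStrideCheck
{
    // For lengths [2, 3, 4] a dense row-major layout has strides [12, 4, 1]:
    //   strides[2] = 1, strides[1] = 4 (= 4), strides[0] = 12 (= 3 * 4).
    public static bool IsDenseRowMajor(ReadOnlySpan<nint> lengths, ReadOnlySpan<nint> strides)
    {
        nint expected = 1;
        for (int i = lengths.Length - 1; i >= 0; i--)
        {
            if (strides[i] != expected)
                return false;               // gap or overlap: not a dense, contiguous layout
            expected *= lengths[i];
        }
        return true;
    }
}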
The memory will be released when OrtValue diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs index ff5fd2de54197..37ca6111b3e6d 100644 --- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs +++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs @@ -7,6 +7,11 @@ using System.Text.RegularExpressions; using Xunit; +#if NET8_0_OR_GREATER +using DotnetTensors = System.Numerics.Tensors; +using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; +#endif + namespace Microsoft.ML.OnnxRuntime.Tests { /// @@ -67,6 +72,179 @@ public void CanCreateAndDisposeSessionWithModelPath() } } +#if NET8_0_OR_GREATER +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + [Theory(DisplayName = "CanRunInferenceOnAModel")] + [InlineData(GraphOptimizationLevel.ORT_DISABLE_ALL, true)] + [InlineData(GraphOptimizationLevel.ORT_DISABLE_ALL, false)] + [InlineData(GraphOptimizationLevel.ORT_ENABLE_EXTENDED, true)] + [InlineData(GraphOptimizationLevel.ORT_ENABLE_EXTENDED, false)] + private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOptimizationLevel, bool enableParallelExecution) + { + var model = TestDataLoader.LoadModelFromEmbeddedResource("squeezenet.onnx"); + + using (var cleanUp = new DisposableListTest()) + { + // Set the graph optimization level for this session. + SessionOptions options = new SessionOptions(); + cleanUp.Add(options); + options.GraphOptimizationLevel = graphOptimizationLevel; + if (enableParallelExecution) options.ExecutionMode = ExecutionMode.ORT_PARALLEL; + + var session = new InferenceSession(model, options); + cleanUp.Add(session); + + var inputMeta = session.InputMetadata; + var outputMeta = session.OutputMetadata; + var container = new List(); + + float[] expectedOutput = TestDataLoader.LoadTensorFromEmbeddedResource("bench.expected_out"); + int[] expectedDimensions = { 1, 1000, 1, 1 }; // hardcoded for now for the test data + ReadOnlySpan expectedOutputDimensions = expectedDimensions; + string[] expectedOutputNames = new string[] { "softmaxout_1" }; + + float[] inputData = TestDataLoader.LoadTensorFromEmbeddedResource("bench.in"); // this is the data for only one input tensor for this model + + foreach (var name in inputMeta.Keys) + { + Assert.Equal(typeof(float), inputMeta[name].ElementType); + Assert.True(inputMeta[name].IsTensor); + nint[] dims = inputMeta[name].Dimensions.Select(x => (nint)x).ToArray(); + var tensor = DotnetTensors.Tensor.Create(inputData, dims); + + container.Add(NamedOnnxValue.CreateFromDotnetTensor(name, tensor)); + } + + // Run inference with named inputs and outputs created with in Run() + using (var results = session.Run(container)) // results is an IReadOnlyList container + { + ValidateRunResults(results); + } + + // Run inference with named inputs, outputs created with in Run() and RunOptions + using (var runOptions = new RunOptions()) + { + runOptions.LogId = "CsharpTest"; + runOptions.Terminate = false; // TODO: Test terminate = true, it currently crashes + runOptions.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_ERROR; + IReadOnlyCollection outputNames = session.OutputMetadata.Keys.ToList(); + + using (var results = session.Run(container, outputNames, runOptions)) // results is an IReadOnlyList container + { + ValidateRunResults(results); + } + } + + // Run inference with 
pinned inputs and outputs created with in Run() + using (var pinnedInputs = new DisposableListTest()) + { + var inputNames = container.Select(i => i.Name).ToArray(); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + + // output names not specified + using (var results = session.Run(inputNames, pinnedInputs)) // results is an IReadOnlyList container + { + ValidateRunResults(results); + } + + // output names specified explicitly + using (var results = session.Run(inputNames, pinnedInputs, expectedOutputNames)) // results is an IReadOnlyList container + { + ValidateRunResults(results); + } + } + + // Run inference with outputs pinned from buffers + using (var pinnedInputs = new DisposableListTest()) + using (var pinnedOutputs = new DisposableListTest()) + { + var memInfo = OrtMemoryInfo.DefaultInstance; // CPU + + // Create inputs + Assert.Single(inputMeta.Keys); + var inputNames = inputMeta.Keys.ToArray(); + var inputName = inputNames[0]; + Assert.Equal(typeof(float), inputMeta[inputName].ElementType); + Assert.True(inputMeta[inputName].IsTensor); + var longShape = Array.ConvertAll(inputMeta[inputName].Dimensions, Convert.ToInt64); + var byteSize = ShapeUtils.GetSizeForShape(longShape); + pinnedInputs.Add(FixedBufferOnnxValue.CreateFromMemory(memInfo, inputData, + TensorElementType.Float, longShape, byteSize)); + + + // Prepare output buffer + Assert.Single(outputMeta.Keys); + var outputNames = outputMeta.Keys.ToArray(); + var outputName = outputNames[0]; + Assert.Equal(typeof(float), outputMeta[outputName].ElementType); + Assert.True(outputMeta[outputName].IsTensor); + longShape = Array.ConvertAll(outputMeta[outputName].Dimensions, Convert.ToInt64); + byteSize = ShapeUtils.GetSizeForShape(longShape); + float[] outputBuffer = new float[expectedOutput.Length]; + pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromMemory(memInfo, outputBuffer, + TensorElementType.Float, longShape, byteSize)); + + session.Run(inputNames, pinnedInputs, outputNames, pinnedOutputs); + Assert.Equal(expectedOutput, outputBuffer, new FloatComparer()); + } + + // Run inference with named inputs and named outputs + { + // correct pre-allocated outputs + var expectedOutputValues = new List() + { + NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) + }; + session.Run(container, expectedOutputValues); + ValidateRunResultData(expectedOutputValues[0].AsTensor(), expectedOutput, expectedDimensions); + } + + // Run inference with pinned inputs and named outputs + using (var pinnedInputs = new DisposableListTest()) + { + var inputNames = container.Select(i => i.Name).ToArray(); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + + // expected inputs and outputs + var expectedOutputValues = new List() + { + NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) + }; + session.Run(inputNames, pinnedInputs, expectedOutputValues); + ValidateRunResultData(expectedOutputValues[0].AsTensor(), expectedOutput, expectedDimensions); + } + + // Run inference with named inputs and pinned outputs + { + // correct pre-allocated outputs + using (var pinnedOutputs = new DisposableListTest()) + { + var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); + pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); + 
session.Run(container, expectedOutputNames, pinnedOutputs); + //ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); + } + } + + // Run inference with pinned inputs and pinned outputs + using (DisposableListTest pinnedInputs = new DisposableListTest(), + pinnedOutputs = new DisposableListTest()) + { + var inputNames = container.Select(i => i.Name).ToArray(); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + + var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); + pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); + + session.Run(inputNames, pinnedInputs, expectedOutputNames, pinnedOutputs); + //ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); + } + } + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback +#endif + + #if USE_CUDA [Fact(DisplayName = "TestCUDAProviderOptions")] private void TestCUDAProviderOptions() From 7d2e575c772731a42e6fa581017b488c2d46cbb3 Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 18 Dec 2024 11:07:27 -0700 Subject: [PATCH 2/9] more changes --- .../ManagedProjections.shared.cs | 37 +++- .../NamedOnnxValue.shared.cs | 15 ++ .../OrtValue.shared.cs | 31 +-- .../InferenceTest.netcore.cs | 182 ++++++++++++++++-- 4 files changed, 227 insertions(+), 38 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs index 13117f23e8ef9..9f6469cbcfad1 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs @@ -6,6 +6,13 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.Reflection; + + +#if NET8_0 +using DotnetTensors = System.Numerics.Tensors; +using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; +#endif namespace Microsoft.ML.OnnxRuntime { @@ -166,13 +173,41 @@ private static OrtValue CreateMapProjection(NamedOnnxValue node, NodeMetadata el /// private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata elementMeta) { - if (node.Value is not TensorBase) +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + if (node.Value is not TensorBase && node.Value.GetType().GetGenericTypeDefinition() != typeof(DotnetTensors.Tensor<>)) { throw new OnnxRuntimeException(ErrorCode.InvalidArgument, $"NamedOnnxValue contains: {node.Value.GetType()}, expecting a Tensor"); } + OrtValue ortValue; + TensorElementType elementType; + + if (node.Value is TensorBase) + { + ortValue = OrtValue.CreateFromTensorObject(node.Value as TensorBase, out elementType); + } + else + { + MethodInfo method = typeof(OrtValue).GetMethod(nameof(OrtValue.CreateTensorValueFromDotnetTensorObject), BindingFlags.Static | BindingFlags.Public); + Type tensorType = node.Value.GetType().GetGenericArguments()[0]; + MethodInfo generic = method.MakeGenericMethod(tensorType); + ortValue = (OrtValue)generic.Invoke(null, [node.Value]); + elementType = TensorBase.GetTypeInfo(tensorType).ElementType; + } + + +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback +#else + if (node.Value is not TensorBase) + { + throw new OnnxRuntimeException(ErrorCode.InvalidArgument, + 
$"NamedOnnxValue contains: {node.Value.GetType()}, expecting a Tensor"); + } OrtValue ortValue = OrtValue.CreateFromTensorObject(node.Value as TensorBase, out TensorElementType elementType); + +#endif try { if (elementType != elementMeta.ElementDataType) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs index 8908e77c3fc50..50749695c565b 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs @@ -218,6 +218,21 @@ public Tensor AsTensor() return _value as Tensor; // will return null if not castable } + +#if NET8_0 +#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback + /// + /// Try-get value as a Tensor<T>. + /// + /// Type + /// Tensor object if contained value is a Tensor. Null otherwise + public DotnetTensors.Tensor AsDotnetTensor() + { + return _value as DotnetTensors.Tensor; // will return null if not castable + } +#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback +#endif + /// /// Try-get value as an Enumerable<T>. /// T is usually a NamedOnnxValue instance that may contain diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 5cc1ae6d43fc6..2031291dd1af7 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -6,6 +6,8 @@ using System.Buffers; using System.Collections.Generic; using System.Diagnostics; +using System.Linq; +using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; @@ -232,11 +234,7 @@ public DotnetTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where var typeSpan = MemoryMarshal.Cast(byteSpan); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = new nint[shape.Length]; - for (int i = 0; i < shape.Length; i++) - { - nArray[i] = (nint)shape[i]; - } + var nArray = shape.Select(x => (nint)x).ToArray(); return new DotnetTensors.ReadOnlyTensorSpan(typeSpan, nArray, []); } @@ -283,11 +281,7 @@ public DotnetTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T var typeSpan = MemoryMarshal.Cast(byteSpan); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = new nint[shape.Length]; - for (int i = 0; i < shape.Length; i++) - { - nArray[i] = (nint)shape[i]; - } + var nArray = shape.Select(x => (nint)x).ToArray(); return new DotnetTensors.TensorSpan(typeSpan, nArray, []); } @@ -314,11 +308,7 @@ public DotnetTensors.TensorSpan GetTensorSpanMutableRawData() where T : var byteSpan = GetTensorBufferRawData(typeof(T)); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = new nint[shape.Length]; - for (int i = 0; i < shape.Length; i++) - { - nArray[i] = (nint)shape[i]; - } + var nArray = shape.Select(x => (nint)x).ToArray(); return new DotnetTensors.TensorSpan(byteSpan, nArray, []); } @@ -716,7 +706,10 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. 
} unsafe { - GCHandle handle = GCHandle.Alloc(tensor, GCHandleType.Pinned); + var field = tensor.GetType().GetFields(BindingFlags.Instance | BindingFlags.NonPublic).Where(x => x.Name == "_values").FirstOrDefault(); + var backingData = (T[])field.GetValue(tensor); + GCHandle handle = GCHandle.Alloc(backingData, GCHandleType.Pinned); + //GCHandle handle = GCHandle.Alloc(tensor.GetPinnableReference(), GCHandleType.Pinned); var memHandle = new MemoryHandle(Unsafe.AsPointer(ref tensor.GetPinnableReference()), handle); try @@ -729,11 +722,7 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. var bufferLengthInBytes = tensor.FlattenedLength * sizeof(T); - var shape = new long[tensor.Rank]; - for (int i = 0; i < shape.Length; i++) - { - shape[i] = tensor.Lengths[i]; - } + var shape = tensor.Lengths.ToArray().Select(x => (long)x).ToArray(); var typeInfo = TensorBase.GetTypeInfo(typeof(T)) ?? throw new OnnxRuntimeException(ErrorCode.InvalidArgument, $"Tensor of type: {typeof(T)} is not supported"); diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs index 37ca6111b3e6d..169ceec7407bb 100644 --- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs +++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs @@ -74,7 +74,7 @@ public void CanCreateAndDisposeSessionWithModelPath() #if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback - [Theory(DisplayName = "CanRunInferenceOnAModel")] + [Theory] [InlineData(GraphOptimizationLevel.ORT_DISABLE_ALL, true)] [InlineData(GraphOptimizationLevel.ORT_DISABLE_ALL, false)] [InlineData(GraphOptimizationLevel.ORT_ENABLE_EXTENDED, true)] @@ -139,7 +139,7 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp using (var pinnedInputs = new DisposableListTest()) { var inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); // output names not specified using (var results = session.Run(inputNames, pinnedInputs)) // results is an IReadOnlyList container @@ -166,10 +166,8 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp var inputName = inputNames[0]; Assert.Equal(typeof(float), inputMeta[inputName].ElementType); Assert.True(inputMeta[inputName].IsTensor); - var longShape = Array.ConvertAll(inputMeta[inputName].Dimensions, Convert.ToInt64); - var byteSize = ShapeUtils.GetSizeForShape(longShape); - pinnedInputs.Add(FixedBufferOnnxValue.CreateFromMemory(memInfo, inputData, - TensorElementType.Float, longShape, byteSize)); + var shape = inputMeta[inputName].Dimensions.Select(x => (nint)x).ToArray(); + pinnedInputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(DotnetTensors.Tensor.Create(inputData, shape))); // Prepare output buffer @@ -178,11 +176,9 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp var outputName = outputNames[0]; Assert.Equal(typeof(float), outputMeta[outputName].ElementType); Assert.True(outputMeta[outputName].IsTensor); - longShape = Array.ConvertAll(outputMeta[outputName].Dimensions, Convert.ToInt64); - byteSize = ShapeUtils.GetSizeForShape(longShape); float[] 
outputBuffer = new float[expectedOutput.Length]; - pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromMemory(memInfo, outputBuffer, - TensorElementType.Float, longShape, byteSize)); + shape = outputMeta[outputName].Dimensions.Select(x => (nint)x).ToArray(); + pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(DotnetTensors.Tensor.Create(outputBuffer, shape))); session.Run(inputNames, pinnedInputs, outputNames, pinnedOutputs); Assert.Equal(expectedOutput, outputBuffer, new FloatComparer()); @@ -196,14 +192,14 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) }; session.Run(container, expectedOutputValues); - ValidateRunResultData(expectedOutputValues[0].AsTensor(), expectedOutput, expectedDimensions); + ValidateRunResultData(expectedOutputValues[0].AsDotnetTensor(), expectedOutput, expectedDimensions); } // Run inference with pinned inputs and named outputs using (var pinnedInputs = new DisposableListTest()) { var inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); // expected inputs and outputs var expectedOutputValues = new List() @@ -211,7 +207,7 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) }; session.Run(inputNames, pinnedInputs, expectedOutputValues); - ValidateRunResultData(expectedOutputValues[0].AsTensor(), expectedOutput, expectedDimensions); + ValidateRunResultData(expectedOutputValues[0].AsDotnetTensor(), expectedOutput, expectedDimensions); } // Run inference with named inputs and pinned outputs @@ -222,7 +218,7 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); session.Run(container, expectedOutputNames, pinnedOutputs); - //ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); + ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); } } @@ -231,16 +227,151 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp pinnedOutputs = new DisposableListTest()) { var inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromTensor(i.AsTensor()))); + pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); session.Run(inputNames, pinnedInputs, expectedOutputNames, pinnedOutputs); - //ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); + ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); } } } + + [Fact] + public void InferenceSessionDisposedDotnetTensors() + { + var model = TestDataLoader.LoadModelFromEmbeddedResource("squeezenet.onnx"); + + // Set the graph optimization level for this session. 
+ using (SessionOptions options = new SessionOptions()) + { + options.ProfileOutputPathPrefix = "Ort_P_"; + options.EnableProfiling = true; + using (var session = new InferenceSession(model, options)) + { + var inputMeta = session.InputMetadata; + var container = new List(); + + float[] inputData = TestDataLoader.LoadTensorFromEmbeddedResource("bench.in"); // this is the data for only one input tensor for this model + + foreach (var name in inputMeta.Keys) + { + Assert.Equal(typeof(float), inputMeta[name].ElementType); + Assert.True(inputMeta[name].IsTensor); + var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint) x).ToArray()); + container.Add(NamedOnnxValue.CreateFromDotnetTensor(name, tensor)); + } + + // Run inference with named inputs and outputs created with in Run() + using (var results = session.Run(container)) // results is an IReadOnlyList container + { + ValidateRunResults(results); + } + + string profile_file = session.EndProfiling(); + + // Profile file should have the output path prefix in it + Assert.Contains("Ort_P_", profile_file); + } + } + } + + [Fact] + private void ThrowWrongInputTypeDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputMeta = session.InputMetadata; + var container = new List(); + int[] inputDataInt = inputData.Select(x => (int)x).ToArray(); + var tensor = DotnetTensors.Tensor.Create(inputDataInt, inputMeta["data_0"].Dimensions.Select(x => (nint)x).ToArray()); + container.Add(NamedOnnxValue.CreateFromDotnetTensor("data_0", tensor)); + var ex = Assert.Throws(() => session.Run(container)); + var msg = ex.ToString(); + Assert.Contains("Tensor element data type discovered", msg); + session.Dispose(); + } + + [Fact] + private void ThrowWrongOutputNameDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputTensor = tuple.Item3; + var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; + var outputTensor = DotnetTensors.Tensor.Create([ 1, 2 ], [2]); + var bad_names = new string[] { "bad_output_name" }; + var ex = Assert.Throws(() => session.Run(inputs, bad_names)); + Assert.Contains("Output name: 'bad_output_name' is not in the metadata", ex.Message); + session.Dispose(); + } + + [Fact] + private void ThrowWrongOutputTypeDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputTensor = tuple.Item3; + var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; + var outputTensor = DotnetTensors.Tensor.Create( [ 1, 1000, 1, 1 ], [4]); + var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; + var ex = Assert.Throws(() => session.Run(inputs, outputs)); + // TODO: check exception message + // InferenceSession::ValidateOutputs() does not check type so far. Currently this will finally trigger an error in Softmax. 
+ session.Dispose(); + } + + [Fact] + private void ThrowWrongOutputDimensionDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputTensor = tuple.Item3; + var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; + var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1001, 1, 1 }); + var outputs = new List { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) }; + var ex = Assert.Throws(() => session.Run(inputs, outputs)); + // TODO: check exception message + // InferenceSession::ValidateOutputs() does not check dims so far. Currently this will finally trigger an error in Softmax. + session.Dispose(); + } + + [Fact] + private void ThrowNoOutputDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputTensor = tuple.Item3; + var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; + var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1000, 1, 1 }); + var outputs = new List { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) }; + var ex = Assert.Throws(() => session.Run(inputs, new NamedOnnxValue[0])); + Assert.Contains("[ErrorCode:InvalidArgument] At least one output should be requested.", ex.Message); + session.Dispose(); + } + + [Fact] + private void ThrowInconsistentPinnedOutputsDotnetTensors() + { + var tuple = OpenSessionSqueezeNet(); + var session = tuple.Item1; + var inputData = tuple.Item2; + var inputTensor = tuple.Item3; + var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; + var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1000, 1, 1 }); + + using (var outputs = new DisposableListTest()) + { + var ex = Assert.Throws(() => session.Run(inputs, new string[] { "softmaxout_1" }, outputs)); + Assert.StartsWith("Length of outputNames (1) must match that of outputValues (0).", ex.Message); + } + } #pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback #endif @@ -1594,6 +1725,25 @@ private void VerifyNativeMethodsExist() } } +#if NET8_0_OR_GREATER +#pragma warning disable SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. + private void ValidateRunResultData(DotnetTensors.Tensor resultTensor, float[] expectedOutput, int[] expectedDimensions) + { + Assert.Equal(expectedDimensions.Length, resultTensor.Rank); + + var resultDimensions = resultTensor.Lengths; + for (int i = 0; i < expectedDimensions.Length; i++) + { + Assert.Equal(expectedDimensions[i], resultDimensions[i]); + } + + var resultArray = resultTensor.ToArray(); + Assert.Equal(expectedOutput.Length, resultArray.Length); + Assert.Equal(expectedOutput, resultArray, new FloatComparer()); + } +#pragma warning restore SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. 
+#endif + static string GetTestModelsDir() { // get build directory, append downloaded models location From 6df838b3c57c49d81ca40730e4b40fe9b96ade60 Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 18 Dec 2024 12:25:21 -0700 Subject: [PATCH 3/9] msbuild sdk extras update --- .../FixedBufferOnnxValue.shared.cs | 10 +++---- .../ManagedProjections.shared.cs | 5 ++-- .../Microsoft.ML.OnnxRuntime.csproj | 2 +- .../NamedOnnxValue.shared.cs | 30 +++++++++---------- .../OrtValue.shared.cs | 11 ++++--- .../InferenceTest.netcore.cs | 10 +++---- .../linux_pack/LinuxPackNativeNuget.csproj | 2 +- 7 files changed, 34 insertions(+), 36 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs index d54d9b677be60..6d84d8bf2a709 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs @@ -4,7 +4,7 @@ using Microsoft.ML.OnnxRuntime.Tensors; using System; -#if NET8_0 +#if NET8_0_OR_GREATER using DotnetTensors = System.Numerics.Tensors; using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; #endif @@ -14,7 +14,7 @@ namespace Microsoft.ML.OnnxRuntime /// /// This is a legacy class that is kept for backward compatibility. /// Use OrtValue based API. - /// + /// /// Represents an OrtValue with its underlying buffer pinned /// public class FixedBufferOnnxValue : IDisposable @@ -44,7 +44,7 @@ public static FixedBufferOnnxValue CreateFromTensor(Tensor value) return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// Creates a object from the tensor and pins its underlying buffer. @@ -83,7 +83,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tenso /// Here is an example of using a 3rd party library class for processing float16/bfloat16. /// Currently, to pass tensor data and create a tensor one must copy data to Float16/BFloat16 structures /// so DenseTensor can recognize it. - /// + /// /// If you are using a library that has a class Half and it is blittable, that is its managed in memory representation /// matches native one and its size is 16-bits, you can use the following conceptual example /// to feed/fetch data for inference using Half array. 
This allows you to avoid copying data from your Half[] to Float16[] @@ -94,7 +94,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tenso /// var input_shape = new long[] {input.Length}; /// Half[] output = new Half[40]; // Whatever the expected len/shape is must match /// var output_shape = new long[] {output.Length}; - /// + /// /// var memInfo = OrtMemoryInfo.DefaultInstance; // CPU /// /// using(var fixedBufferInput = FixedBufferOnnxvalue.CreateFromMemory{Half}(memInfo, diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs index 9f6469cbcfad1..5a604e2a69305 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs @@ -9,7 +9,7 @@ using System.Reflection; -#if NET8_0 +#if NET8_0_OR_GREATER using DotnetTensors = System.Numerics.Tensors; using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; #endif @@ -173,7 +173,7 @@ private static OrtValue CreateMapProjection(NamedOnnxValue node, NodeMetadata el /// private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata elementMeta) { -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback if (node.Value is not TensorBase && node.Value.GetType().GetGenericTypeDefinition() != typeof(DotnetTensors.Tensor<>)) { @@ -226,4 +226,3 @@ private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata } } } - diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj index 0dbfd947e822c..00108350b6da3 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj +++ b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj @@ -1,4 +1,4 @@ - + Microsoft.ML.OnnxRuntime diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs index 50749695c565b..deef8032e5a67 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs @@ -8,7 +8,7 @@ using System.Diagnostics; using System.Linq; -#if NET8_0 +#if NET8_0_OR_GREATER using DotnetTensors = System.Numerics.Tensors; using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; #endif @@ -35,37 +35,37 @@ internal MapHelper(TensorBase keys, TensorBase values) /// /// This is a legacy class that is kept for backward compatibility. /// Use OrtValue based API. - /// - /// The class associates a name with an Object. + /// + /// The class associates a name with an Object. /// The name of the class is a misnomer, it does not hold any Onnx values, /// just managed representation of them. - /// + /// /// The class is currently used as both inputs and outputs. Because it is non- /// disposable, it can not hold on to any native objects. - /// + /// /// When used as input, we temporarily create OrtValues that map managed inputs /// directly. Thus we are able to avoid copying of contiguous data. - /// + /// /// For outputs, tensor buffers works the same as input, providing it matches /// the expected output shape. For other types (maps and sequences) we create a copy of the data. 
/// This is because, the class is not Disposable and it is a public interface, thus it can not own /// the underlying OrtValues that must be destroyed before Run() returns. - /// + /// /// To avoid data copying on output, use DisposableNamedOnnxValue class that is returned from Run() methods. /// This provides access to the native memory tensors and avoids copying. - /// + /// /// It is a recursive structure that may contain Tensors (base case) /// Other sequences and maps. Although the OnnxValueType is exposed, /// the caller is supposed to know the actual data type contained. - /// + /// /// The convention is that for tensors, it would contain a DenseTensor{T} instance or /// anything derived from Tensor{T}. - /// + /// /// For sequences, it would contain a IList{T} where T is an instance of NamedOnnxValue that /// would contain a tensor or another type. - /// + /// /// For Maps, it would contain a IDictionary{K, V} where K,V are primitive types or strings. - /// + /// /// public class NamedOnnxValue { @@ -145,7 +145,7 @@ public static NamedOnnxValue CreateFromTensor(string name, Tensor value) return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// This is a factory method that instantiates NamedOnnxValue @@ -219,7 +219,7 @@ public Tensor AsTensor() } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// Try-get value as a Tensor<T>. @@ -303,7 +303,7 @@ internal virtual IntPtr OutputToOrtValueHandle(NodeMetadata metadata, out IDispo } } - throw new OnnxRuntimeException(ErrorCode.NotImplemented, + throw new OnnxRuntimeException(ErrorCode.NotImplemented, $"Can not create output OrtValue for NamedOnnxValue '{metadata.OnnxValueType}' type." + $" Only tensors can be pre-allocated for outputs " + $" Use Run() overloads that return DisposableNamedOnnxValue to get access to all Onnx value types that may be returned as output."); diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 2031291dd1af7..70b9948f893a5 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -12,7 +12,7 @@ using System.Runtime.InteropServices; using System.Text; -#if NET8_0 +#if NET8_0_OR_GREATER using DotnetTensors = System.Numerics.Tensors; using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; #endif @@ -213,7 +213,7 @@ public ReadOnlySpan GetTensorDataAsSpan() where T : unmanaged return MemoryMarshal.Cast(byteSpan); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// Returns a ReadOnlyTensorSpan over tensor native buffer that @@ -261,7 +261,7 @@ public Span GetTensorMutableDataAsSpan() where T : unmanaged return MemoryMarshal.Cast(byteSpan); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// Returns a TensorSpan over tensor native buffer. 
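Aside (not part of the patch): patch 3 replaces the NET8_0 guards from the earlier commits with NET8_0_OR_GREATER. The compiler defines NET8_0 only when the TargetFramework is exactly net8.0, so the preview-tensor members would drop out of net9.0+ builds; NET8_0_OR_GREATER is defined for net8.0 and every later TFM. Illustrative directives only:

#if NET8_0
// Compiled only when the TargetFramework is exactly net8.0.
#endif
#if NET8_0_OR_GREATER
// Compiled for net8.0, net9.0, and any later .NET target.
#endif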
@@ -297,7 +297,7 @@ public Span GetTensorMutableRawData() return GetTensorBufferRawData(typeof(byte)); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. /// /// Provides mutable raw native buffer access. @@ -686,7 +686,7 @@ public static OrtValue CreateTensorValueFromMemory(T[] data, long[] shape) wh return OrtValue.CreateTensorValueFromMemory(OrtMemoryInfo.DefaultInstance, new Memory(data), shape); } -#if NET8_0 +#if NET8_0_OR_GREATER #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback /// /// This is a factory method creates a native Onnxruntime OrtValue containing a tensor. @@ -709,7 +709,6 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. var field = tensor.GetType().GetFields(BindingFlags.Instance | BindingFlags.NonPublic).Where(x => x.Name == "_values").FirstOrDefault(); var backingData = (T[])field.GetValue(tensor); GCHandle handle = GCHandle.Alloc(backingData, GCHandleType.Pinned); - //GCHandle handle = GCHandle.Alloc(tensor.GetPinnableReference(), GCHandleType.Pinned); var memHandle = new MemoryHandle(Unsafe.AsPointer(ref tensor.GetPinnableReference()), handle); try diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs index 169ceec7407bb..369190010caff 100644 --- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs +++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs @@ -333,8 +333,8 @@ private void ThrowWrongOutputDimensionDotnetTensors() var inputData = tuple.Item2; var inputTensor = tuple.Item3; var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1001, 1, 1 }); - var outputs = new List { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) }; + var outputTensor = DotnetTensors.Tensor.Create([ 1, 1001, 1, 1 ], [4]); + var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; var ex = Assert.Throws(() => session.Run(inputs, outputs)); // TODO: check exception message // InferenceSession::ValidateOutputs() does not check dims so far. Currently this will finally trigger an error in Softmax. 
@@ -349,8 +349,8 @@ private void ThrowNoOutputDotnetTensors() var inputData = tuple.Item2; var inputTensor = tuple.Item3; var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1000, 1, 1 }); - var outputs = new List { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) }; + var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]); + var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; var ex = Assert.Throws(() => session.Run(inputs, new NamedOnnxValue[0])); Assert.Contains("[ErrorCode:InvalidArgument] At least one output should be requested.", ex.Message); session.Dispose(); @@ -364,7 +364,7 @@ private void ThrowInconsistentPinnedOutputsDotnetTensors() var inputData = tuple.Item2; var inputTensor = tuple.Item3; var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = new DenseTensor((ReadOnlySpan)new[] { 1, 1000, 1, 1 }); + var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]); using (var outputs = new DisposableListTest()) { diff --git a/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj b/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj index 098078d2e3683..cfe76971ae793 100644 --- a/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj +++ b/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj @@ -7,7 +7,7 @@ If you need a more sophisticated package for testing, you can run the production packaging pipeline against your branch and download the resulting nuget package from the build artifacts. --> - + netstandard2.0 $(OnnxRuntimeBuildDirectory)/NativeNuget.nuspec From e1b5b2cb63a64b5241136143cb5c08be16f6f92b Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Mon, 6 Jan 2025 10:58:25 -0700 Subject: [PATCH 4/9] updated csproj --- .../Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj index 00108350b6da3..8aedbc28eee11 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj +++ b/csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj @@ -1,4 +1,4 @@ - + Microsoft.ML.OnnxRuntime @@ -184,7 +184,7 @@ - + From 645b8b608c265aa584937145a0d5de5ca9c036c0 Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Mon, 6 Jan 2025 11:02:53 -0700 Subject: [PATCH 5/9] added missed csproj --- csharp/tools/linux_pack/LinuxPackNativeNuget.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj b/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj index cfe76971ae793..b814f99b05ae1 100644 --- a/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj +++ b/csharp/tools/linux_pack/LinuxPackNativeNuget.csproj @@ -7,7 +7,7 @@ If you need a more sophisticated package for testing, you can run the production packaging pipeline against your branch and download the resulting nuget package from the build artifacts. 
--> - + netstandard2.0 $(OnnxRuntimeBuildDirectory)/NativeNuget.nuspec From 6724b3bf4217d409e4abfed80f3ab9953a58a0b4 Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 8 Jan 2025 10:55:36 -0700 Subject: [PATCH 6/9] changes from PR comments --- .../FixedBufferOnnxValue.shared.cs | 27 +- .../ManagedProjections.shared.cs | 38 +-- .../NamedOnnxValue.shared.cs | 61 +--- .../OrtValue.shared.cs | 9 +- .../InferenceTest.netcore.cs | 266 +++++------------- 5 files changed, 98 insertions(+), 303 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs index 6d84d8bf2a709..c8b62b145acaf 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs @@ -4,17 +4,12 @@ using Microsoft.ML.OnnxRuntime.Tensors; using System; -#if NET8_0_OR_GREATER -using DotnetTensors = System.Numerics.Tensors; -using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; -#endif - namespace Microsoft.ML.OnnxRuntime { /// /// This is a legacy class that is kept for backward compatibility. /// Use OrtValue based API. - /// + /// /// Represents an OrtValue with its underlying buffer pinned /// public class FixedBufferOnnxValue : IDisposable @@ -44,22 +39,6 @@ public static FixedBufferOnnxValue CreateFromTensor(Tensor value) return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType); } -#if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback - /// - /// Creates a object from the tensor and pins its underlying buffer. - /// - /// - /// - /// a disposable instance of FixedBufferOnnxValue - public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tensor value) where T : unmanaged - { - var ortValue = OrtValue.CreateTensorValueFromDotnetTensorObject(value); - return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, TensorBase.GetTypeInfo(typeof(T)).ElementType); - } -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback -#endif - /// /// This is a factory method that creates a disposable instance of FixedBufferOnnxValue /// on top of a buffer. Internally, it will pin managed buffer and will create @@ -83,7 +62,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tenso /// Here is an example of using a 3rd party library class for processing float16/bfloat16. /// Currently, to pass tensor data and create a tensor one must copy data to Float16/BFloat16 structures /// so DenseTensor can recognize it. - /// + /// /// If you are using a library that has a class Half and it is blittable, that is its managed in memory representation /// matches native one and its size is 16-bits, you can use the following conceptual example /// to feed/fetch data for inference using Half array. 
This allows you to avoid copying data from your Half[] to Float16[] @@ -94,7 +73,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor(DotnetTensors.Tenso /// var input_shape = new long[] {input.Length}; /// Half[] output = new Half[40]; // Whatever the expected len/shape is must match /// var output_shape = new long[] {output.Length}; - /// + /// /// var memInfo = OrtMemoryInfo.DefaultInstance; // CPU /// /// using(var fixedBufferInput = FixedBufferOnnxvalue.CreateFromMemory{Half}(memInfo, diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs index 5a604e2a69305..13117f23e8ef9 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs @@ -6,13 +6,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; -using System.Reflection; - - -#if NET8_0_OR_GREATER -using DotnetTensors = System.Numerics.Tensors; -using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; -#endif namespace Microsoft.ML.OnnxRuntime { @@ -173,41 +166,13 @@ private static OrtValue CreateMapProjection(NamedOnnxValue node, NodeMetadata el /// private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata elementMeta) { -#if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback - if (node.Value is not TensorBase && node.Value.GetType().GetGenericTypeDefinition() != typeof(DotnetTensors.Tensor<>)) - { - throw new OnnxRuntimeException(ErrorCode.InvalidArgument, - $"NamedOnnxValue contains: {node.Value.GetType()}, expecting a Tensor"); - } - - OrtValue ortValue; - TensorElementType elementType; - - if (node.Value is TensorBase) - { - ortValue = OrtValue.CreateFromTensorObject(node.Value as TensorBase, out elementType); - } - else - { - MethodInfo method = typeof(OrtValue).GetMethod(nameof(OrtValue.CreateTensorValueFromDotnetTensorObject), BindingFlags.Static | BindingFlags.Public); - Type tensorType = node.Value.GetType().GetGenericArguments()[0]; - MethodInfo generic = method.MakeGenericMethod(tensorType); - ortValue = (OrtValue)generic.Invoke(null, [node.Value]); - elementType = TensorBase.GetTypeInfo(tensorType).ElementType; - } - - -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback -#else if (node.Value is not TensorBase) { throw new OnnxRuntimeException(ErrorCode.InvalidArgument, $"NamedOnnxValue contains: {node.Value.GetType()}, expecting a Tensor"); } - OrtValue ortValue = OrtValue.CreateFromTensorObject(node.Value as TensorBase, out TensorElementType elementType); -#endif + OrtValue ortValue = OrtValue.CreateFromTensorObject(node.Value as TensorBase, out TensorElementType elementType); try { if (elementType != elementMeta.ElementDataType) @@ -226,3 +191,4 @@ private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata } } } + diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs index deef8032e5a67..48a10455588bc 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs @@ -8,11 +8,6 @@ using System.Diagnostics; using System.Linq; -#if NET8_0_OR_GREATER -using DotnetTensors = System.Numerics.Tensors; -using TensorPrimitives = 
System.Numerics.Tensors.TensorPrimitives; -#endif - namespace Microsoft.ML.OnnxRuntime { /// @@ -35,37 +30,37 @@ internal MapHelper(TensorBase keys, TensorBase values) /// /// This is a legacy class that is kept for backward compatibility. /// Use OrtValue based API. - /// - /// The class associates a name with an Object. + /// + /// The class associates a name with an Object. /// The name of the class is a misnomer, it does not hold any Onnx values, /// just managed representation of them. - /// + /// /// The class is currently used as both inputs and outputs. Because it is non- /// disposable, it can not hold on to any native objects. - /// + /// /// When used as input, we temporarily create OrtValues that map managed inputs /// directly. Thus we are able to avoid copying of contiguous data. - /// + /// /// For outputs, tensor buffers works the same as input, providing it matches /// the expected output shape. For other types (maps and sequences) we create a copy of the data. /// This is because, the class is not Disposable and it is a public interface, thus it can not own /// the underlying OrtValues that must be destroyed before Run() returns. - /// + /// /// To avoid data copying on output, use DisposableNamedOnnxValue class that is returned from Run() methods. /// This provides access to the native memory tensors and avoids copying. - /// + /// /// It is a recursive structure that may contain Tensors (base case) /// Other sequences and maps. Although the OnnxValueType is exposed, /// the caller is supposed to know the actual data type contained. - /// + /// /// The convention is that for tensors, it would contain a DenseTensor{T} instance or /// anything derived from Tensor{T}. - /// + /// /// For sequences, it would contain a IList{T} where T is an instance of NamedOnnxValue that /// would contain a tensor or another type. - /// + /// /// For Maps, it would contain a IDictionary{K, V} where K,V are primitive types or strings. - /// + /// /// public class NamedOnnxValue { @@ -145,23 +140,6 @@ public static NamedOnnxValue CreateFromTensor(string name, Tensor value) return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR); } -#if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback - /// - /// This is a factory method that instantiates NamedOnnxValue - /// and associated name with an instance of a Tensor - /// - /// - /// name - /// Tensor - /// - public static NamedOnnxValue CreateFromDotnetTensor(string name, DotnetTensors.Tensor value) - { - return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR); - } -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback -#endif - /// /// This is a factory method that instantiates NamedOnnxValue. /// It would contain a sequence of elements @@ -218,21 +196,6 @@ public Tensor AsTensor() return _value as Tensor; // will return null if not castable } - -#if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback - /// - /// Try-get value as a Tensor<T>. - /// - /// Type - /// Tensor object if contained value is a Tensor. 
Null otherwise - public DotnetTensors.Tensor AsDotnetTensor() - { - return _value as DotnetTensors.Tensor; // will return null if not castable - } -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback -#endif - /// /// Try-get value as an Enumerable<T>. /// T is usually a NamedOnnxValue instance that may contain @@ -303,7 +266,7 @@ internal virtual IntPtr OutputToOrtValueHandle(NodeMetadata metadata, out IDispo } } - throw new OnnxRuntimeException(ErrorCode.NotImplemented, + throw new OnnxRuntimeException(ErrorCode.NotImplemented, $"Can not create output OrtValue for NamedOnnxValue '{metadata.OnnxValueType}' type." + $" Only tensors can be pre-allocated for outputs " + $" Use Run() overloads that return DisposableNamedOnnxValue to get access to all Onnx value types that may be returned as output."); diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 70b9948f893a5..7aaa29ab93627 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -234,7 +234,7 @@ public DotnetTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where var typeSpan = MemoryMarshal.Cast(byteSpan); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = shape.Select(x => (nint)x).ToArray(); + nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); return new DotnetTensors.ReadOnlyTensorSpan(typeSpan, nArray, []); } @@ -281,7 +281,7 @@ public DotnetTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T var typeSpan = MemoryMarshal.Cast(byteSpan); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = shape.Select(x => (nint)x).ToArray(); + nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); return new DotnetTensors.TensorSpan(typeSpan, nArray, []); } @@ -308,7 +308,7 @@ public DotnetTensors.TensorSpan GetTensorSpanMutableRawData() where T : var byteSpan = GetTensorBufferRawData(typeof(T)); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; - var nArray = shape.Select(x => (nint)x).ToArray(); + nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); return new DotnetTensors.TensorSpan(byteSpan, nArray, []); } @@ -720,8 +720,7 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. } var bufferLengthInBytes = tensor.FlattenedLength * sizeof(T); - - var shape = tensor.Lengths.ToArray().Select(x => (long)x).ToArray(); + long[] shape = Array.ConvertAll(tensor.Lengths.ToArray(), new Converter(x => (long)x)); var typeInfo = TensorBase.GetTypeInfo(typeof(T)) ?? 
throw new OnnxRuntimeException(ErrorCode.InvalidArgument, $"Tensor of type: {typeof(T)} is not supported"); diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs index 369190010caff..4e2573918a474 100644 --- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs +++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs @@ -1,10 +1,12 @@ using Microsoft.ML.OnnxRuntime.Tensors; +using Microsoft.VisualStudio.TestPlatform.Utilities; using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Runtime.InteropServices; using System.Text.RegularExpressions; +using System.Xml.Linq; using Xunit; #if NET8_0_OR_GREATER @@ -94,13 +96,14 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp var session = new InferenceSession(model, options); cleanUp.Add(session); + using var runOptions = new RunOptions(); + using var inputOrtValues = new DisposableListTest>(session.InputMetadata.Count); var inputMeta = session.InputMetadata; var outputMeta = session.OutputMetadata; - var container = new List(); float[] expectedOutput = TestDataLoader.LoadTensorFromEmbeddedResource("bench.expected_out"); - int[] expectedDimensions = { 1, 1000, 1, 1 }; // hardcoded for now for the test data - ReadOnlySpan expectedOutputDimensions = expectedDimensions; + long[] expectedDimensions = { 1, 1000, 1, 1 }; // hardcoded for now for the test data + ReadOnlySpan expectedOutputDimensions = expectedDimensions; string[] expectedOutputNames = new string[] { "softmaxout_1" }; float[] inputData = TestDataLoader.LoadTensorFromEmbeddedResource("bench.in"); // this is the data for only one input tensor for this model @@ -109,132 +112,27 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp { Assert.Equal(typeof(float), inputMeta[name].ElementType); Assert.True(inputMeta[name].IsTensor); - nint[] dims = inputMeta[name].Dimensions.Select(x => (nint)x).ToArray(); - var tensor = DotnetTensors.Tensor.Create(inputData, dims); + var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint)x).ToArray()); + inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); - container.Add(NamedOnnxValue.CreateFromDotnetTensor(name, tensor)); } + runOptions.LogId = "CsharpTest"; + runOptions.Terminate = false; // TODO: Test terminate = true, it currently crashes + runOptions.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_ERROR; // Run inference with named inputs and outputs created with in Run() - using (var results = session.Run(container)) // results is an IReadOnlyList container - { - ValidateRunResults(results); - } - - // Run inference with named inputs, outputs created with in Run() and RunOptions - using (var runOptions = new RunOptions()) - { - runOptions.LogId = "CsharpTest"; - runOptions.Terminate = false; // TODO: Test terminate = true, it currently crashes - runOptions.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_ERROR; - IReadOnlyCollection outputNames = session.OutputMetadata.Keys.ToList(); - - using (var results = session.Run(container, outputNames, runOptions)) // results is an IReadOnlyList container - { - ValidateRunResults(results); - } - } - - // Run inference with pinned inputs and outputs created with in Run() - using (var pinnedInputs = new DisposableListTest()) - { - var 
inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); - - // output names not specified - using (var results = session.Run(inputNames, pinnedInputs)) // results is an IReadOnlyList container - { - ValidateRunResults(results); - } - - // output names specified explicitly - using (var results = session.Run(inputNames, pinnedInputs, expectedOutputNames)) // results is an IReadOnlyList container - { - ValidateRunResults(results); - } - } - - // Run inference with outputs pinned from buffers - using (var pinnedInputs = new DisposableListTest()) - using (var pinnedOutputs = new DisposableListTest()) - { - var memInfo = OrtMemoryInfo.DefaultInstance; // CPU - - // Create inputs - Assert.Single(inputMeta.Keys); - var inputNames = inputMeta.Keys.ToArray(); - var inputName = inputNames[0]; - Assert.Equal(typeof(float), inputMeta[inputName].ElementType); - Assert.True(inputMeta[inputName].IsTensor); - var shape = inputMeta[inputName].Dimensions.Select(x => (nint)x).ToArray(); - pinnedInputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(DotnetTensors.Tensor.Create(inputData, shape))); - - - // Prepare output buffer - Assert.Single(outputMeta.Keys); - var outputNames = outputMeta.Keys.ToArray(); - var outputName = outputNames[0]; - Assert.Equal(typeof(float), outputMeta[outputName].ElementType); - Assert.True(outputMeta[outputName].IsTensor); - float[] outputBuffer = new float[expectedOutput.Length]; - shape = outputMeta[outputName].Dimensions.Select(x => (nint)x).ToArray(); - pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(DotnetTensors.Tensor.Create(outputBuffer, shape))); - - session.Run(inputNames, pinnedInputs, outputNames, pinnedOutputs); - Assert.Equal(expectedOutput, outputBuffer, new FloatComparer()); - } - - // Run inference with named inputs and named outputs - { - // correct pre-allocated outputs - var expectedOutputValues = new List() - { - NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) - }; - session.Run(container, expectedOutputValues); - ValidateRunResultData(expectedOutputValues[0].AsDotnetTensor(), expectedOutput, expectedDimensions); - } - - // Run inference with pinned inputs and named outputs - using (var pinnedInputs = new DisposableListTest()) + using (var results = session.Run(runOptions, inputOrtValues.Select(x => x.Key).ToList(), inputOrtValues.Select(x => x.Value).ToList(), new List(["softmaxout_1"]))) // results is an IDisposableReadOnlyCollection container { - var inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); - - // expected inputs and outputs - var expectedOutputValues = new List() + // validate the results + foreach (var r in results) { - NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray())) - }; - session.Run(inputNames, pinnedInputs, expectedOutputValues); - ValidateRunResultData(expectedOutputValues[0].AsDotnetTensor(), expectedOutput, expectedDimensions); - } + Assert.Single(results); - // Run inference with named inputs and pinned outputs - { - // correct pre-allocated outputs - using (var pinnedOutputs = new DisposableListTest()) - { - var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); - 
pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); - session.Run(container, expectedOutputNames, pinnedOutputs); - ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); + ValidateRunResult(r, expectedOutput, expectedDimensions); } } - // Run inference with pinned inputs and pinned outputs - using (DisposableListTest pinnedInputs = new DisposableListTest(), - pinnedOutputs = new DisposableListTest()) - { - var inputNames = container.Select(i => i.Name).ToArray(); - pinnedInputs.AddRange(container.Select(i => FixedBufferOnnxValue.CreateFromDotnetTensor(i.AsDotnetTensor()))); - - var outputTensor = DotnetTensors.Tensor.Create(expectedDimensions.Select(x => (nint)x).ToArray()); - pinnedOutputs.Add(FixedBufferOnnxValue.CreateFromDotnetTensor(outputTensor)); - - session.Run(inputNames, pinnedInputs, expectedOutputNames, pinnedOutputs); - ValidateRunResultData(outputTensor, expectedOutput, expectedDimensions); - } + session.Dispose(); } } @@ -255,18 +153,32 @@ public void InferenceSessionDisposedDotnetTensors() float[] inputData = TestDataLoader.LoadTensorFromEmbeddedResource("bench.in"); // this is the data for only one input tensor for this model - foreach (var name in inputMeta.Keys) + using (var runOptions = new RunOptions()) + using (var inputOrtValues = new DisposableListTest>(session.InputMetadata.Count)) + using (var outputOrtValues = new DisposableListTest>(session.OutputMetadata.Count)) { - Assert.Equal(typeof(float), inputMeta[name].ElementType); - Assert.True(inputMeta[name].IsTensor); - var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint) x).ToArray()); - container.Add(NamedOnnxValue.CreateFromDotnetTensor(name, tensor)); - } + + foreach (var name in inputMeta.Keys) + { + Assert.Equal(typeof(float), inputMeta[name].ElementType); + Assert.True(inputMeta[name].IsTensor); + var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint) x).ToArray()); + inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + } + + // Run inference with named inputs and outputs created with in Run() + using (var results = session.Run(runOptions, inputOrtValues.Select(x => x.Key).ToList(), inputOrtValues.Select(x => x.Value).ToList(), new List(["softmaxout_1"]))) // results is an IDisposableReadOnlyCollection container + { + // validate the results + foreach (var r in results) + { + Assert.Single(results); - // Run inference with named inputs and outputs created with in Run() - using (var results = session.Run(container)) // results is an IReadOnlyList container - { - ValidateRunResults(results); + float[] expectedOutput = TestDataLoader.LoadTensorFromEmbeddedResource("bench.expected_out"); + long[] expectedDimensions = { 1, 1000, 1, 1 }; // hardcoded for now for the test data + ValidateRunResult(r, expectedOutput, expectedDimensions); + } + } } string profile_file = session.EndProfiling(); @@ -277,23 +189,6 @@ public void InferenceSessionDisposedDotnetTensors() } } - [Fact] - private void ThrowWrongInputTypeDotnetTensors() - { - var tuple = OpenSessionSqueezeNet(); - var session = tuple.Item1; - var inputData = tuple.Item2; - var inputMeta = session.InputMetadata; - var container = new List(); - int[] inputDataInt = inputData.Select(x => (int)x).ToArray(); - var tensor = DotnetTensors.Tensor.Create(inputDataInt, inputMeta["data_0"].Dimensions.Select(x => (nint)x).ToArray()); - 
container.Add(NamedOnnxValue.CreateFromDotnetTensor("data_0", tensor)); - var ex = Assert.Throws(() => session.Run(container)); - var msg = ex.ToString(); - Assert.Contains("Tensor element data type discovered", msg); - session.Dispose(); - } - [Fact] private void ThrowWrongOutputNameDotnetTensors() { @@ -301,27 +196,20 @@ private void ThrowWrongOutputNameDotnetTensors() var session = tuple.Item1; var inputData = tuple.Item2; var inputTensor = tuple.Item3; - var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = DotnetTensors.Tensor.Create([ 1, 2 ], [2]); - var bad_names = new string[] { "bad_output_name" }; - var ex = Assert.Throws(() => session.Run(inputs, bad_names)); - Assert.Contains("Output name: 'bad_output_name' is not in the metadata", ex.Message); - session.Dispose(); - } - [Fact] - private void ThrowWrongOutputTypeDotnetTensors() - { - var tuple = OpenSessionSqueezeNet(); - var session = tuple.Item1; - var inputData = tuple.Item2; - var inputTensor = tuple.Item3; - var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = DotnetTensors.Tensor.Create( [ 1, 1000, 1, 1 ], [4]); - var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; - var ex = Assert.Throws(() => session.Run(inputs, outputs)); - // TODO: check exception message - // InferenceSession::ValidateOutputs() does not check type so far. Currently this will finally trigger an error in Softmax. + using (var runOptions = new RunOptions()) + using (var inputOrtValues = new DisposableListTest>(session.InputMetadata.Count)) + using (var outputOrtValues = new DisposableListTest>(session.OutputMetadata.Count)) + { + var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); + + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("bad_output_name", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + + var ex = Assert.Throws(() => session.Run(runOptions, ["data_0"], [inputOrtValues[0].Value], ["bad_output_name"], [outputOrtValues[0].Value])); + Assert.Contains("Output name: 'bad_output_name' is not in the metadata", ex.Message); + } + session.Dispose(); } @@ -332,27 +220,20 @@ private void ThrowWrongOutputDimensionDotnetTensors() var session = tuple.Item1; var inputData = tuple.Item2; var inputTensor = tuple.Item3; - var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = DotnetTensors.Tensor.Create([ 1, 1001, 1, 1 ], [4]); - var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; - var ex = Assert.Throws(() => session.Run(inputs, outputs)); - // TODO: check exception message - // InferenceSession::ValidateOutputs() does not check dims so far. Currently this will finally trigger an error in Softmax. 
- session.Dispose(); - } + var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1]); + + using (var runOptions = new RunOptions()) + using (var inputOrtValues = new DisposableListTest>(session.InputMetadata.Count)) + using (var outputOrtValues = new DisposableListTest>(session.OutputMetadata.Count)) + { + var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); + + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromDotnetTensorObject(outputTensor))); + + var ex = Assert.Throws(() => session.Run(runOptions, ["data_0"], [inputOrtValues[0].Value], ["softmaxout_1"], [outputOrtValues[0].Value])); + } - [Fact] - private void ThrowNoOutputDotnetTensors() - { - var tuple = OpenSessionSqueezeNet(); - var session = tuple.Item1; - var inputData = tuple.Item2; - var inputTensor = tuple.Item3; - var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; - var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]); - var outputs = new List { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) }; - var ex = Assert.Throws(() => session.Run(inputs, new NamedOnnxValue[0])); - Assert.Contains("[ErrorCode:InvalidArgument] At least one output should be requested.", ex.Message); session.Dispose(); } @@ -363,14 +244,21 @@ private void ThrowInconsistentPinnedOutputsDotnetTensors() var session = tuple.Item1; var inputData = tuple.Item2; var inputTensor = tuple.Item3; - var inputs = new List { NamedOnnxValue.CreateFromTensor("data_0", inputTensor) }; var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]); - using (var outputs = new DisposableListTest()) + using (var runOptions = new RunOptions()) + using (var inputOrtValues = new DisposableListTest>(session.InputMetadata.Count)) + using (var outputOrtValues = new DisposableListTest>(session.OutputMetadata.Count)) { - var ex = Assert.Throws(() => session.Run(inputs, new string[] { "softmaxout_1" }, outputs)); + var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); + + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromDotnetTensorObject(outputTensor))); + OrtValue[] outputs = []; + var ex = Assert.Throws(() => session.Run(runOptions, ["data_0"], [inputOrtValues[0].Value], ["softmaxout_1"], outputs)); Assert.StartsWith("Length of outputNames (1) must match that of outputValues (0).", ex.Message); } + session.Dispose(); } #pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback #endif From 57d99da965a943094cd468f13a155c9b793bc138 Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 15 Jan 2025 10:22:03 -0700 Subject: [PATCH 7/9] updating api --- csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 7aaa29ab93627..459d89351544f 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -11,6 +11,8 @@ using System.Runtime.CompilerServices; using 
System.Runtime.InteropServices; using System.Text; +using System.Xml.Linq; + #if NET8_0_OR_GREATER using DotnetTensors = System.Numerics.Tensors; @@ -696,7 +698,7 @@ public static OrtValue CreateTensorValueFromMemory(T[] data, long[] shape) wh /// Tensor object /// discovered tensor element type /// And instance of OrtValue constructed on top of the object - public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors.Tensor tensor) where T : unmanaged + public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors.ITensor, T> tensor) where T : unmanaged { if (!IsContiguousAndDense(tensor)) { @@ -744,7 +746,7 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. } } - private static bool IsContiguousAndDense(DotnetTensors.Tensor tensor) + private static bool IsContiguousAndDense(DotnetTensors.ITensor, T> tensor) where T : unmanaged { // Right most dimension must be 1 for a dense tensor. if (tensor.Strides[^1] != 1) From b05e5cdccb76906defc6e5f9fcf7d3785a8c915a Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 15 Jan 2025 11:32:58 -0700 Subject: [PATCH 8/9] improved reflection --- csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 459d89351544f..290c0c24338f0 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -708,8 +708,7 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. } unsafe { - var field = tensor.GetType().GetFields(BindingFlags.Instance | BindingFlags.NonPublic).Where(x => x.Name == "_values").FirstOrDefault(); - var backingData = (T[])field.GetValue(tensor); + var backingData = (T[])tensor.GetType().GetField("_values", BindingFlags.Instance | BindingFlags.NonPublic).GetValue(tensor); GCHandle handle = GCHandle.Alloc(backingData, GCHandleType.Pinned); var memHandle = new MemoryHandle(Unsafe.AsPointer(ref tensor.GetPinnableReference()), handle); From fc37290cda14219e7b7682bfc759181b9e44ed9e Mon Sep 17 00:00:00 2001 From: Michael Sharp Date: Wed, 22 Jan 2025 10:20:37 -0700 Subject: [PATCH 9/9] changes from PR comments --- .../OrtValue.shared.cs | 41 ++++++++----------- .../InferenceTest.netcore.cs | 16 ++++---- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs index 290c0c24338f0..524f6af56e6bc 100644 --- a/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs +++ b/csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs @@ -6,16 +6,14 @@ using System.Buffers; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; -using System.Reflection; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; -using System.Xml.Linq; - #if NET8_0_OR_GREATER -using DotnetTensors = System.Numerics.Tensors; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using System.Runtime.CompilerServices; +using SystemNumericsTensors = System.Numerics.Tensors; using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives; #endif @@ -216,7 +214,6 @@ public ReadOnlySpan GetTensorDataAsSpan() where T : unmanaged } #if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API 
feedback /// /// Returns a ReadOnlyTensorSpan over tensor native buffer that /// provides a read-only view. @@ -230,7 +227,8 @@ public ReadOnlySpan GetTensorDataAsSpan() where T : unmanaged /// /// ReadOnlySpan /// - public DotnetTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where T : unmanaged + [Experimental("SYSLIB5001")] + public SystemNumericsTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where T : unmanaged { var byteSpan = GetTensorBufferRawData(typeof(T)); @@ -238,9 +236,8 @@ public DotnetTensors.ReadOnlyTensorSpan GetTensorDataAsTensorSpan() where var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); - return new DotnetTensors.ReadOnlyTensorSpan(typeSpan, nArray, []); + return new SystemNumericsTensors.ReadOnlyTensorSpan(typeSpan, nArray, []); } -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback #endif /// @@ -264,7 +261,6 @@ public Span GetTensorMutableDataAsSpan() where T : unmanaged } #if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback /// /// Returns a TensorSpan over tensor native buffer. /// @@ -277,7 +273,8 @@ public Span GetTensorMutableDataAsSpan() where T : unmanaged /// /// ReadOnlySpan /// - public DotnetTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T : unmanaged + [Experimental("SYSLIB5001")] + public SystemNumericsTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T : unmanaged { var byteSpan = GetTensorBufferRawData(typeof(T)); @@ -285,9 +282,8 @@ public DotnetTensors.TensorSpan GetTensorMutableDataAsTensorSpan() where T var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); - return new DotnetTensors.TensorSpan(typeSpan, nArray, []); + return new SystemNumericsTensors.TensorSpan(typeSpan, nArray, []); } -#pragma warning restore SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. #endif /// @@ -300,21 +296,20 @@ public Span GetTensorMutableRawData() } #if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. /// /// Provides mutable raw native buffer access. /// /// TensorSpan over the native buffer bytes - public DotnetTensors.TensorSpan GetTensorSpanMutableRawData() where T : unmanaged + [Experimental("SYSLIB5001")] + public SystemNumericsTensors.TensorSpan GetTensorSpanMutableRawData() where T : unmanaged { var byteSpan = GetTensorBufferRawData(typeof(T)); var shape = GetTypeInfo().TensorTypeAndShapeInfo.Shape; nint[] nArray = Array.ConvertAll(shape, new Converter(x => (nint)x)); - return new DotnetTensors.TensorSpan(byteSpan, nArray, []); + return new SystemNumericsTensors.TensorSpan(byteSpan, nArray, []); } -#pragma warning restore SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback #endif /// @@ -689,7 +684,6 @@ public static OrtValue CreateTensorValueFromMemory(T[] data, long[] shape) wh } #if NET8_0_OR_GREATER -#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback /// /// This is a factory method creates a native Onnxruntime OrtValue containing a tensor. 
/// The method will attempt to pin managed memory so no copying occurs when data is passed down @@ -698,11 +692,12 @@ public static OrtValue CreateTensorValueFromMemory(T[] data, long[] shape) wh /// Tensor object /// discovered tensor element type /// And instance of OrtValue constructed on top of the object - public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors.ITensor, T> tensor) where T : unmanaged + [Experimental("SYSLIB5001")] + public static OrtValue CreateTensorValueFromSystemNumericsTensorObject(SystemNumericsTensors.Tensor tensor) where T : unmanaged { if (!IsContiguousAndDense(tensor)) { - var newTensor = DotnetTensors.Tensor.Create(tensor.Lengths); + var newTensor = SystemNumericsTensors.Tensor.Create(tensor.Lengths); tensor.CopyTo(newTensor); tensor = newTensor; } @@ -745,7 +740,8 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject(DotnetTensors. } } - private static bool IsContiguousAndDense(DotnetTensors.ITensor, T> tensor) where T : unmanaged + [Experimental("SYSLIB5001")] + private static bool IsContiguousAndDense(SystemNumericsTensors.Tensor tensor) where T : unmanaged { // Right most dimension must be 1 for a dense tensor. if (tensor.Strides[^1] != 1) @@ -759,7 +755,6 @@ private static bool IsContiguousAndDense(DotnetTensors.ITensor diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs index 4e2573918a474..c67b474d49c0e 100644 --- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs +++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs @@ -113,7 +113,7 @@ private void CanRunInferenceOnAModelDotnetTensors(GraphOptimizationLevel graphOp Assert.Equal(typeof(float), inputMeta[name].ElementType); Assert.True(inputMeta[name].IsTensor); var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint)x).ToArray()); - inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); } @@ -163,7 +163,7 @@ public void InferenceSessionDisposedDotnetTensors() Assert.Equal(typeof(float), inputMeta[name].ElementType); Assert.True(inputMeta[name].IsTensor); var tensor = DotnetTensors.Tensor.Create(inputData, inputMeta[name].Dimensions.Select(x => (nint) x).ToArray()); - inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + inputOrtValues.Add(new DisposableTestPair(name, OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); } // Run inference with named inputs and outputs created with in Run() @@ -203,8 +203,8 @@ private void ThrowWrongOutputNameDotnetTensors() { var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); - inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); - outputOrtValues.Add(new DisposableTestPair("bad_output_name", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("bad_output_name", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); var ex = Assert.Throws(() => session.Run(runOptions, 
["data_0"], [inputOrtValues[0].Value], ["bad_output_name"], [outputOrtValues[0].Value])); Assert.Contains("Output name: 'bad_output_name' is not in the metadata", ex.Message); @@ -228,8 +228,8 @@ private void ThrowWrongOutputDimensionDotnetTensors() { var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); - inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); - outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromDotnetTensorObject(outputTensor))); + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(outputTensor))); var ex = Assert.Throws(() => session.Run(runOptions, ["data_0"], [inputOrtValues[0].Value], ["softmaxout_1"], [outputOrtValues[0].Value])); } @@ -252,8 +252,8 @@ private void ThrowInconsistentPinnedOutputsDotnetTensors() { var tensor = DotnetTensors.Tensor.Create(inputData, Array.ConvertAll(inputTensor.Dimensions.ToArray(), x => (nint)x)); - inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromDotnetTensorObject(tensor))); - outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromDotnetTensorObject(outputTensor))); + inputOrtValues.Add(new DisposableTestPair("data_0", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(tensor))); + outputOrtValues.Add(new DisposableTestPair("softmaxout_1", OrtValue.CreateTensorValueFromSystemNumericsTensorObject(outputTensor))); OrtValue[] outputs = []; var ex = Assert.Throws(() => session.Run(runOptions, ["data_0"], [inputOrtValues[0].Value], ["softmaxout_1"], outputs)); Assert.StartsWith("Length of outputNames (1) must match that of outputValues (0).", ex.Message);