From 0dbaa309ab2a3adb714a53bd7052e8a5b44eae73 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Thu, 5 Mar 2026 19:51:15 -0800 Subject: [PATCH 01/13] decimal support --- .../src/managed/CSharpInputDataSet.cs | 48 +++ .../src/managed/CSharpOutputDataSet.cs | 98 +++++ .../src/managed/CSharpParamContainer.cs | 66 ++++ .../src/managed/utils/Sql.cs | 327 +++++++++++++++- ...uild-dotnet-core-CSharp-extension-test.cmd | 8 +- .../test/src/managed/CSharpTestExecutor.cs | 33 ++ .../test/src/native/CSharpDecimalTests.cpp | 360 ++++++++++++++++++ .../test/src/native/CSharpInitParamTests.cpp | 9 + 8 files changed, 946 insertions(+), 3 deletions(-) create mode 100644 language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs index de1b0a1..6e36f20 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs @@ -126,6 +126,9 @@ private unsafe void AddColumn( case SqlDataType.DotNetReal: AddDataFrameColumn(columnNumber, rowsNumber, colData, colMap); break; + case SqlDataType.DotNetNumeric: + AddNumericDataFrameColumn(columnNumber, rowsNumber, colData, colMap); + break; case SqlDataType.DotNetChar: int[] strLens = new int[rowsNumber]; Interop.Copy((int*)colMap, strLens, 0, (int)rowsNumber); @@ -185,5 +188,50 @@ private unsafe void AddDataFrameColumn( CSharpDataFrame.Columns.Add(colDataFrame); } + + /// + /// This method adds NUMERIC/DECIMAL column data by converting from SQL_NUMERIC_STRUCT + /// to C# decimal values, creating a PrimitiveDataFrameColumn, and adding it to the DataFrame. + /// Follows the same pattern as Java extension's numeric handling. + /// + /// The column index. + /// Number of rows in this column. + /// Pointer to array of SQL_NUMERIC_STRUCT structures (19 bytes each). 
+ /// Pointer to null indicator array (SQL_NULL_DATA for null values). + private unsafe void AddNumericDataFrameColumn( + ushort columnNumber, + ulong rowsNumber, + void *colData, + int *colMap) + { + // Cast the raw pointer to SQL_NUMERIC_STRUCT array + SqlNumericStruct* numericArray = (SqlNumericStruct*)colData; + + // Create a DataFrame column for decimal values + PrimitiveDataFrameColumn colDataFrame = + new PrimitiveDataFrameColumn(_columns[columnNumber].Name, (int)rowsNumber); + + // Convert each SQL_NUMERIC_STRUCT to decimal, handling nulls + Span nullSpan = new Span(colMap, (int)rowsNumber); + for (int i = 0; i < (int)rowsNumber; ++i) + { + // Check if this row has a null value + // + // WHY check both Nullable == 0 and SQL_NULL_DATA? + // - Nullable == 0 means column is declared NOT NULL (cannot contain nulls) + // - For NOT NULL columns, skip null checking for performance (nullSpan[i] is undefined) + // - For nullable columns (Nullable != 0), check if nullSpan[i] == SQL_NULL_DATA (-1) + // - This matches the pattern used by other numeric types in the codebase + if (_columns[columnNumber].Nullable == 0 || nullSpan[i] != SQL_NULL_DATA) + { + // Convert SQL_NUMERIC_STRUCT to C# decimal + // The conversion handles precision, scale, sign, and the 16-byte integer value + colDataFrame[i] = SqlNumericStructToDecimal(numericArray[i]); + } + // If null, the PrimitiveDataFrameColumn slot remains as null + } + + CSharpDataFrame.Columns.Add(colDataFrame); + } } } diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 8eee4b1..233108d 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -174,6 +174,9 @@ DataFrameColumn column case SqlDataType.DotNetDouble: SetDataPtrs(columnNumber, GetArray(column)); break; + case SqlDataType.DotNetNumeric: + 
ExtractNumericColumn(columnNumber, column); + break; case SqlDataType.DotNetChar: // Calculate column size from actual data. // columnSize = max UTF-8 byte length across all rows. @@ -213,6 +216,101 @@ T[] array _handleList.Add(handle); } + /// + /// This method extracts NUMERIC/DECIMAL column data by converting C# decimal values + /// to SQL_NUMERIC_STRUCT array, pinning it, and storing the pointer. + /// Follows the same pattern as Java extension's numeric handling. + /// + /// The column index. + /// The DataFrameColumn containing decimal values. + private unsafe void ExtractNumericColumn( + ushort columnNumber, + DataFrameColumn column) + { + if (column == null) + { + SetDataPtrs(columnNumber, Array.Empty()); + return; + } + + // For NUMERIC/DECIMAL, we need to determine appropriate precision and scale from the data. + // SQL Server supports precision 1-38 and scale 0-precision. + // We'll use the DecimalDigits from the column metadata (if set), or calculate from actual values. + // + // WHY default precision to 38? + // - 38 is the maximum precision SQL Server NUMERIC/DECIMAL supports + // - Using maximum precision ensures we never lose significant digits + // - SQL Server will handle storage optimization internally + byte precision = 38; + byte scale = (byte)_columns[columnNumber].DecimalDigits; + + // If scale is 0 but we have actual decimal values, calculate appropriate scale + // by examining all non-null values to ensure we don't lose precision + // + // WHY examine ALL rows instead of just sampling? + // - A previous implementation only checked first 10 rows (optimization attempt) + // - This caused data loss when higher-scale values appeared later in the dataset + // - Example: rows 1-10 have scale 2 (e.g., 123.45), but row 100 has scale 4 (e.g., 123.4567) + // - If we use scale=2 for the entire column, row 100 gets rounded to 123.46 (data loss!) 
+ // - Must examine ALL rows to find maximum scale and preserve all decimal places + // + if (scale == 0) + { + for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) + { + if (column[rowNumber] != null) + { + decimal value = (decimal)column[rowNumber]; + // Get the scale from the decimal value itself + // + // WHY use decimal.GetBits and bit shifting? + // - C# decimal is stored as 128-bit: sign (1 bit), scale (8 bits), mantissa (96 bits) + // - GetBits returns 4 ints: [0-2] = mantissa low/mid/high, [3] = flags (sign + scale) + // - Scale is in bits 16-23 of flags field (bits[3]) + // - Bit shift >> 16 moves scale to low byte, & 0x7F masks to get 7-bit scale value + int[] bits = decimal.GetBits(value); + byte valueScale = (byte)((bits[3] >> 16) & 0x7F); + scale = Math.Max(scale, valueScale); + } + } + } + + Logging.Trace($"ExtractNumericColumn: Column {columnNumber}, Precision={precision}, Scale={scale}, RowCount={column.Length}"); + + // Convert each decimal value to SQL_NUMERIC_STRUCT + SqlNumericStruct[] numericArray = new SqlNumericStruct[column.Length]; + for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) + { + if (column[rowNumber] != null) + { + decimal value = (decimal)column[rowNumber]; + numericArray[rowNumber] = DecimalToSqlNumericStruct(value, precision, scale); + Logging.Trace($"ExtractNumericColumn: Row {rowNumber}, Value={value} converted to SqlNumericStruct"); + } + else + { + // For null values, create a zero-initialized struct + // The null indicator in strLenOrNullMap will mark this as SQL_NULL_DATA + // + // WHY create a struct for NULL values instead of leaving uninitialized? 
+ // - ODBC requires a valid struct pointer even for NULL values + // - The strLenOrNullMap array separately tracks which values are NULL + // - Native code reads from the struct pointer, so it must be valid memory + // - We use sign=1 (positive) by convention for NULL placeholders + numericArray[rowNumber] = new SqlNumericStruct + { + precision = precision, + scale = (sbyte)scale, + sign = 1 // Positive sign convention for NULL placeholders + }; + Logging.Trace($"ExtractNumericColumn: Row {rowNumber} is NULL"); + } + } + + // Pin the SqlNumericStruct array and store pointer + SetDataPtrs(columnNumber, numericArray); + } + /// /// This method gets the array from a DataFrameColumn Column for numeric types. /// diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index e1c53d5..bcc7d0f 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -132,6 +132,11 @@ public unsafe void AddParam( case SqlDataType.DotNetBit: _params[paramNumber].Value = *(bool*)paramValue; break; + case SqlDataType.DotNetNumeric: + // Convert SQL_NUMERIC_STRUCT to C# decimal + SqlNumericStruct* numericPtr = (SqlNumericStruct*)paramValue; + _params[paramNumber].Value = SqlNumericStructToDecimal(*numericPtr); + break; case SqlDataType.DotNetChar: _params[paramNumber].Value = Interop.UTF8PtrToStr((char*)paramValue, (ulong)strLenOrNullMap); break; @@ -214,6 +219,23 @@ public unsafe void ReplaceParam( bool boolValue = Convert.ToBoolean(param.Value); ReplaceNumericParam(boolValue, paramValue); break; + case SqlDataType.DotNetNumeric: + // Convert C# decimal to SQL_NUMERIC_STRUCT + // Use the precision and scale from the parameter metadata + decimal decimalValue = Convert.ToDecimal(param.Value); + // WHY hardcode precision to 38? 
+ // - param.Size may contain column size, not necessarily precision + // - Using maximum precision (38) ensures we never truncate significant digits + // - SQL Server will handle precision validation based on the actual parameter declaration + byte precision = 38; // SQL Server max precision for NUMERIC/DECIMAL + byte scale = (byte)param.DecimalDigits; + // WHY set strLenOrNullMap to 19? + // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size + // - SQL_NUMERIC_STRUCT is exactly 19 bytes: precision(1) + scale(1) + sign(1) + val(16) + // - This tells ODBC how many bytes to read from the paramValue pointer + *strLenOrNullMap = 19; // sizeof(SqlNumericStruct) + ReplaceNumericStructParam(decimalValue, precision, scale, paramValue); + break; case SqlDataType.DotNetChar: // For CHAR/VARCHAR, strLenOrNullMap is in bytes (1 byte per character for ANSI). // param.Size is the declared parameter size in characters (from SQL Server's CHAR(n)/VARCHAR(n)). @@ -275,6 +297,50 @@ private unsafe void ReplaceNumericParam( *paramValue = (void*)handle.AddrOfPinnedObject(); } + /// + /// This method replaces parameter value for NUMERIC/DECIMAL data types. + /// Converts C# decimal to SQL_NUMERIC_STRUCT and uses proper memory pinning. + /// Follows the same pattern as Java extension's numeric parameter handling. + /// + /// The C# decimal value to convert. + /// Total number of digits (1-38). + /// Number of digits after decimal point (0-precision). + /// Output pointer to receive the pinned SqlNumericStruct. + private unsafe void ReplaceNumericStructParam( + decimal value, + byte precision, + byte scale, + void **paramValue) + { + // Convert C# decimal to SQL_NUMERIC_STRUCT + SqlNumericStruct numericStruct = DecimalToSqlNumericStruct(value, precision, scale); + + // Box the struct into a single-element array to create a heap-allocated copy, then pin it. + // + // WHY box into an array before pinning? 
+ // - Local struct 'numericStruct' is stack-allocated and will be destroyed when method returns + // - We need a heap-allocated copy that survives after this method returns + // - GCHandle.Alloc requires a heap object; structs must be boxed first + // - Single-element array is the simplest way to create a heap-allocated struct + // + // WHY pin with GCHandle? + // - Native code will dereference the paramValue pointer during execution + // - Without pinning, garbage collector could move the object, invalidating the pointer + // - GCHandleType.Pinned prevents GC from moving the object until we free the handle + // + // WHY add handle to _handleList? + // - If we don't keep a reference, GC could free the handle immediately + // - _handleList keeps handles alive until container is disposed/reset + // - Handles are freed in ResetParams or class disposal, ensuring proper cleanup + // + SqlNumericStruct[] valueArray = new SqlNumericStruct[1] { numericStruct }; + GCHandle handle = GCHandle.Alloc(valueArray, GCHandleType.Pinned); + _handleList.Add(handle); + *paramValue = (void*)handle.AddrOfPinnedObject(); + + Logging.Trace($"ReplaceNumericStructParam: Converted decimal {value} to SqlNumericStruct (precision={precision}, scale={scale})"); + } + /// /// This method replaces parameter value for string data types. /// If the string is not empty, the address of underlying bytes will be assigned to paramValue. 
diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index 3d564ba..1bd0ad4 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -10,6 +10,7 @@ //********************************************************************* using System; using System.Collections.Generic; +using System.Runtime.InteropServices; using System.Text; namespace Microsoft.SqlServer.CSharpExtension @@ -68,7 +69,8 @@ public enum SqlDataType: short {typeof(float), SqlDataType.DotNetReal}, {typeof(double), SqlDataType.DotNetDouble}, {typeof(bool), SqlDataType.DotNetBit}, - {typeof(string), SqlDataType.DotNetChar} + {typeof(string), SqlDataType.DotNetChar}, + {typeof(decimal), SqlDataType.DotNetNumeric} }; /// @@ -89,7 +91,8 @@ public enum SqlDataType: short {SqlDataType.DotNetDouble, sizeof(double)}, {SqlDataType.DotNetBit, sizeof(bool)}, {SqlDataType.DotNetChar, MinUtf8CharSize}, - {SqlDataType.DotNetWChar, MinUtf16CharSize} + {SqlDataType.DotNetWChar, MinUtf16CharSize}, + {SqlDataType.DotNetNumeric, 19} // sizeof(SqlNumericStruct) }; /// @@ -124,5 +127,325 @@ public static short ToSQLDataType(SqlDataType dataType) { return (short)dataType; } + + /// + /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT (19 bytes). + /// Used for transferring NUMERIC/DECIMAL data between SQL Server and C#. + /// IMPORTANT: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT + /// defined in sql.h/sqltypes.h on the native side. + /// + /// WHY individual byte fields instead of byte[] array? 
+ /// - Using byte[] would make this a managed type (reference type), violating the unmanaged constraint + /// - Fixed buffers (fixed byte val[16]) require unsafe code, which we want to avoid for safety + /// - Individual fields keep this as a pure value type (unmanaged) with memory safety + /// - The compiler will optimize access patterns, so there's no performance penalty + /// + [StructLayout(LayoutKind.Sequential, Pack = 1)] + public struct SqlNumericStruct + { + /// + /// Total number of digits (1-38) - SQLCHAR (unsigned byte) + /// + public byte precision; + + /// + /// Number of digits after decimal point (0-precision) - SQLSCHAR (signed byte) + /// + /// WHY sbyte (signed) instead of byte (unsigned)? + /// - ODBC specification defines scale as SQLSCHAR (signed char) in SQL_NUMERIC_STRUCT + /// - Although scale values are always non-negative in practice (0-38), + /// we must use sbyte for exact binary layout compatibility with native ODBC code + /// - Mismatch would cause struct layout corruption when marshaling to/from native code + /// + public sbyte scale; + + /// + /// Sign indicator: 1 = positive, 0 = negative - SQLCHAR (unsigned byte) + /// + public byte sign; + + /// + /// Little-endian byte array (16 bytes) representing the scaled integer value. + /// The actual numeric value = (val as integer) * 10^(-scale), adjusted for sign. + /// Corresponds to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN = 16. + /// + /// WHY 16 separate fields instead of an array? 
+ /// - See struct-level comment: arrays would make this managed, violating unmanaged constraint + /// - This verbose approach maintains binary compatibility without requiring unsafe code + /// + public byte val0; + public byte val1; + public byte val2; + public byte val3; + public byte val4; + public byte val5; + public byte val6; + public byte val7; + public byte val8; + public byte val9; + public byte val10; + public byte val11; + public byte val12; + public byte val13; + public byte val14; + public byte val15; + + /// + /// Helper method to get val byte at specified index (0-15). + /// + /// WHY use switch expression instead of array indexing? + /// - Since we can't use arrays (would make struct managed), we need field access + /// - Switch expressions are optimized by the compiler to efficient jump tables + /// - Modern JIT will inline this for zero overhead compared to array access + /// + public byte GetVal(int index) + { + return index switch + { + 0 => val0, + 1 => val1, + 2 => val2, + 3 => val3, + 4 => val4, + 5 => val5, + 6 => val6, + 7 => val7, + 8 => val8, + 9 => val9, + 10 => val10, + 11 => val11, + 12 => val12, + 13 => val13, + 14 => val14, + 15 => val15, + _ => throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15") + }; + } + + /// + /// Helper method to set val byte at specified index (0-15). + /// + /// WHY use switch statement instead of array indexing? 
+ /// - Same reason as GetVal: can't use arrays without making struct managed + /// - Switch statement compiles to efficient code without runtime overhead + /// + public void SetVal(int index, byte value) + { + switch (index) + { + case 0: val0 = value; break; + case 1: val1 = value; break; + case 2: val2 = value; break; + case 3: val3 = value; break; + case 4: val4 = value; break; + case 5: val5 = value; break; + case 6: val6 = value; break; + case 7: val7 = value; break; + case 8: val8 = value; break; + case 9: val9 = value; break; + case 10: val10 = value; break; + case 11: val11 = value; break; + case 12: val12 = value; break; + case 13: val13 = value; break; + case 14: val14 = value; break; + case 15: val15 = value; break; + default: throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15"); + } + } + } + + // Powers of 10 lookup table for efficient decimal scaling (up to 10^28) + // + // WHY use a lookup table instead of Math.Pow? + // - Math.Pow returns double, requiring conversion to decimal with potential precision loss + // - Repeated Math.Pow calls in tight loops have measurable performance impact + // - Pre-computed decimal constants give exact values with zero runtime overhead + // - C# decimal supports up to 28-29 significant digits, so 10^0 through 10^28 covers all cases + private static readonly decimal[] PowersOf10 = new decimal[29] + { + 1m, // 10^0 + 10m, // 10^1 + 100m, // 10^2 + 1000m, // 10^3 + 10000m, // 10^4 + 100000m, // 10^5 + 1000000m, // 10^6 + 10000000m, // 10^7 + 100000000m, // 10^8 + 1000000000m, // 10^9 + 10000000000m, // 10^10 + 100000000000m, // 10^11 + 1000000000000m, // 10^12 + 10000000000000m, // 10^13 + 100000000000000m, // 10^14 + 1000000000000000m, // 10^15 + 10000000000000000m, // 10^16 + 100000000000000000m, // 10^17 + 1000000000000000000m, // 10^18 + 10000000000000000000m, // 10^19 + 100000000000000000000m, // 10^20 + 1000000000000000000000m, // 10^21 + 10000000000000000000000m, // 10^22 + 
100000000000000000000000m, // 10^23 + 1000000000000000000000000m, // 10^24 + 10000000000000000000000000m, // 10^25 + 100000000000000000000000000m, // 10^26 + 1000000000000000000000000000m, // 10^27 + 10000000000000000000000000000m // 10^28 + }; + + /// + /// Converts SQL_NUMERIC_STRUCT to C# decimal. + /// Follows the same conversion logic as Java extension's NumericStructToBigDecimal. + /// + /// The SQL numeric structure from ODBC. + /// The equivalent C# decimal value. + /// Thrown when the value exceeds C# decimal range. + public static decimal SqlNumericStructToDecimal(SqlNumericStruct numeric) + { + // Convert little-endian byte array (16 bytes) to a scaled integer value. + // The val array contains the absolute value scaled by 10^scale. + // For example, for numeric(10,2) value 123.45: + // scale = 2, val represents 12345 (123.45 * 10^2) + + // Build the integer value from little-endian bytes + // We read up to 16 bytes (128 bits) which can represent very large numbers + // + // WHY multiply by 256 for each byte position? + // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation + // - Each byte represents one "digit" in base 256 (not base 10) + // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 + // - This matches how ODBC and SQL Server store NUMERIC internally + // + // WHY process from end to beginning? 
+ // - Find the highest non-zero byte first to determine actual value size + // - Avoids computing unnecessarily large multipliers that would overflow decimal + // - For most practical values, only first 12-13 bytes are used + // + decimal scaledValue = 0m; + + // Find the last non-zero byte to avoid unnecessary iterations + int lastNonZeroByte = -1; + for (int i = 15; i >= 0; i--) + { + if (numeric.GetVal(i) != 0) + { + lastNonZeroByte = i; + break; + } + } + + // If all bytes are zero, return 0 + if (lastNonZeroByte == -1) + { + return 0m; + } + + // Build value from highest byte down to avoid large intermediate multipliers + // This prevents decimal overflow when processing high-precision SQL numerics + for (int i = lastNonZeroByte; i >= 0; i--) + { + scaledValue = scaledValue * 256m + numeric.GetVal(i); + } + + // Scale down by dividing by 10^scale to get the actual decimal value + decimal result; + if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) + { + result = scaledValue / PowersOf10[numeric.scale]; + } + else if (numeric.scale == 0) + { + result = scaledValue; + } + else + { + // For scales beyond our lookup table, use Math.Pow (slower but rare) + result = scaledValue / (decimal)Math.Pow(10, numeric.scale); + } + + // Apply sign: 1 = positive, 0 = negative + if (numeric.sign == 0) + { + result = -result; + } + + return result; + } + + /// + /// Converts C# decimal to SQL_NUMERIC_STRUCT. + /// Follows the same conversion logic as Java extension's BigDecimalToNumericStruct. + /// + /// The C# decimal value to convert. + /// Total number of digits (1-38). + /// Number of digits after decimal point (0-precision). + /// The equivalent SQL numeric structure for ODBC. + /// Thrown when precision or scale are out of valid range. 
+ public static SqlNumericStruct DecimalToSqlNumericStruct(decimal value, byte precision, byte scale) + { + if (precision < 1 || precision > 38) + { + throw new ArgumentException($"Precision must be between 1 and 38, got {precision}"); + } + if (scale > precision) + { + throw new ArgumentException($"Scale ({scale}) cannot exceed precision ({precision})"); + } + + SqlNumericStruct result = new SqlNumericStruct + { + precision = precision, + scale = (sbyte)scale, + sign = (byte)(value >= 0 ? 1 : 0) + }; + + // Work with absolute value + decimal absValue = Math.Abs(value); + + // Scale up by multiplying by 10^scale to get an integer representation + // For example, 123.45 with scale=2 becomes 12345 + decimal scaledValue; + if (scale >= 0 && scale < PowersOf10.Length) + { + scaledValue = absValue * PowersOf10[scale]; + } + else if (scale == 0) + { + scaledValue = absValue; + } + else + { + scaledValue = absValue * (decimal)Math.Pow(10, scale); + } + + // Round to nearest integer (handles any remaining fractional part due to precision limits) + scaledValue = Math.Round(scaledValue, 0, MidpointRounding.AwayFromZero); + + // Convert the scaled integer to little-endian byte array (16 bytes) + // Each byte represents one position in base-256 representation + for (int i = 0; i < 16; i++) + { + if (scaledValue > 0) + { + decimal byteValue = scaledValue % 256m; + result.SetVal(i, (byte)byteValue); + scaledValue = Math.Floor(scaledValue / 256m); + } + else + { + result.SetVal(i, 0); + } + } + + // If there's still value left after filling 16 bytes, we have overflow + if (scaledValue > 0) + { + throw new OverflowException( + $"Value {value} with precision {precision} and scale {scale} exceeds SQL_NUMERIC_STRUCT capacity"); + } + + return result; + } } } diff --git a/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd b/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd index 
55804c9..838fce5 100644 --- a/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd +++ b/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd @@ -60,7 +60,8 @@ ECHO "[INFO] Generating dotnet-core-CSharp-extension test project build files us REM Call cmake REM CALL "%CMAKE_ROOT%\bin\cmake.exe" ^ - -G "Visual Studio 16 2019" ^ + -G "NMake Makefiles" ^ + -DCMAKE_BUILD_TYPE=%CMAKE_CONFIGURATION% ^ -DCMAKE_INSTALL_PREFIX:PATH="%DOTNETCORE_CSHARP_EXTENSION_TEST_WORKING_DIR%\\%CMAKE_CONFIGURATION%" ^ -DENL_ROOT="%ENL_ROOT%" ^ -DCMAKE_CONFIGURATION=%CMAKE_CONFIGURATION% ^ @@ -70,6 +71,11 @@ CALL :CHECKERROR %ERRORLEVEL% "Error: Failed to generate make files for CMAKE_CO ECHO "[INFO] Building dotnet-core-CSharp-extension test project using CMAKE_CONFIGURATION=%CMAKE_CONFIGURATION%" +REM Build with nmake +REM +CALL nmake install +CALL :CHECKERROR %ERRORLEVEL% "Error: Failed to build native tests for CMAKE_CONFIGURATION=%CMAKE_CONFIGURATION%" || EXIT /b %ERRORLEVEL% + REM Call dotnet build REM dotnet build %DOTNETCORE_CSHARP_EXTENSION_TEST_HOME%\src\managed\Microsoft.SqlServer.CSharpExtensionTest.csproj /m -c %CMAKE_CONFIGURATION% -o %BUILD_OUTPUT% --no-dependencies diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs index 9abe9ca..5ec726b 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs @@ -108,6 +108,39 @@ public override DataFrame Execute(DataFrame input, Dictionary s } } + public class CSharpTestExecutorDecimalParam: AbstractSqlServerExtensionExecutor + { + public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ + // Test maximum C# decimal value (decimal.MaxValue = 79228162514264337593543950335) + // Note: C# decimal 
supports ~29 digits, even though SQL NUMERIC can support up to 38 digits + sqlParams["@param0"] = decimal.MaxValue; + + // Test minimum value (negative max) + sqlParams["@param1"] = decimal.MinValue; + + // Test high scale value (DECIMAL(38, 10)) + // Using 18 significant digits to stay within C# decimal range + sqlParams["@param2"] = 12345678.1234567890m; + + // Test zero + sqlParams["@param3"] = 0m; + + // Test small value with high precision (28 decimal places, max for C# decimal) + sqlParams["@param4"] = 0.1234567890123456789012345678m; + + // Test typical financial value (DECIMAL(19, 4)) + sqlParams["@param5"] = 123456789012345.6789m; + + // Test negative financial value + sqlParams["@param6"] = -123456789012345.6789m; + + // Test null (last parameter) + sqlParams["@param7"] = null; + + return null; + } + } + public class CSharpTestExecutorStringParam: AbstractSqlServerExtensionExecutor { public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp new file mode 100644 index 0000000..9249b23 --- /dev/null +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -0,0 +1,360 @@ +//********************************************************************* +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+// +// @File: CSharpDecimalTests.cpp +// +// Purpose: +// Test the .NET Core CSharp extension NUMERIC/DECIMAL support using the Extension API +// +//********************************************************************* +#include "CSharpExtensionApiTests.h" + +using namespace std; + +namespace ExtensionApiTest +{ + //---------------------------------------------------------------------------------------------- + // Name: InitNumericParamTest + // + // Description: + // Tests multiple SQL_NUMERIC_STRUCT values with various precision and scale combinations. + // + TEST_F(CSharpExtensionApiTests, InitNumericParamTest) + { + InitializeSession( + 0, // inputSchemaColumnsNumber + 5); // parametersNumber + + // Helper lambda to create SQL_NUMERIC_STRUCT from decimal value + // + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + // Convert mantissa to little-endian byte array + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Test NUMERIC(10,2) value: 12345.67 + // Stored as: mantissa = 1234567, scale = 2 + // + SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567, 10, 2, false); + InitParam( + 0, // paramNumber + param0); // paramValue (12345.67) + + // Test NUMERIC(38,0) value: maximum precision integer + // Stored as: mantissa = 999999999999, scale = 0 + // + SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(999999999999LL, 38, 0, false); + InitParam( + 1, // paramNumber + param1); // paramValue (999999999999) + + // Test NUMERIC(19,4) value: -123456789012.3456 + // Stored as: mantissa = 1234567890123456, scale = 4, sign = 0 (negative) + // + SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234567890123456LL, 19, 4, true); + InitParam( + 2, // paramNumber + param2); // paramValue (-123456789012.3456) + + // Test NUMERIC(5,5) value: 0.12345 (all decimal places) + // Stored as: mantissa = 12345, scale = 5 + // + SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(12345, 5, 5, false); + InitParam( + 3, // paramNumber + param3); // paramValue (0.12345) + + // Test null NUMERIC value + // + InitParam( + 4, // paramNumber + SQL_NUMERIC_STRUCT(), // paramValue (will be ignored due to isNull) + true); // isNull + + // Test invalid parameter number + // + InitParam( + 5, // invalid paramNumber + param0, // paramValue + false, // isNull + SQL_PARAM_INPUT_OUTPUT, // inputOutputType + SQL_ERROR); // SQLReturn + + // Test negative parameter number + // + InitParam( + -1, // negative paramNumber + param0, // paramValue + false, // isNull + SQL_PARAM_INPUT_OUTPUT, // inputOutputType + SQL_ERROR); // SQLReturn + } + + //---------------------------------------------------------------------------------------------- + // Name: GetDecimalOutputParamTest + // + 
// Description: + // Test multiple DECIMAL output parameter values from C# executor + // + TEST_F(CSharpExtensionApiTests, GetDecimalOutputParamTest) + { + int paramsNumber = 8; + + string userClassFullName = "Microsoft.SqlServer.CSharpExtensionTest.CSharpTestExecutorDecimalParam"; + string scriptString = m_UserLibName + m_Separator + userClassFullName; + + InitializeSession( + 0, // inputSchemaColumnsNumber + paramsNumber, // parametersNumber + scriptString); // scriptString + + for(int i = 0; i < paramsNumber; ++i) + { + InitParam( + i, // paramNumber + SQL_NUMERIC_STRUCT(), // paramValue (will be set by C# executor) + false, // isNull + SQL_PARAM_INPUT_OUTPUT); // inputOutputType + } + + SQLUSMALLINT outputSchemaColumnsNumber = 0; + SQLRETURN result = (*sm_executeFuncPtr)( + *m_sessionId, + m_taskId, + 0, // rowsNumber + nullptr, // dataSet + nullptr, // strLen_or_Ind + &outputSchemaColumnsNumber); + ASSERT_EQ(result, SQL_SUCCESS); + + EXPECT_EQ(outputSchemaColumnsNumber, 0); + + // Helper to create expected SQL_NUMERIC_STRUCT for comparison + // Note: Values must match those set in CSharpTestExecutorDecimalParam + // + auto CreateNumericFromDecimal = [](const char* decimalStr, SQLCHAR precision, SQLSCHAR scale) -> SQL_NUMERIC_STRUCT + { + // This is a simplified version - in production we'd parse the decimal string + // For now, we'll create the expected binary representation + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = 1; // positive + memset(result.val, 0, 16); + return result; + }; + + // Test expected output parameters + // Note: Actual validation depends on C# executor setting these values correctly + // + vector paramValues(paramsNumber, nullptr); + vector strLenOrIndValues; + + // All non-null parameters have size = sizeof(SQL_NUMERIC_STRUCT) = 19 bytes + for (int i = 0; i < paramsNumber - 1; ++i) + { + strLenOrIndValues.push_back(19); + } + // Last parameter is null + 
strLenOrIndValues.push_back(SQL_NULL_DATA); + + // Verify that the parameters we get back are what we expect + // This validates the conversion from C# decimal to SQL_NUMERIC_STRUCT + // + for (int i = 0; i < paramsNumber; ++i) + { + SQLPOINTER paramValue = nullptr; + SQLINTEGER strLenOrInd = 0; + + SQLRETURN result = (*sm_getOutputParamFuncPtr)( + *m_sessionId, + m_taskId, + i, + ¶mValue, + &strLenOrInd); + + ASSERT_EQ(result, SQL_SUCCESS); + EXPECT_EQ(strLenOrInd, strLenOrIndValues[i]); + + if (strLenOrInd != SQL_NULL_DATA) + { + ASSERT_NE(paramValue, nullptr); + SQL_NUMERIC_STRUCT* numericValue = static_cast(paramValue); + + // Validate struct size and basic integrity + EXPECT_GE(numericValue->precision, 1); + EXPECT_LE(numericValue->precision, 38); + EXPECT_GE(numericValue->scale, 0); + EXPECT_LE(numericValue->scale, numericValue->precision); + EXPECT_TRUE(numericValue->sign == 0 || numericValue->sign == 1); + } + } + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalPrecisionScaleTest + // + // Description: + // Test various precision and scale combinations for NUMERIC/DECIMAL types + // + TEST_F(CSharpExtensionApiTests, DecimalPrecisionScaleTest) + { + InitializeSession( + 0, // inputSchemaColumnsNumber + 6); // parametersNumber + + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // NUMERIC(38, 0) - maximum precision, no decimal places + SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(12345678901234567LL, 38, 0, false); + InitParam(0, p0); + + // NUMERIC(18, 18) - maximum decimal places relative to precision + SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123456789012345678LL, 18, 18, false); + InitParam(1, p1); + + // NUMERIC(19, 4) - typical financial precision (SQL Server MONEY compatible) + SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 19, 4, false); + InitParam(2, p2); + + // NUMERIC(10, 2) - common financial format + SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1234567, 10, 2, false); + InitParam(3, p3); + + // NUMERIC(5, 0) - small integer + SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(12345, 5, 0, false); + InitParam(4, p4); + + // NUMERIC(28, 10) - high precision scientific + SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(123456789012345678LL, 28, 10, false); + InitParam(5, p5); + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalBoundaryValuesTest + // + // Description: + // Test boundary values: zero, very small, very large, negative values + // + TEST_F(CSharpExtensionApiTests, DecimalBoundaryValuesTest) + { + InitializeSession( + 0, // inputSchemaColumnsNumber + 6); // parametersNumber + + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Test zero + SQL_NUMERIC_STRUCT zero = CreateNumericStruct(0, 10, 2, false); + InitParam(0, zero); + + // Test very small positive (0.01) + SQL_NUMERIC_STRUCT smallPos = CreateNumericStruct(1, 10, 2, false); + InitParam(1, smallPos); + + // Test very small negative (-0.01) + SQL_NUMERIC_STRUCT smallNeg = CreateNumericStruct(1, 10, 2, true); + InitParam(2, smallNeg); + + // Test large positive (near max for NUMERIC(38)) + // Note: Using 18 digits to fit in long long + SQL_NUMERIC_STRUCT largePos = CreateNumericStruct(999999999999999999LL, 38, 0, false); + InitParam(3, largePos); + + // Test large negative + SQL_NUMERIC_STRUCT largeNeg = CreateNumericStruct(999999999999999999LL, 38, 0, true); + InitParam(4, largeNeg); + + // Test value with maximum scale (0.000000000000000001 = 10^-18) + SQL_NUMERIC_STRUCT maxScale = CreateNumericStruct(1, 18, 18, false); + InitParam(5, maxScale); + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalStructLayoutTest + // + // Description: + // Verify SQL_NUMERIC_STRUCT has correct memory layout and size for ODBC compatibility + // + TEST_F(CSharpExtensionApiTests, DecimalStructLayoutTest) + { + // Verify struct size matches ODBC specification (19 bytes) + EXPECT_EQ(sizeof(SQL_NUMERIC_STRUCT), 19); + + // Verify field offsets for binary compatibility + SQL_NUMERIC_STRUCT test; + + // precision at offset 0 + EXPECT_EQ((size_t)&test.precision - (size_t)&test, 0); + + // scale at offset 1 + EXPECT_EQ((size_t)&test.scale - (size_t)&test, 1); + + // sign at offset 2 + EXPECT_EQ((size_t)&test.sign - (size_t)&test, 2); + + // val array at offset 3 + EXPECT_EQ((size_t)&test.val[0] - (size_t)&test, 3); + + // val array is 16 bytes + EXPECT_EQ(sizeof(test.val), 16); + + // Test 
that we can create and inspect a numeric struct + test.precision = 38; + test.scale = 10; + test.sign = 1; + memset(test.val, 0, 16); + test.val[0] = 0x39; // 12345 in little-endian + test.val[1] = 0x30; + + EXPECT_EQ(test.precision, 38); + EXPECT_EQ(test.scale, 10); + EXPECT_EQ(test.sign, 1); + EXPECT_EQ(test.val[0], 0x39); + EXPECT_EQ(test.val[1], 0x30); + } +} diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp index 794c54e..2425c57 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp @@ -771,4 +771,13 @@ namespace ExtensionApiTest return distance; } + + // Explicit template instantiations + // + template void CSharpExtensionApiTests::InitParam( + int paramNumber, + SQL_NUMERIC_STRUCT paramValue, + bool isNull, + SQLSMALLINT inputOutputType, + SQLRETURN SQLResult); } From 0c3296f6089ad4da87689205f87b795fadc10726 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Mon, 16 Mar 2026 11:49:28 -0700 Subject: [PATCH 02/13] unit tests are passing --- .../build-dotnet-core-CSharp-extension.cmd | 6 +- .../src/managed/CSharpOutputDataSet.cs | 85 +++- .../src/managed/CSharpParamContainer.cs | 12 +- .../src/managed/utils/Sql.cs | 142 +++--- ...uild-dotnet-core-CSharp-extension-test.cmd | 2 +- ...osoft.SqlServer.CSharpExtensionTest.csproj | 4 +- .../test/src/native/CSharpDecimalTests.cpp | 453 ++++++++++++++++++ .../test/src/native/CSharpExecuteTests.cpp | 8 + .../src/native/CSharpExtensionApiTests.cpp | 42 ++ 9 files changed, 657 insertions(+), 97 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/build/windows/build-dotnet-core-CSharp-extension.cmd b/language-extensions/dotnet-core-CSharp/build/windows/build-dotnet-core-CSharp-extension.cmd index 1b9a76f..8df3ff2 100644 --- 
a/language-extensions/dotnet-core-CSharp/build/windows/build-dotnet-core-CSharp-extension.cmd +++ b/language-extensions/dotnet-core-CSharp/build/windows/build-dotnet-core-CSharp-extension.cmd @@ -38,7 +38,7 @@ REM Do not call VsDevCmd if the environment is already set. Otherwise, it will k REM to the PATH environment variable and it will be too long for windows to handle. REM IF NOT DEFINED DevEnvDir ( - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64 -host_arch=amd64 + CALL "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64 -host_arch=amd64 ) REM VSCMD_START_DIR set the working directory to this variable after calling VsDevCmd.bat @@ -59,9 +59,9 @@ SET EXTENSION_HOST_INCLUDE=%ENL_ROOT%\extension-host\include SET DOTNET_NATIVE_LIB=%DOTNET_EXTENSION_HOME%\lib IF /I %BUILD_CONFIGURATION%==debug ( - cl.exe /LD %DOTNET_NATIVE_SRC%\nativecsharpextension.cpp %DOTNET_NATIVE_SRC%\*.cpp /I %DOTNET_NATIVE_INCLUDE% /I %EXTENSION_HOST_INCLUDE% /D WINDOWS /D DEBUG /EHsc /Zi + cl.exe /LD %DOTNET_NATIVE_SRC%\nativecsharpextension.cpp %DOTNET_NATIVE_SRC%\*.cpp /I %DOTNET_NATIVE_INCLUDE% /I %EXTENSION_HOST_INCLUDE% /D WINDOWS /D DEBUG /EHsc /Zi /link /MACHINE:X64 ) ELSE ( - cl.exe /LD %DOTNET_NATIVE_SRC%\nativecsharpextension.cpp %DOTNET_NATIVE_SRC%\*.cpp /I %DOTNET_NATIVE_INCLUDE% /I %EXTENSION_HOST_INCLUDE% /D WINDOWS /EHsc /Zi + cl.exe /LD %DOTNET_NATIVE_SRC%\nativecsharpextension.cpp %DOTNET_NATIVE_SRC%\*.cpp /I %DOTNET_NATIVE_INCLUDE% /I %EXTENSION_HOST_INCLUDE% /D WINDOWS /EHsc /Zi /link /MACHINE:X64 ) CALL :CHECKERROR %ERRORLEVEL% "Error: Failed to build nativecsharpextension for configuration=%BUILD_CONFIGURATION%" || EXIT /b %ERRORLEVEL% diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 233108d..7a286e8 100644 --- 
a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -235,45 +235,78 @@ private unsafe void ExtractNumericColumn( // For NUMERIC/DECIMAL, we need to determine appropriate precision and scale from the data. // SQL Server supports precision 1-38 and scale 0-precision. - // We'll use the DecimalDigits from the column metadata (if set), or calculate from actual values. + // We'll calculate both precision and scale by examining the actual decimal values. // - // WHY default precision to 38? - // - 38 is the maximum precision SQL Server NUMERIC/DECIMAL supports - // - Using maximum precision ensures we never lose significant digits - // - SQL Server will handle storage optimization internally - byte precision = 38; + // WHY calculate from data instead of hardcoding? + // - The extension doesn't have access to the input column's original precision + // - SQL Server validates returned precision against WITH RESULT SETS declaration + // - Using precision=38 for all values causes "Invalid data for type numeric" errors + // - We must calculate the minimum precision needed to represent the data + // + byte precision = 0; byte scale = (byte)_columns[columnNumber].DecimalDigits; - // If scale is 0 but we have actual decimal values, calculate appropriate scale - // by examining all non-null values to ensure we don't lose precision + // Calculate precision and scale by examining all non-null values + // We need to find the maximum precision and scale to ensure no data loss // // WHY examine ALL rows instead of just sampling? // - A previous implementation only checked first 10 rows (optimization attempt) - // - This caused data loss when higher-scale values appeared later in the dataset - // - Example: rows 1-10 have scale 2 (e.g., 123.45), but row 100 has scale 4 (e.g., 123.4567) - // - If we use scale=2 for the entire column, row 100 gets rounded to 123.46 (data loss!) 
- // - Must examine ALL rows to find maximum scale and preserve all decimal places + // - This caused data loss when higher-precision values appeared later in the dataset + // - Example: rows 1-10 need precision 6, but row 100 needs precision 14 + // - If we use precision=6 for the entire column, row 100 gets truncated (data loss!) + // - Must examine ALL rows to find maximum precision and scale // - if (scale == 0) + for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) { - for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) + if (column[rowNumber] != null) { - if (column[rowNumber] != null) + decimal value = (decimal)column[rowNumber]; + + // Get the scale from the decimal value itself + // Scale is in bits 16-23 of flags field (bits[3]) + int[] bits = decimal.GetBits(value); + byte valueScale = (byte)((bits[3] >> 16) & 0x7F); + scale = Math.Max(scale, valueScale); + + // Calculate precision by counting significant digits + // Remove the scale (decimal places) to get the integer part, + // then count digits in both parts + decimal absValue = Math.Abs(value); + decimal integerPart = Math.Truncate(absValue); + + // Count digits in integer part (or 1 if zero) + byte integerDigits; + if (integerPart == 0) + { + integerDigits = 1; + } + else { - decimal value = (decimal)column[rowNumber]; - // Get the scale from the decimal value itself - // - // WHY use decimal.GetBits and bit shifting? 
- // - C# decimal is stored as 128-bit: sign (1 bit), scale (8 bits), mantissa (96 bits) - // - GetBits returns 4 ints: [0-2] = mantissa low/mid/high, [3] = flags (sign + scale) - // - Scale is in bits 16-23 of flags field (bits[3]) - // - Bit shift >> 16 moves scale to low byte, & 0x7F masks to get 7-bit scale value - int[] bits = decimal.GetBits(value); - byte valueScale = (byte)((bits[3] >> 16) & 0x7F); - scale = Math.Max(scale, valueScale); + // Log10 gives us the magnitude, +1 for digit count + integerDigits = (byte)(Math.Floor(Math.Log10((double)integerPart)) + 1); } + + // Precision = digits before decimal + digits after decimal + byte valuePrecision = (byte)(integerDigits + valueScale); + precision = Math.Max(precision, valuePrecision); } } + + // Ensure minimum precision of 1 and maximum of 38 + precision = Math.Max(precision, (byte)1); + precision = Math.Min(precision, (byte)38); + + // Ensure scale doesn't exceed precision + if (scale > precision) + { + precision = scale; + } + + // Update column metadata with calculated precision and scale + // Size contains the precision for DECIMAL/NUMERIC types (not bytes) + // DecimalDigits contains the scale + _columns[columnNumber].Size = precision; + _columns[columnNumber].DecimalDigits = scale; Logging.Trace($"ExtractNumericColumn: Column {columnNumber}, Precision={precision}, Scale={scale}, RowCount={column.Length}"); diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index bcc7d0f..cd92b93 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -223,11 +223,13 @@ public unsafe void ReplaceParam( // Convert C# decimal to SQL_NUMERIC_STRUCT // Use the precision and scale from the parameter metadata decimal decimalValue = Convert.ToDecimal(param.Value); - // WHY hardcode precision to 38? 
- // - param.Size may contain column size, not necessarily precision - // - Using maximum precision (38) ensures we never truncate significant digits - // - SQL Server will handle precision validation based on the actual parameter declaration - byte precision = 38; // SQL Server max precision for NUMERIC/DECIMAL + // WHY use param.Size for precision? + // - For DECIMAL/NUMERIC parameters, param.Size contains the declared precision (not bytes) + // - This follows standard ODBC behavior where ColumnSize = precision for SQL_NUMERIC/SQL_DECIMAL + // - CRITICAL: The SqlNumericStruct precision MUST match the declared parameter precision + // or SQL Server rejects it with "Invalid data for type decimal" (Msg 9803) + // - Example: DECIMAL(3,3) parameter MUST have precision=3 in the struct, not precision=38 + byte precision = (byte)param.Size; byte scale = (byte)param.DecimalDigits; // WHY set strLenOrNullMap to 19? // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index 1bd0ad4..cfab440 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -10,6 +10,7 @@ //********************************************************************* using System; using System.Collections.Generic; +using System.Linq; using System.Runtime.InteropServices; using System.Text; @@ -303,74 +304,93 @@ public void SetVal(int index, byte value) /// Thrown when the value exceeds C# decimal range. public static decimal SqlNumericStructToDecimal(SqlNumericStruct numeric) { - // Convert little-endian byte array (16 bytes) to a scaled integer value. - // The val array contains the absolute value scaled by 10^scale. 
- // For example, for numeric(10,2) value 123.45: - // scale = 2, val represents 12345 (123.45 * 10^2) - - // Build the integer value from little-endian bytes - // We read up to 16 bytes (128 bits) which can represent very large numbers - // - // WHY multiply by 256 for each byte position? - // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation - // - Each byte represents one "digit" in base 256 (not base 10) - // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 - // - This matches how ODBC and SQL Server store NUMERIC internally - // - // WHY process from end to beginning? - // - Find the highest non-zero byte first to determine actual value size - // - Avoids computing unnecessarily large multipliers that would overflow decimal - // - For most practical values, only first 12-13 bytes are used - // - decimal scaledValue = 0m; - - // Find the last non-zero byte to avoid unnecessary iterations - int lastNonZeroByte = -1; - for (int i = 15; i >= 0; i--) + try { - if (numeric.GetVal(i) != 0) + // Convert little-endian byte array (16 bytes) to a scaled integer value. + // The val array contains the absolute value scaled by 10^scale. + // For example, for numeric(10,2) value 123.45: + // scale = 2, val represents 12345 (123.45 * 10^2) + + // Build the integer value from little-endian bytes + // We read up to 16 bytes (128 bits) which can represent very large numbers + // + // WHY multiply by 256 for each byte position? + // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation + // - Each byte represents one "digit" in base 256 (not base 10) + // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 + // - This matches how ODBC and SQL Server store NUMERIC internally + // + // WHY process from end to beginning? 
+ // - Find the highest non-zero byte first to determine actual value size + // - Avoids computing unnecessarily large multipliers that would overflow decimal + // - For most practical values, only first 12-13 bytes are used + // + decimal scaledValue = 0m; + + // Find the last non-zero byte to avoid unnecessary iterations + int lastNonZeroByte = -1; + for (int i = 15; i >= 0; i--) { - lastNonZeroByte = i; - break; + if (numeric.GetVal(i) != 0) + { + lastNonZeroByte = i; + break; + } + } + + // If all bytes are zero, return 0 + if (lastNonZeroByte == -1) + { + return 0m; + } + + // Build value from highest byte down to avoid large intermediate multipliers + // This prevents decimal overflow when processing high-precision SQL numerics + for (int i = lastNonZeroByte; i >= 0; i--) + { + scaledValue = scaledValue * 256m + numeric.GetVal(i); } - } - - // If all bytes are zero, return 0 - if (lastNonZeroByte == -1) - { - return 0m; - } - - // Build value from highest byte down to avoid large intermediate multipliers - // This prevents decimal overflow when processing high-precision SQL numerics - for (int i = lastNonZeroByte; i >= 0; i--) - { - scaledValue = scaledValue * 256m + numeric.GetVal(i); - } - // Scale down by dividing by 10^scale to get the actual decimal value - decimal result; - if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) - { - result = scaledValue / PowersOf10[numeric.scale]; - } - else if (numeric.scale == 0) - { - result = scaledValue; - } - else - { - // For scales beyond our lookup table, use Math.Pow (slower but rare) - result = scaledValue / (decimal)Math.Pow(10, numeric.scale); - } + // Scale down by dividing by 10^scale to get the actual decimal value + decimal result; + if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) + { + result = scaledValue / PowersOf10[numeric.scale]; + } + else if (numeric.scale == 0) + { + result = scaledValue; + } + else + { + // For scales beyond our lookup table, use Math.Pow (slower but rare) 
+ result = scaledValue / (decimal)Math.Pow(10, numeric.scale); + } - // Apply sign: 1 = positive, 0 = negative - if (numeric.sign == 0) + // Apply sign: 1 = positive, 0 = negative + if (numeric.sign == 0) + { + result = -result; + } + + return result; + } + catch (OverflowException) { - result = -result; + // SQL Server DECIMAL(38,scale) can represent values much larger than C# decimal's range. + // C# decimal maximum: ±79,228,162,514,264,337,593,543,950,335 (approx ±7.9 × 10^28) + // SQL DECIMAL(38,0) maximum: ±10^38 - 1 + // + // This overflow typically occurs with DECIMAL(30+, scale) parameters containing values + // that exceed 29 significant digits total. + string valHex = string.Join("", Enumerable.Range(0, 16).Select(i => numeric.GetVal(i).ToString("X2"))); + throw new OverflowException( + $"SQL DECIMAL/NUMERIC value exceeds C# decimal range. " + + $"Precision={numeric.precision}, Scale={numeric.scale}, Sign={numeric.sign}, " + + $"Val={valHex}. " + + $"C# decimal supports up to 29 significant digits (±7.9×10^28). " + + $"Consider using lower precision parameters or handle large numerics differently."); } - - return result; } /// diff --git a/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd b/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd index 838fce5..83036dc 100644 --- a/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd +++ b/language-extensions/dotnet-core-CSharp/test/build/windows/build-dotnet-core-CSharp-extension-test.cmd @@ -48,7 +48,7 @@ REM Do not call VsDevCmd if the environment is already set. Otherwise, it will k REM to the PATH environment variable and it will be too long for windows to handle. 
REM IF NOT DEFINED DevEnvDir ( - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64 -host_arch=amd64 + CALL "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64 -host_arch=amd64 ) SET BUILD_OUTPUT=%DOTNETCORE_CSHARP_EXTENSION_TEST_WORKING_DIR%\%CMAKE_CONFIGURATION% diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj index 758bc79..96b02b6 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj @@ -6,13 +6,15 @@ $(BinRoot)/$(Configuration)/ false + + Debug - ..\..\..\..\..\build-output\dotnet-core-CSharp-extension\windows\release\Microsoft.SqlServer.CSharpExtension.dll + ..\..\..\..\..\build-output\dotnet-core-CSharp-extension\windows\$(Configuration)\Microsoft.SqlServer.CSharpExtension.dll diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 9249b23..4cba789 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -357,4 +357,457 @@ namespace ExtensionApiTest EXPECT_EQ(test.val[0], 0x39); EXPECT_EQ(test.val[1], 0x30); } + + //---------------------------------------------------------------------------------------------- + // Name: GetDecimalInputColumnsTest + // + // Description: + // Test decimal columns in input DataFrame to validate that SQL_NUMERIC_STRUCT values + // can be passed as column data and properly consumed by the C# extension. 
+ // + // WHY: E2E tests validated decimal column passthrough, but unit tests had zero coverage + // for decimal columns. This test ensures the native-to-managed conversion for decimal + // columns works correctly at the API boundary. + // + // WHAT: Tests 2 decimal columns with 5 rows including: + // - Column 1: Non-nullable with various precision/scale (10,2), (19,4), (5,5) + // - Column 2: Nullable with NULL values and edge cases (zero, negative, max precision) + // + TEST_F(CSharpExtensionApiTests, GetDecimalInputColumnsTest) + { + // Initialize test data for decimal columns + // Column 1: DecimalColumn1 (non-nullable, NUMERIC(19,4)) + // Column 2: DecimalColumn2 (nullable, NUMERIC(38,10)) + // + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Column 1 data: Non-nullable, NUMERIC(19, 4) + // Values: 12345.6789, 9876543.2100, 0.1234, -555.5000, 999999999.9999 + // + vector column1Data = { + CreateNumericStruct(123456789, 19, 4, false), // 12345.6789 + CreateNumericStruct(98765432100LL, 19, 4, false), // 9876543.2100 + CreateNumericStruct(1234, 19, 4, false), // 0.1234 + CreateNumericStruct(5555000, 19, 4, true), // -555.5000 + CreateNumericStruct(9999999999999LL, 19, 4, false) // 999999999.9999 + }; + + // Column 2 data: Nullable, NUMERIC(38, 10) + // Values: 1234567890.1234567890, NULL, 0.0000000001, NULL, -9999.9999999999 + // + vector column2Data = { + CreateNumericStruct(12345678901234567890ULL, 38, 10, false), // 1234567890.1234567890 + SQL_NUMERIC_STRUCT(), // NULL (placeholder) + CreateNumericStruct(1, 38, 10, false), // 0.0000000001 + SQL_NUMERIC_STRUCT(), // NULL 
(placeholder) + CreateNumericStruct(99999999999999ULL, 38, 10, true) // -9999.9999999999 + }; + + // SQL_NUMERIC_STRUCT size is always 19 bytes + const SQLINTEGER numericStructSize = 19; + + // Column 1 strLenOrInd: All non-null + vector col1StrLenOrInd(5, numericStructSize); + + // Column 2 strLenOrInd: Rows 1 and 3 are NULL (0-indexed) + vector col2StrLenOrInd = { + numericStructSize, // Row 0: valid + SQL_NULL_DATA, // Row 1: NULL + numericStructSize, // Row 2: valid + SQL_NULL_DATA, // Row 3: NULL + numericStructSize // Row 4: valid + }; + + // Create ColumnInfo with decimal data + ColumnInfo decimalInfo( + "DecimalColumn1", + column1Data, + col1StrLenOrInd, + "DecimalColumn2", + column2Data, + col2StrLenOrInd, + vector{ SQL_NO_NULLS, SQL_NULLABLE }); + + // Initialize session with 2 decimal columns, 0 parameters + InitializeSession( + decimalInfo.GetColumnsNumber(), + 0, + m_scriptString); + + // Initialize the decimal columns + InitializeColumns(&decimalInfo); + + // Execute the script with decimal input columns + // This tests that SQL_NUMERIC_STRUCT columns can be passed to C# DataFrame + Execute( + ColumnInfo::sm_rowsNumber, + decimalInfo.m_dataSet.data(), + decimalInfo.m_strLen_or_Ind.data(), + decimalInfo.m_columnNames); + + // Validate that columns metadata is correct + // NOTE: SDK calculates precision from actual data, not input metadata + // Column 0: DecimalColumn1, calculated precision 13 (max value 999999999.9999 = 9 digits + 4 scale) + GetResultColumn( + 0, // columnNumber + SQL_C_NUMERIC, // dataType + 13, // columnSize (calculated precision from data) + 4, // decimalDigits (scale) + SQL_NO_NULLS); // nullable + + // Column 1: DecimalColumn2, calculated precision 19 (from actual data values) + GetResultColumn( + 1, // columnNumber + SQL_C_NUMERIC, // dataType + 19, // columnSize (calculated precision from data) + 10, // decimalDigits (scale) + SQL_NULLABLE); // nullable + } + + 
//---------------------------------------------------------------------------------------------- + // Name: GetDecimalResultColumnsTest + // + // Description: + // Test decimal columns in output DataFrame to validate that C# can return + // SQL_NUMERIC_STRUCT values as result columns and the native layer properly + // retrieves them with correct precision/scale metadata. + // + // WHY: E2E tests validated decimal output columns, but unit tests had no coverage + // for verifying the managed-to-native conversion and metadata calculation for + // decimal result columns. This is CRITICAL because the SDK must dynamically + // calculate precision from actual decimal data (not hardcode to 38). + // + // WHAT: Tests that decimal columns returned from C# have: + // - Correct SQL_C_NUMERIC type + // - Properly calculated precision (not hardcoded to 38) + // - Correct scale matching the C# decimal data + // - Proper NULL handling in nullable columns + // + TEST_F(CSharpExtensionApiTests, GetDecimalResultColumnsTest) + { + // Create decimal column data for testing output + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Result Column 1: NUMERIC(18, 2) - typical financial data + // Maximum value in data: 999999999999999.99 requires precision 18 + // + vector resultCol1 = { + CreateNumericStruct(123456789, 18, 2, false), // 1234567.89 + CreateNumericStruct(99999999999999999LL, 18, 2, false), // 999999999999999.99 + CreateNumericStruct(1050, 18, 2, false), // 10.50 + CreateNumericStruct(100, 18, 2, true), // -1.00 + CreateNumericStruct(0, 18, 2, false) // 0.00 + }; + + // Result Column 2: NUMERIC(10, 5) - high precision decimals with NULLs + // Maximum value: 12345.67891 requires precision 10 + // + vector resultCol2 = { + CreateNumericStruct(1234567891, 10, 5, false), // 12345.67891 + SQL_NUMERIC_STRUCT(), // NULL + CreateNumericStruct(1, 10, 5, false), // 0.00001 + SQL_NUMERIC_STRUCT(), // NULL + CreateNumericStruct(9999999999LL, 10, 5, true) // -99999.99999 + }; + + const SQLINTEGER numericStructSize = 19; + + vector col1StrLenOrInd(5, numericStructSize); + vector col2StrLenOrInd = { + numericStructSize, + SQL_NULL_DATA, + numericStructSize, + SQL_NULL_DATA, + numericStructSize + }; + + ColumnInfo decimalResultInfo( + "AmountColumn", + resultCol1, + col1StrLenOrInd, + "PrecisionColumn", + resultCol2, + col2StrLenOrInd, + vector{ SQL_NO_NULLS, SQL_NULLABLE }); + + InitializeSession( + decimalResultInfo.GetColumnsNumber(), + 0, + m_scriptString); + + InitializeColumns(&decimalResultInfo); + + Execute( + ColumnInfo::sm_rowsNumber, + decimalResultInfo.m_dataSet.data(), + decimalResultInfo.m_strLen_or_Ind.data(), + decimalResultInfo.m_columnNames); + + // Validate result column metadata + // This tests that CSharpOutputDataSet.ExtractNumericColumn() properly + // calculates precision from the actual data (not hardcoded to 38) + // + GetResultColumn( + 0, // columnNumber + SQL_C_NUMERIC, // 
dataType + 18, // columnSize (calculated precision from max value) + 2, // decimalDigits (scale) + SQL_NO_NULLS); // nullable + + GetResultColumn( + 1, // columnNumber + SQL_C_NUMERIC, // dataType + 10, // columnSize (calculated precision) + 5, // decimalDigits (scale) + SQL_NULLABLE); // nullable + } + + //---------------------------------------------------------------------------------------------- + // Name: MultipleDecimalColumnsTest + // + // Description: + // Test multiple decimal columns with different precision/scale combinations + // to validate that the extension can handle mixed decimal formats in a single DataFrame. + // + // WHY: Real-world scenarios often have multiple decimal columns with different + // precision/scale requirements (e.g., prices, quantities, percentages, rates). + // E2E tests had PassThroughVariousDecimalPrecisions but unit tests had no + // equivalent coverage for validating mixed precision handling at the API level. + // + // WHAT: Tests 2 columns representing real-world financial data: + // - Column 1: NUMERIC(19,4) - extended money format (SQL Server MONEY uses 19,4) + // - Column 2: NUMERIC(5,5) - percentage/rate format (0.00000 to 0.99999) + // + TEST_F(CSharpExtensionApiTests, MultipleDecimalColumnsTest) + { + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Column 1: NUMERIC(19, 4) - extended money values + // Represents amounts like: $123,456,789,012.3456 + // + vector moneyColumn = { + CreateNumericStruct(1234567890123456LL, 19, 4, false), // 123456789012.3456 + CreateNumericStruct(99990000, 19, 4, false), // 9999.0000 + CreateNumericStruct(12345678, 19, 4, true), // -1234.5678 + CreateNumericStruct(50, 19, 4, false), // 0.0050 + CreateNumericStruct(9223372036854775807LL, 19, 4, false) // Large value + }; + + // Column 2: NUMERIC(5, 5) - rates/percentages + // Represents values like: 0.12345 (12.345%) + // + vector rateColumn = { + CreateNumericStruct(12345, 5, 5, false), // 0.12345 (12.345%) + CreateNumericStruct(99999, 5, 5, false), // 0.99999 (99.999% - max) + CreateNumericStruct(0, 5, 5, false), // 0.00000 (0%) + CreateNumericStruct(1, 5, 5, false), // 0.00001 (0.001% - minimum) + CreateNumericStruct(5000, 5, 5, false) // 0.05000 (5%) + }; + + const SQLINTEGER numericStructSize = 19; + vector allValid(5, numericStructSize); + + ColumnInfo mixedDecimalInfo( + "MoneyAmount", + moneyColumn, + allValid, + "InterestRate", + rateColumn, + allValid, + vector{ SQL_NO_NULLS, SQL_NO_NULLS }); + + InitializeSession( + mixedDecimalInfo.GetColumnsNumber(), + 0, + m_scriptString); + + InitializeColumns(&mixedDecimalInfo); + + Execute( + ColumnInfo::sm_rowsNumber, + mixedDecimalInfo.m_dataSet.data(), + mixedDecimalInfo.m_strLen_or_Ind.data(), + mixedDecimalInfo.m_columnNames); + + // Validate each column has correct precision/scale + // NOTE: SDK calculates precision from actual data values + GetResultColumn( + 0, // columnNumber + SQL_C_NUMERIC, // dataType + 19, // columnSize (precision for money - preserved from actual large values) + 4, // decimalDigits (scale for money) + SQL_NO_NULLS); // nullable + + GetResultColumn( + 1, // 
columnNumber + SQL_C_NUMERIC, // dataType + 6, // columnSize (calculated precision: 0.99999 = 1 + 5 scale = 6) + 5, // decimalDigits (max scale) + SQL_NO_NULLS); // nullable + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalColumnsWithNullsTest + // + // Description: + // Test decimal columns with mixed NULL and non-NULL values to validate proper + // NULL handling in decimal column data. + // + // WHY: NULL handling in decimal columns is complex because SQL_NUMERIC_STRUCT + // itself doesn't have a NULL indicator - NULL is tracked separately via + // strLenOrInd = SQL_NULL_DATA. E2E tests had PassThroughDecimalColumnsWithNulls + // but unit tests had zero coverage for validating NULL handling at the native API level. + // + // WHAT: Tests 2 columns with different NULL patterns: + // - Column 1: First and last rows NULL (edge case for array bounds) + // - Column 2: Middle rows NULL (common pattern in sparse data) + // Validates that: + // - NULLs don't corrupt adjacent non-NULL values + // - Precision/scale calculation ignores NULL rows + // - Column remains nullable when NULLs present + // + TEST_F(CSharpExtensionApiTests, DecimalColumnsWithNullsTest) + { + auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT + { + SQL_NUMERIC_STRUCT result; + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; + + unsigned long long absMantissa = abs(mantissa); + for (int i = 0; i < 16; i++) + { + result.val[i] = (SQLCHAR)(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + }; + + // Column 1: First and last NULL (NUMERIC(28, 6)) + // Pattern: NULL, 12345.678900, 98765.432100, 0.000001, NULL + // + vector col1Data = { + SQL_NUMERIC_STRUCT(), // NULL + CreateNumericStruct(12345678900LL, 28, 6, false), // 12345.678900 + CreateNumericStruct(98765432100LL, 28, 6, false), // 98765.432100 + CreateNumericStruct(1, 28, 6, false), // 0.000001 + SQL_NUMERIC_STRUCT() // NULL + }; + + // Column 2: Middle rows NULL (NUMERIC(15, 3)) + // Pattern: 999999.999, NULL, NULL, -123.456, 0.001 + // + vector col2Data = { + CreateNumericStruct(999999999, 15, 3, false), // 999999.999 + SQL_NUMERIC_STRUCT(), // NULL + SQL_NUMERIC_STRUCT(), // NULL + CreateNumericStruct(123456, 15, 3, true), // -123.456 + CreateNumericStruct(1, 15, 3, false) // 0.001 + }; + + const SQLINTEGER numericStructSize = 19; + + // Column 1: Rows 0 and 4 are NULL + vector col1StrLenOrInd = { + SQL_NULL_DATA, + numericStructSize, + numericStructSize, + numericStructSize, + SQL_NULL_DATA + }; + + // Column 2: Rows 1 and 2 are NULL + vector col2StrLenOrInd = { + numericStructSize, + SQL_NULL_DATA, + SQL_NULL_DATA, + numericStructSize, + numericStructSize + }; + + ColumnInfo nullDecimalInfo( + "SparseColumn1", + col1Data, + col1StrLenOrInd, + "SparseColumn2", + col2Data, + col2StrLenOrInd, + vector{ SQL_NULLABLE, SQL_NULLABLE }); + + InitializeSession( + nullDecimalInfo.GetColumnsNumber(), + 0, + m_scriptString); + + InitializeColumns(&nullDecimalInfo); + + Execute( + ColumnInfo::sm_rowsNumber, + nullDecimalInfo.m_dataSet.data(), + nullDecimalInfo.m_strLen_or_Ind.data(), + nullDecimalInfo.m_columnNames); + + // Validate metadata - both columns should be nullable + // NOTE: SDK calculates precision from actual non-NULL data values + GetResultColumn( + 0, // columnNumber + SQL_C_NUMERIC, // dataType + 9, // 
columnSize (calculated precision from max non-NULL value) + 6, // decimalDigits (scale) + SQL_NULLABLE); // nullable (contains NULLs) + + GetResultColumn( + 1, // columnNumber + SQL_C_NUMERIC, // dataType + 9, // columnSize (calculated precision from max non-NULL value) + 3, // decimalDigits (scale) + SQL_NULLABLE); // nullable (contains NULLs) + } } diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp index 8a89fc7..ab50eef 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp @@ -528,4 +528,12 @@ namespace ExtensionApiTest EXPECT_TRUE(error.find("Error: Unable to find user class with full name:") != string::npos); } } + + // Explicit template instantiations + template void CSharpExtensionApiTests::Execute( + SQLULEN rowsNumber, + void **dataSet, + SQLINTEGER **strLen_or_Ind, + vector columnNames, + SQLRETURN SQLResult); } diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp index fed15af..74282a0 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp @@ -390,6 +390,46 @@ namespace ExtensionApiTest } } + //---------------------------------------------------------------------------------------------- + // Name: CSharpExtensionApiTest::InitializeColumns + // + // Description: + // Template specialization for SQL_NUMERIC_STRUCT to extract precision from the struct + // instead of using sizeof() which gives the struct size (19 bytes). 
+ // + template<> + void CSharpExtensionApiTests::InitializeColumns( + ColumnInfo *columnInfo) + { + SQLUSMALLINT inputSchemaColumnsNumber = columnInfo->GetColumnsNumber(); + for (SQLUSMALLINT columnNumber = 0; columnNumber < inputSchemaColumnsNumber; ++columnNumber) + { + // For NUMERIC columns, extract precision from the first non-NULL value in the column + // columnSize for NUMERIC represents precision (1-38), not bytes + SQLULEN precision = 38; // default + const SQL_NUMERIC_STRUCT* columnData = + static_cast(columnInfo->m_dataSet[columnNumber]); + SQLINTEGER* strLenOrInd = columnInfo->m_strLen_or_Ind[columnNumber]; + + // Find first non-NULL value to get precision + for (SQLULEN row = 0; row < ColumnInfo::sm_rowsNumber; ++row) + { + if (strLenOrInd[row] != SQL_NULL_DATA) + { + precision = columnData[row].precision; + break; + } + } + + InitializeColumn(columnNumber, + columnInfo->m_columnNames[columnNumber], + SQL_C_NUMERIC, + precision, + columnInfo->m_nullable[columnNumber], + columnInfo->m_partitionByIndexes[columnNumber]); + } + } + //---------------------------------------------------------------------------------------------- // Name: ColumnInfo::ColumnInfo // @@ -485,6 +525,8 @@ namespace ExtensionApiTest ColumnInfo *ColumnInfo); template void CSharpExtensionApiTests::InitializeColumns( ColumnInfo *ColumnInfo); + template void CSharpExtensionApiTests::InitializeColumns( + ColumnInfo *ColumnInfo); template vector CSharpExtensionApiTests::GenerateContiguousData( vector columnVector, From fa9a35feb42eb220c41911e9231b98c05c21c672 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Mon, 16 Mar 2026 14:04:51 -0700 Subject: [PATCH 03/13] review --- .../test/include/CSharpExtensionApiTests.h | 54 ++++++++ .../test/src/native/CSharpDecimalTests.cpp | 130 ++---------------- 2 files changed, 66 insertions(+), 118 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h 
b/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h index 34c0162..af6fbd9 100644 --- a/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h +++ b/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h @@ -481,4 +481,58 @@ namespace ExtensionApiTest std::vector m_nullable; std::vector m_partitionByIndexes; }; + + //---------------------------------------------------------------------------------------------- + // TestHelpers namespace - Utility functions for test data generation + // + namespace TestHelpers + { + //---------------------------------------------------------------------------------------------- + // Name: CreateNumericStruct + // + // Description: + // Helper function to create SQL_NUMERIC_STRUCT from decimal value components. + // Creates a properly initialized ODBC numeric structure with little-endian mantissa encoding. + // + // Arguments: + // mantissa - The unscaled integer value (e.g., 123456789 for 12345.6789 with scale=4) + // precision - Total number of digits (1-38, as per SQL NUMERIC/DECIMAL spec) + // scale - Number of digits after decimal point (0-precision) + // isNegative - true for negative values, false for positive/zero + // + // Returns: + // SQL_NUMERIC_STRUCT - Fully initialized 19-byte ODBC numeric structure + // + // Example: + // CreateNumericStruct(1234567, 10, 2, false) → represents 12345.67 + // CreateNumericStruct(5555000, 19, 4, true) → represents -555.5000 + // + inline SQL_NUMERIC_STRUCT CreateNumericStruct( + long long mantissa, + SQLCHAR precision, + SQLSCHAR scale, + bool isNegative) + { + // Zero-initialize all fields for safety + SQL_NUMERIC_STRUCT result{}; + + result.precision = precision; + result.scale = scale; + result.sign = isNegative ? 
0 : 1; // 0 = negative, 1 = positive (ODBC convention) + + // Convert mantissa to little-endian byte array in val[0..15] + // Use std::abs for long long (not plain abs which is for int) + unsigned long long absMantissa = static_cast(std::abs(mantissa)); + + // Extract bytes in little-endian order + // Use sizeof for self-documenting code instead of magic number 16 + for (size_t i = 0; i < sizeof(result.val); i++) + { + result.val[i] = static_cast(absMantissa & 0xFF); + absMantissa >>= 8; + } + + return result; + } + } } diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 4cba789..c9d12e8 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -22,30 +22,12 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, InitNumericParamTest) { + using TestHelpers::CreateNumericStruct; + InitializeSession( 0, // inputSchemaColumnsNumber 5); // parametersNumber - // Helper lambda to create SQL_NUMERIC_STRUCT from decimal value - // - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 
0 : 1; - - // Convert mantissa to little-endian byte array - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; - // Test NUMERIC(10,2) value: 12345.67 // Stored as: mantissa = 1234567, scale = 2 // @@ -213,27 +195,12 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, DecimalPrecisionScaleTest) { + using TestHelpers::CreateNumericStruct; + InitializeSession( 0, // inputSchemaColumnsNumber 6); // parametersNumber - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; - // NUMERIC(38, 0) - maximum precision, no decimal places SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(12345678901234567LL, 38, 0, false); InitParam(0, p0); @@ -267,27 +234,12 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, DecimalBoundaryValuesTest) { + using TestHelpers::CreateNumericStruct; + InitializeSession( 0, // inputSchemaColumnsNumber 6); // parametersNumber - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 
0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; - // Test zero SQL_NUMERIC_STRUCT zero = CreateNumericStruct(0, 10, 2, false); InitParam(0, zero); @@ -375,26 +327,12 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, GetDecimalInputColumnsTest) { + using TestHelpers::CreateNumericStruct; + // Initialize test data for decimal columns // Column 1: DecimalColumn1 (non-nullable, NUMERIC(19,4)) // Column 2: DecimalColumn2 (nullable, NUMERIC(38,10)) // - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; // Column 1 data: Non-nullable, NUMERIC(19, 4) // Values: 12345.6789, 9876543.2100, 0.1234, -555.5000, 999999999.9999 @@ -500,23 +438,9 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, GetDecimalResultColumnsTest) { + using TestHelpers::CreateNumericStruct; + // Create decimal column data for testing output - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 
0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; // Result Column 1: NUMERIC(18, 2) - typical financial data // Maximum value in data: 999999999999999.99 requires precision 18 @@ -610,22 +534,7 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, MultipleDecimalColumnsTest) { - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; + using TestHelpers::CreateNumericStruct; // Column 1: NUMERIC(19, 4) - extended money values // Represents amounts like: $123,456,789,012.3456 @@ -713,22 +622,7 @@ namespace ExtensionApiTest // TEST_F(CSharpExtensionApiTests, DecimalColumnsWithNullsTest) { - auto CreateNumericStruct = [](long long mantissa, SQLCHAR precision, SQLSCHAR scale, bool isNegative) -> SQL_NUMERIC_STRUCT - { - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = isNegative ? 
0 : 1; - - unsigned long long absMantissa = abs(mantissa); - for (int i = 0; i < 16; i++) - { - result.val[i] = (SQLCHAR)(absMantissa & 0xFF); - absMantissa >>= 8; - } - - return result; - }; + using TestHelpers::CreateNumericStruct; // Column 1: First and last NULL (NUMERIC(28, 6)) // Pattern: NULL, 12345.678900, 98765.432100, 0.000001, NULL From 7c524be0edb951961c174c73565564d7236ee77e Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Mon, 16 Mar 2026 16:24:35 -0700 Subject: [PATCH 04/13] wip --- .../src/managed/CSharpInputDataSet.cs | 3 +- .../src/managed/CSharpOutputDataSet.cs | 3 +- .../src/managed/CSharpParamContainer.cs | 5 +- .../src/managed/utils/Sql.cs | 355 +----------------- .../test/src/native/CSharpDecimalTests.cpp | 72 ++++ .../test/src/native/CSharpInitParamTests.cpp | 56 +++ 6 files changed, 146 insertions(+), 348 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs index 6e36f20..f712f51 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs @@ -11,6 +11,7 @@ using System; using Microsoft.Data.Analysis; using static Microsoft.SqlServer.CSharpExtension.Sql; +using static Microsoft.SqlServer.CSharpExtension.SqlNumericHelper; namespace Microsoft.SqlServer.CSharpExtension { @@ -226,7 +227,7 @@ private unsafe void AddNumericDataFrameColumn( { // Convert SQL_NUMERIC_STRUCT to C# decimal // The conversion handles precision, scale, sign, and the 16-byte integer value - colDataFrame[i] = SqlNumericStructToDecimal(numericArray[i]); + colDataFrame[i] = ToDecimal(numericArray[i]); } // If null, the PrimitiveDataFrameColumn slot remains as null } diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 
7a286e8..021f923 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -15,6 +15,7 @@ using System.Collections.Generic; using Microsoft.Data.Analysis; using static Microsoft.SqlServer.CSharpExtension.Sql; +using static Microsoft.SqlServer.CSharpExtension.SqlNumericHelper; namespace Microsoft.SqlServer.CSharpExtension { @@ -317,7 +318,7 @@ private unsafe void ExtractNumericColumn( if (column[rowNumber] != null) { decimal value = (decimal)column[rowNumber]; - numericArray[rowNumber] = DecimalToSqlNumericStruct(value, precision, scale); + numericArray[rowNumber] = FromDecimal(value, precision, scale); Logging.Trace($"ExtractNumericColumn: Row {rowNumber}, Value={value} converted to SqlNumericStruct"); } else diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index cd92b93..3d2b3e3 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -14,6 +14,7 @@ using System.Collections.Generic; using System.Runtime.InteropServices; using static Microsoft.SqlServer.CSharpExtension.Sql; +using static Microsoft.SqlServer.CSharpExtension.SqlNumericHelper; namespace Microsoft.SqlServer.CSharpExtension { @@ -135,7 +136,7 @@ public unsafe void AddParam( case SqlDataType.DotNetNumeric: // Convert SQL_NUMERIC_STRUCT to C# decimal SqlNumericStruct* numericPtr = (SqlNumericStruct*)paramValue; - _params[paramNumber].Value = SqlNumericStructToDecimal(*numericPtr); + _params[paramNumber].Value = ToDecimal(*numericPtr); break; case SqlDataType.DotNetChar: _params[paramNumber].Value = Interop.UTF8PtrToStr((char*)paramValue, (ulong)strLenOrNullMap); @@ -315,7 +316,7 @@ private unsafe void ReplaceNumericStructParam( void **paramValue) { // Convert C# 
decimal to SQL_NUMERIC_STRUCT - SqlNumericStruct numericStruct = DecimalToSqlNumericStruct(value, precision, scale); + SqlNumericStruct numericStruct = FromDecimal(value, precision, scale); // Box the struct into a single-element array to create a heap-allocated copy, then pin it. // diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index cfab440..e1ec618 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -5,14 +5,12 @@ // @File: Sql.cs // // Purpose: -// This is the the main include for SqlDataType and Sql return values +// SQL data type definitions, ODBC constants, and type mapping dictionaries. +// For NUMERIC/DECIMAL conversion utilities, see SqlNumericHelper.cs. // //********************************************************************* using System; using System.Collections.Generic; -using System.Linq; -using System.Runtime.InteropServices; -using System.Text; namespace Microsoft.SqlServer.CSharpExtension { @@ -29,6 +27,14 @@ public class Sql public const short MinUtf8CharSize = 1; public const short MinUtf16CharSize = 2; + + /// + /// Size of SQL_NUMERIC_STRUCT in bytes (ODBC specification). + /// Layout: precision(1) + scale(1) + sign(1) + val[16] = 19 bytes + /// Must match the exact size of ODBC's SQL_NUMERIC_STRUCT for binary compatibility. 
+ /// + public const short SqlNumericStructSize = 19; + public enum SqlDataType: short { DotNetBigInt = -5 + SQL_SIGNED_OFFSET, //SQL_C_SBIGINT + SQL_SIGNED_OFFSET @@ -93,7 +99,7 @@ public enum SqlDataType: short {SqlDataType.DotNetBit, sizeof(bool)}, {SqlDataType.DotNetChar, MinUtf8CharSize}, {SqlDataType.DotNetWChar, MinUtf16CharSize}, - {SqlDataType.DotNetNumeric, 19} // sizeof(SqlNumericStruct) + {SqlDataType.DotNetNumeric, SqlNumericStructSize} }; /// @@ -128,344 +134,5 @@ public static short ToSQLDataType(SqlDataType dataType) { return (short)dataType; } - - /// - /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT (19 bytes). - /// Used for transferring NUMERIC/DECIMAL data between SQL Server and C#. - /// IMPORTANT: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT - /// defined in sql.h/sqltypes.h on the native side. - /// - /// WHY individual byte fields instead of byte[] array? - /// - Using byte[] would make this a managed type (reference type), violating the unmanaged constraint - /// - Fixed buffers (fixed byte val[16]) require unsafe code, which we want to avoid for safety - /// - Individual fields keep this as a pure value type (unmanaged) with memory safety - /// - The compiler will optimize access patterns, so there's no performance penalty - /// - [StructLayout(LayoutKind.Sequential, Pack = 1)] - public struct SqlNumericStruct - { - /// - /// Total number of digits (1-38) - SQLCHAR (unsigned byte) - /// - public byte precision; - - /// - /// Number of digits after decimal point (0-precision) - SQLSCHAR (signed byte) - /// - /// WHY sbyte (signed) instead of byte (unsigned)? 
- /// - ODBC specification defines scale as SQLSCHAR (signed char) in SQL_NUMERIC_STRUCT - /// - Although scale values are always non-negative in practice (0-38), - /// we must use sbyte for exact binary layout compatibility with native ODBC code - /// - Mismatch would cause struct layout corruption when marshaling to/from native code - /// - public sbyte scale; - - /// - /// Sign indicator: 1 = positive, 0 = negative - SQLCHAR (unsigned byte) - /// - public byte sign; - - /// - /// Little-endian byte array (16 bytes) representing the scaled integer value. - /// The actual numeric value = (val as integer) * 10^(-scale), adjusted for sign. - /// Corresponds to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN = 16. - /// - /// WHY 16 separate fields instead of an array? - /// - See struct-level comment: arrays would make this managed, violating unmanaged constraint - /// - This verbose approach maintains binary compatibility without requiring unsafe code - /// - public byte val0; - public byte val1; - public byte val2; - public byte val3; - public byte val4; - public byte val5; - public byte val6; - public byte val7; - public byte val8; - public byte val9; - public byte val10; - public byte val11; - public byte val12; - public byte val13; - public byte val14; - public byte val15; - - /// - /// Helper method to get val byte at specified index (0-15). - /// - /// WHY use switch expression instead of array indexing? 
- /// - Since we can't use arrays (would make struct managed), we need field access - /// - Switch expressions are optimized by the compiler to efficient jump tables - /// - Modern JIT will inline this for zero overhead compared to array access - /// - public byte GetVal(int index) - { - return index switch - { - 0 => val0, - 1 => val1, - 2 => val2, - 3 => val3, - 4 => val4, - 5 => val5, - 6 => val6, - 7 => val7, - 8 => val8, - 9 => val9, - 10 => val10, - 11 => val11, - 12 => val12, - 13 => val13, - 14 => val14, - 15 => val15, - _ => throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15") - }; - } - - /// - /// Helper method to set val byte at specified index (0-15). - /// - /// WHY use switch statement instead of array indexing? - /// - Same reason as GetVal: can't use arrays without making struct managed - /// - Switch statement compiles to efficient code without runtime overhead - /// - public void SetVal(int index, byte value) - { - switch (index) - { - case 0: val0 = value; break; - case 1: val1 = value; break; - case 2: val2 = value; break; - case 3: val3 = value; break; - case 4: val4 = value; break; - case 5: val5 = value; break; - case 6: val6 = value; break; - case 7: val7 = value; break; - case 8: val8 = value; break; - case 9: val9 = value; break; - case 10: val10 = value; break; - case 11: val11 = value; break; - case 12: val12 = value; break; - case 13: val13 = value; break; - case 14: val14 = value; break; - case 15: val15 = value; break; - default: throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15"); - } - } - } - - // Powers of 10 lookup table for efficient decimal scaling (up to 10^28) - // - // WHY use a lookup table instead of Math.Pow? 
- // - Math.Pow returns double, requiring conversion to decimal with potential precision loss - // - Repeated Math.Pow calls in tight loops have measurable performance impact - // - Pre-computed decimal constants give exact values with zero runtime overhead - // - C# decimal supports up to 28-29 significant digits, so 10^0 through 10^28 covers all cases - private static readonly decimal[] PowersOf10 = new decimal[29] - { - 1m, // 10^0 - 10m, // 10^1 - 100m, // 10^2 - 1000m, // 10^3 - 10000m, // 10^4 - 100000m, // 10^5 - 1000000m, // 10^6 - 10000000m, // 10^7 - 100000000m, // 10^8 - 1000000000m, // 10^9 - 10000000000m, // 10^10 - 100000000000m, // 10^11 - 1000000000000m, // 10^12 - 10000000000000m, // 10^13 - 100000000000000m, // 10^14 - 1000000000000000m, // 10^15 - 10000000000000000m, // 10^16 - 100000000000000000m, // 10^17 - 1000000000000000000m, // 10^18 - 10000000000000000000m, // 10^19 - 100000000000000000000m, // 10^20 - 1000000000000000000000m, // 10^21 - 10000000000000000000000m, // 10^22 - 100000000000000000000000m, // 10^23 - 1000000000000000000000000m, // 10^24 - 10000000000000000000000000m, // 10^25 - 100000000000000000000000000m, // 10^26 - 1000000000000000000000000000m, // 10^27 - 10000000000000000000000000000m // 10^28 - }; - - /// - /// Converts SQL_NUMERIC_STRUCT to C# decimal. - /// Follows the same conversion logic as Java extension's NumericStructToBigDecimal. - /// - /// The SQL numeric structure from ODBC. - /// The equivalent C# decimal value. - /// Thrown when the value exceeds C# decimal range. - public static decimal SqlNumericStructToDecimal(SqlNumericStruct numeric) - { - try - { - // Convert little-endian byte array (16 bytes) to a scaled integer value. - // The val array contains the absolute value scaled by 10^scale. 
- // For example, for numeric(10,2) value 123.45: - // scale = 2, val represents 12345 (123.45 * 10^2) - - // Build the integer value from little-endian bytes - // We read up to 16 bytes (128 bits) which can represent very large numbers - // - // WHY multiply by 256 for each byte position? - // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation - // - Each byte represents one "digit" in base 256 (not base 10) - // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 - // - This matches how ODBC and SQL Server store NUMERIC internally - // - // WHY process from end to beginning? - // - Find the highest non-zero byte first to determine actual value size - // - Avoids computing unnecessarily large multipliers that would overflow decimal - // - For most practical values, only first 12-13 bytes are used - // - decimal scaledValue = 0m; - - // Find the last non-zero byte to avoid unnecessary iterations - int lastNonZeroByte = -1; - for (int i = 15; i >= 0; i--) - { - if (numeric.GetVal(i) != 0) - { - lastNonZeroByte = i; - break; - } - } - - // If all bytes are zero, return 0 - if (lastNonZeroByte == -1) - { - return 0m; - } - - // Build value from highest byte down to avoid large intermediate multipliers - // This prevents decimal overflow when processing high-precision SQL numerics - for (int i = lastNonZeroByte; i >= 0; i--) - { - scaledValue = scaledValue * 256m + numeric.GetVal(i); - } - - // Scale down by dividing by 10^scale to get the actual decimal value - decimal result; - if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) - { - result = scaledValue / PowersOf10[numeric.scale]; - } - else if (numeric.scale == 0) - { - result = scaledValue; - } - else - { - // For scales beyond our lookup table, use Math.Pow (slower but rare) - result = scaledValue / (decimal)Math.Pow(10, numeric.scale); - } - - // Apply sign: 1 = positive, 0 = negative - if (numeric.sign == 0) - { - result = -result; - } - - return 
result; - } - catch (OverflowException) - { - // SQL Server DECIMAL(38,scale) can represent values much larger than C# decimal's range. - // C# decimal maximum: ±79,228,162,514,264,337,593,543,950,335 (approx ±7.9 × 10^28) - // SQL DECIMAL(38,0) maximum: ±10^38 - 1 - // - // This overflow typically occurs with DECIMAL(30+, scale) parameters containing values - // that exceed 29 significant digits total. - string valHex = string.Join("", Enumerable.Range(0, 16).Select(i => numeric.GetVal(i).ToString("X2"))); - throw new OverflowException( - $"SQL DECIMAL/NUMERIC value exceeds C# decimal range. " + - $"Precision={numeric.precision}, Scale={numeric.scale}, Sign={numeric.sign}, " + - $"Val={valHex}. " + - $"C# decimal supports up to 29 significant digits (±7.9×10^28). " + - $"Consider using lower precision parameters or handle large numerics differently."); - } - } - - /// - /// Converts C# decimal to SQL_NUMERIC_STRUCT. - /// Follows the same conversion logic as Java extension's BigDecimalToNumericStruct. - /// - /// The C# decimal value to convert. - /// Total number of digits (1-38). - /// Number of digits after decimal point (0-precision). - /// The equivalent SQL numeric structure for ODBC. - /// Thrown when precision or scale are out of valid range. - public static SqlNumericStruct DecimalToSqlNumericStruct(decimal value, byte precision, byte scale) - { - if (precision < 1 || precision > 38) - { - throw new ArgumentException($"Precision must be between 1 and 38, got {precision}"); - } - if (scale > precision) - { - throw new ArgumentException($"Scale ({scale}) cannot exceed precision ({precision})"); - } - - SqlNumericStruct result = new SqlNumericStruct - { - precision = precision, - scale = (sbyte)scale, - sign = (byte)(value >= 0 ? 
1 : 0) - }; - - // Work with absolute value - decimal absValue = Math.Abs(value); - - // Scale up by multiplying by 10^scale to get an integer representation - // For example, 123.45 with scale=2 becomes 12345 - decimal scaledValue; - if (scale >= 0 && scale < PowersOf10.Length) - { - scaledValue = absValue * PowersOf10[scale]; - } - else if (scale == 0) - { - scaledValue = absValue; - } - else - { - scaledValue = absValue * (decimal)Math.Pow(10, scale); - } - - // Round to nearest integer (handles any remaining fractional part due to precision limits) - scaledValue = Math.Round(scaledValue, 0, MidpointRounding.AwayFromZero); - - // Convert the scaled integer to little-endian byte array (16 bytes) - // Each byte represents one position in base-256 representation - for (int i = 0; i < 16; i++) - { - if (scaledValue > 0) - { - decimal byteValue = scaledValue % 256m; - result.SetVal(i, (byte)byteValue); - scaledValue = Math.Floor(scaledValue / 256m); - } - else - { - result.SetVal(i, 0); - } - } - - // If there's still value left after filling 16 bytes, we have overflow - if (scaledValue > 0) - { - throw new OverflowException( - $"Value {value} with precision {precision} and scale {scale} exceeds SQL_NUMERIC_STRUCT capacity"); - } - - return result; - } } } diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index c9d12e8..2367804 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -704,4 +704,76 @@ namespace ExtensionApiTest 3, // decimalDigits (scale) SQL_NULLABLE); // nullable (contains NULLs) } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalHighScaleTest + // + // Description: + // Test decimal values with scale > 28 to verify Math.Pow() fallback 
behavior. + // + // WHY: SqlNumericHelper uses a PowersOf10 lookup table for scales 0-28 for performance. + // For scales 29-38 (beyond the lookup table), it falls back to Math.Pow(10, scale). + // This test ensures: + // 1. Math.Pow fallback doesn't crash + // 2. Values are converted correctly despite potential precision loss + // 3. Edge case handling is robust for rare but valid SQL Server DECIMAL types + // + // WHAT: Tests various high scale scenarios: + // - NUMERIC(38, 30): Very small fractional value (fits in C# decimal) + // - NUMERIC(38, 35): Extremely small fractional value (1 significant digit) + // - NUMERIC(38, 38): Maximum scale with minimum value (0.00...001) + // - NUMERIC(38, 29): Boundary case at scale = 29 (first fallback case) + // + // PRACTICAL USAGE: While these extreme scales are rare in production databases, + // they're valid SQL Server types and must be handled gracefully: + // - Scientific computing: micro-fractions (e.g., atomic measurements) + // - Financial: basis points in high-precision calculations (e.g., 0.00000001%) + // - IoT/Telemetry: sensor readings with extreme precision requirements + // + TEST_F(CSharpExtensionApiTests, DecimalHighScaleTest) + { + using TestHelpers::CreateNumericStruct; + + InitializeSession( + 0, // inputSchemaColumnsNumber + 6); // parametersNumber + + // Test NUMERIC(38, 29) - boundary case at scale = 29 (first fallback to Math.Pow) + // Value: 0.00000000000000000000000000001 (1 at 29th decimal place) + SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, 38, 29, false); + InitParam(0, p0); + + // Test NUMERIC(38, 30) - scale = 30 + // Value: 0.000000000000000000000000000123 (123 scaled by 10^-30) + // Small mantissa value tests Math.Pow fallback without overflow + SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, 38, 30, false); + InitParam(1, p1); + + // Test NUMERIC(38, 35) - very high scale + // Value: 0.00000000000000000000000000000000123 (3 significant digits) + SQL_NUMERIC_STRUCT p2 = 
CreateNumericStruct(123, 38, 35, false); + InitParam(2, p2); + + // Test NUMERIC(38, 38) - maximum scale + // Value: 0.00000000000000000000000000000000000001 (1 at 38th decimal place) + // This is the smallest non-zero value representable in NUMERIC(38,38) + SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1, 38, 38, false); + InitParam(3, p3); + + // Test negative value with high scale + // Value: -0.0000000000000000000000000000001 (negative, scale 31) + SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(1, 38, 31, true); + InitParam(4, p4); + + // Test zero with high scale (should remain zero regardless of scale) + // Value: 0.00000000000000000000000000000000 (zero, scale 32) + SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, 38, 32, false); + InitParam(5, p5); + + // NOTE: This test validates that the Math.Pow() fallback in ToDecimal() + // handles scales beyond the PowersOf10 lookup table gracefully. + // While Math.Pow returns double (potential precision loss), these extreme + // scales typically occur with very small values that fit within double's + // 53-bit mantissa precision, so conversion to decimal is safe. + } } diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp index 2425c57..8da0994 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp @@ -772,6 +772,62 @@ namespace ExtensionApiTest return distance; } + //---------------------------------------------------------------------------------------------- + // Name: InitParam (Template Specialization for SQL_NUMERIC_STRUCT) + // + // Description: + // Specialized template for SQL_NUMERIC_STRUCT that correctly passes precision and scale + // from the struct to InitParam. 
The generic template passes decimalDigits=0, which + // causes InitParam to reject NUMERIC parameters with non-zero scale. + // + // Note: For output parameters with uninitialized structs (precision=0), uses defaults: + // precision=38, scale=0 to allow the C# executor to set the actual values later. + // + template<> + void CSharpExtensionApiTests::InitParam( + int paramNumber, + SQL_NUMERIC_STRUCT paramValue, + bool isNull, + SQLSMALLINT inputOutputType, + SQLRETURN SQLResult) + { + string paramName = "param" + to_string(paramNumber); + string atParam = "@" + paramName; + SQLCHAR *unsignedParamName = static_cast( + static_cast(const_cast(atParam.c_str()))); + + int paramNameLength = atParam.length(); + + SQL_NUMERIC_STRUCT *pParamValue = nullptr; + + if (!isNull) + { + pParamValue = &(paramValue); + } + + // For uninitialized structs (precision=0), use defaults for output parameters + // The C# executor will set the actual values during execution. + // NOTE: In production T-SQL, SQL Server always provides proper precision/scale metadata. + // This handles test scenarios where OUTPUT parameters are initialized with default structs. + SQLULEN precision = (isNull || paramValue.precision == 0) ? 38 : paramValue.precision; + SQLSMALLINT scale = (isNull || paramValue.precision == 0) ? 0 : paramValue.scale; + + SQLRETURN result = (*sm_initParamFuncPtr)( + *m_sessionId, + m_taskId, + paramNumber, + unsignedParamName, + paramNameLength, + SQL_C_NUMERIC, + precision, // paramSize = precision (not sizeof) + scale, // decimalDigits = scale from struct + pParamValue, // paramValue + pParamValue != nullptr ? 
sizeof(SQL_NUMERIC_STRUCT) : SQL_NULL_DATA, // strLenOrInd = 19 bytes + inputOutputType); // inputOutputType + + EXPECT_EQ(result, SQLResult); + } + // Explicit template instantiations // template void CSharpExtensionApiTests::InitParam( From a09369409087b55552fe3f363ea7c266c33c9b1f Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Mon, 16 Mar 2026 16:24:52 -0700 Subject: [PATCH 05/13] wip --- .../src/managed/utils/SqlNumericHelper.cs | 380 ++++++++++++++++++ 1 file changed, 380 insertions(+) create mode 100644 language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs new file mode 100644 index 0000000..31911a3 --- /dev/null +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -0,0 +1,380 @@ +//********************************************************************* +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +// +// @File: SqlNumericHelper.cs +// +// Purpose: +// SQL NUMERIC/DECIMAL type support: ODBC-compatible struct definition +// and bidirectional conversion between SQL_NUMERIC_STRUCT and C# decimal. +// +//********************************************************************* +using System; +using System.Linq; +using System.Runtime.InteropServices; + +namespace Microsoft.SqlServer.CSharpExtension +{ + /// + /// Helper class for converting between SQL Server NUMERIC/DECIMAL types and C# decimal. + /// Provides ODBC-compatible SQL_NUMERIC_STRUCT definition and conversion methods. + /// + public static class SqlNumericHelper + { + /// + /// Maximum number of powers of 10 in the PowersOf10 lookup table. + /// C# decimal supports up to 28-29 significant digits, so we store 10^0 through 10^28 (29 entries). + /// This covers all possible scale values (0-38) within C# decimal's precision range. 
+ /// Array index corresponds to the exponent: PowersOf10[n] = 10^n. + /// + private const int MaxPowersOf10Count = 29; + + // Powers of 10 lookup table for efficient decimal scaling (up to 10^28) + // + // Use a lookup table instead of Math.Pow because: + // - Math.Pow returns double, requiring conversion to decimal with potential precision loss. + // - Repeated Math.Pow calls in tight loops have measurable performance impact. + // - Pre-computed decimal constants give exact values with zero runtime overhead. + // - C# decimal supports up to 28-29 significant digits, so 10^0 through 10^28 covers all cases. + private static readonly decimal[] PowersOf10 = new decimal[MaxPowersOf10Count] + { + 1m, // 10^0 + 10m, // 10^1 + 100m, // 10^2 + 1000m, // 10^3 + 10000m, // 10^4 + 100000m, // 10^5 + 1000000m, // 10^6 + 10000000m, // 10^7 + 100000000m, // 10^8 + 1000000000m, // 10^9 + 10000000000m, // 10^10 + 100000000000m, // 10^11 + 1000000000000m, // 10^12 + 10000000000000m, // 10^13 + 100000000000000m, // 10^14 + 1000000000000000m, // 10^15 + 10000000000000000m, // 10^16 + 100000000000000000m, // 10^17 + 1000000000000000000m, // 10^18 + 10000000000000000000m, // 10^19 + 100000000000000000000m, // 10^20 + 1000000000000000000000m, // 10^21 + 10000000000000000000000m, // 10^22 + 100000000000000000000000m, // 10^23 + 1000000000000000000000000m, // 10^24 + 10000000000000000000000000m, // 10^25 + 100000000000000000000000000m, // 10^26 + 1000000000000000000000000000m, // 10^27 + 10000000000000000000000000000m // 10^28 + }; + + /// + /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT. + /// Used for transferring NUMERIC/DECIMAL data between SQL Server and C#. + /// IMPORTANT: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT + /// defined in sql.h/sqltypes.h on the native side. + /// + /// Why individual byte fields instead of byte[] array? 
+ /// - Using byte[] would make this a managed type (reference type), violating the unmanaged constraint + /// - Fixed buffers (fixed byte val[16]) require unsafe code, which we want to avoid for safety. + /// - Individual fields keep this as a pure value type (unmanaged) with memory safety. + /// - The compiler will optimize access patterns, so there's no performance penalty. + /// + [StructLayout(LayoutKind.Sequential, Pack = 1)] + public struct SqlNumericStruct + { + /// + /// Total number of digits (e.g., 1-38) - SQLCHAR (unsigned byte) + /// + public byte precision; + + /// + /// Number of digits after decimal point - SQLSCHAR (signed byte) + /// + /// ODBC specification defines scale as SQLSCHAR (signed char) in SQL_NUMERIC_STRUCT. + /// We must use sbyte for exact binary layout compatibility with native ODBC code. + /// Mismatch would cause struct layout corruption when marshaling to/from native code. + /// + public sbyte scale; + + /// + /// Sign indicator: 1 = positive, 0 = negative - SQLCHAR (unsigned byte) + /// + public byte sign; + + /// + /// Little-endian byte array (16 bytes) representing the scaled integer value. + /// The actual numeric value = (val as integer) * 10^(-scale), adjusted for sign. + /// Corresponds to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN = 16. + /// + /// Why 16 separate fields instead of an array? + /// - See struct-level comment: arrays would make this managed, violating unmanaged constraint. + /// - This verbose approach maintains binary compatibility without requiring unsafe code or /unsafe compiler flag. + /// + public byte val0; + public byte val1; + public byte val2; + public byte val3; + public byte val4; + public byte val5; + public byte val6; + public byte val7; + public byte val8; + public byte val9; + public byte val10; + public byte val11; + public byte val12; + public byte val13; + public byte val14; + public byte val15; + + /// + /// Helper method to get val byte at specified index (0-15). 
+ /// + /// We use switch expression instead of array indexing: + /// - Since we can't use arrays (would make struct managed), we need field access. + /// - Switch expressions are optimized by the compiler to efficient jump tables. + /// - Modern Just-In-Time compiler will inline this for zero overhead compared to array access. + /// + public byte GetVal(int index) + { + return index switch + { + 0 => val0, + 1 => val1, + 2 => val2, + 3 => val3, + 4 => val4, + 5 => val5, + 6 => val6, + 7 => val7, + 8 => val8, + 9 => val9, + 10 => val10, + 11 => val11, + 12 => val12, + 13 => val13, + 14 => val14, + 15 => val15, + _ => throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15") + }; + } + + /// + /// Helper method to set val byte at specified index (0-15). + /// + /// We use switch statement instead of array indexing: + /// - Same reason as GetVal: can't use arrays without making struct managed. + /// - Switch statement compiles to efficient code without runtime overhead. + /// + public void SetVal(int index, byte value) + { + switch (index) + { + case 0: val0 = value; break; + case 1: val1 = value; break; + case 2: val2 = value; break; + case 3: val3 = value; break; + case 4: val4 = value; break; + case 5: val5 = value; break; + case 6: val6 = value; break; + case 7: val7 = value; break; + case 8: val8 = value; break; + case 9: val9 = value; break; + case 10: val10 = value; break; + case 11: val11 = value; break; + case 12: val12 = value; break; + case 13: val13 = value; break; + case 14: val14 = value; break; + case 15: val15 = value; break; + default: throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15"); + } + } + } + + /// + /// Converts SQL_NUMERIC_STRUCT to C# decimal. + /// Follows the same conversion logic as Java extension's NumericStructToBigDecimal. + /// + /// The SQL numeric structure from ODBC. + /// The equivalent C# decimal value. + /// Thrown when the value exceeds C# decimal range. 
+ public static decimal ToDecimal(SqlNumericStruct numeric) + { + try + { + // Convert little-endian byte array (16 bytes) to a scaled integer value. + // The val array contains the absolute value scaled by 10^scale. + // For example, for numeric(10,2) value 123.45: + // scale = 2, val represents 12345 (123.45 * 10^2) + + // Build the integer value from little-endian bytes + // We read up to 16 bytes (128 bits) which can represent very large numbers. + // + // Why multiply by 256 for each byte position? + // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation. + // - Each byte represents one "digit" in base 256 (not base 10). + // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 + // - This matches how ODBC and SQL Server store NUMERIC internally. + // + // Why process from end to beginning? + // - Find the highest non-zero byte first to determine actual value size. + // - Avoids computing unnecessarily large multipliers that would overflow decimal. + // - For most practical values, only first 12-13 bytes are used. 
+ // + decimal scaledValue = 0m; + + // Find the last non-zero byte to avoid unnecessary iterations + int lastNonZeroByte = -1; + for (int i = 15; i >= 0; i--) + { + if (numeric.GetVal(i) != 0) + { + lastNonZeroByte = i; + break; + } + } + + // If all bytes are zero, return 0 + if (lastNonZeroByte == -1) + { + return 0m; + } + + // Build value from highest byte down to avoid large intermediate multipliers + // This prevents decimal overflow when processing high-precision SQL numerics + for (int i = lastNonZeroByte; i >= 0; i--) + { + scaledValue = scaledValue * 256m + numeric.GetVal(i); + } + + // Scale down by dividing by 10^scale to get the actual decimal value + decimal result; + if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) + { + result = scaledValue / PowersOf10[numeric.scale]; + } + else if (numeric.scale == 0) + { + result = scaledValue; + } + else + { + // For scales beyond our lookup table, use repeated division by 10 + // Cannot use Math.Pow(10, scale) because values > 10^28 overflow when converting double→decimal + result = scaledValue; + for (int i = 0; i < numeric.scale; i++) + { + result /= 10m; + } + } + + // Apply sign: 1 = positive, 0 = negative + if (numeric.sign == 0) + { + result = -result; + } + + return result; + } + catch (OverflowException) + { + // SQL Server DECIMAL(38,scale) can represent values much larger than C# decimal's range. + // C# decimal maximum: ±79,228,162,514,264,337,593,543,950,335 (approx ±7.9 × 10^28) + // SQL DECIMAL(38,0) maximum: ±10^38 - 1 + // + // This overflow typically occurs with DECIMAL(30+, scale) parameters containing values + // that exceed 29 significant digits total. + string valHex = string.Join("", Enumerable.Range(0, 16).Select(i => numeric.GetVal(i).ToString("X2"))); + throw new OverflowException( + $"SQL DECIMAL/NUMERIC value exceeds C# decimal range. " + + $"Precision={numeric.precision}, Scale={numeric.scale}, Sign={numeric.sign}, " + + $"Val={valHex}. 
" + + $"C# decimal supports up to 29 significant digits (±7.9×10^28). " + + $"Consider using lower precision parameters or handle large numerics differently."); + } + } + + /// + /// Converts C# decimal to SQL_NUMERIC_STRUCT. + /// Follows the same conversion logic as Java extension's BigDecimalToNumericStruct. + /// + /// The C# decimal value to convert. + /// Total number of digits (1-38). + /// Number of digits after decimal point (0-precision). + /// The equivalent SQL numeric structure for ODBC. + /// Thrown when precision or scale are out of valid range. + public static SqlNumericStruct FromDecimal(decimal value, byte precision, byte scale) + { + if (precision < 1 || precision > 38) + { + throw new ArgumentException($"Precision must be between 1 and 38, got {precision}"); + } + if (scale > precision) + { + throw new ArgumentException($"Scale ({scale}) cannot exceed precision ({precision})"); + } + + SqlNumericStruct result = new SqlNumericStruct + { + precision = precision, + scale = (sbyte)scale, + sign = (byte)(value >= 0 ? 
1 : 0) + }; + + // Work with absolute value + decimal absValue = Math.Abs(value); + + // Scale up by multiplying by 10^scale to get an integer representation + // For example, 123.45 with scale=2 becomes 12345 + decimal scaledValue; + if (scale >= 0 && scale < PowersOf10.Length) + { + scaledValue = absValue * PowersOf10[scale]; + } + else if (scale == 0) + { + scaledValue = absValue; + } + else + { + // For scales beyond our lookup table, use repeated multiplication by 10 + // Cannot use Math.Pow(10, scale) because values > 10^28 overflow when converting double→decimal + scaledValue = absValue; + for (int i = 0; i < scale; i++) + { + scaledValue *= 10m; + } + } + + // Round to nearest integer (handles any remaining fractional part due to precision limits) + scaledValue = Math.Round(scaledValue, 0, MidpointRounding.AwayFromZero); + + // Convert the scaled integer to little-endian byte array (16 bytes) + // Each byte represents one position in base-256 representation + for (int i = 0; i < 16; i++) + { + if (scaledValue > 0) + { + decimal byteValue = scaledValue % 256m; + result.SetVal(i, (byte)byteValue); + scaledValue = Math.Floor(scaledValue / 256m); + } + else + { + result.SetVal(i, 0); + } + } + + // If there's still value left after filling 16 bytes, we have overflow + if (scaledValue > 0) + { + throw new OverflowException( + $"Value {value} with precision {precision} and scale {scale} exceeds SQL_NUMERIC_STRUCT capacity"); + } + + return result; + } + } +} From 96163ff5b01b396f3a3a898ef22557f68b1134ec Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Mon, 16 Mar 2026 17:59:25 -0700 Subject: [PATCH 06/13] removing unncessary unsafe methods --- .../src/managed/CSharpInputDataSet.cs | 7 +- .../src/managed/CSharpOutputDataSet.cs | 13 +- .../src/managed/utils/Sql.cs | 6 +- .../src/managed/utils/SqlNumericHelper.cs | 102 +++++------ .../test/src/managed/CSharpTestExecutor.cs | 37 ++++ .../test/src/native/CSharpDecimalTests.cpp | 159 ++++++++++++++++++ 6 
files changed, 262 insertions(+), 62 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs index f712f51..0a8a5a6 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs @@ -193,11 +193,10 @@ private unsafe void AddDataFrameColumn( /// /// This method adds NUMERIC/DECIMAL column data by converting from SQL_NUMERIC_STRUCT /// to C# decimal values, creating a PrimitiveDataFrameColumn, and adding it to the DataFrame. - /// Follows the same pattern as Java extension's numeric handling. /// /// The column index. /// Number of rows in this column. - /// Pointer to array of SQL_NUMERIC_STRUCT structures (19 bytes each). + /// Pointer to array of SQL_NUMERIC_STRUCT structures. /// Pointer to null indicator array (SQL_NULL_DATA for null values). private unsafe void AddNumericDataFrameColumn( ushort columnNumber, @@ -218,7 +217,7 @@ private unsafe void AddNumericDataFrameColumn( { // Check if this row has a null value // - // WHY check both Nullable == 0 and SQL_NULL_DATA? + // Why check both Nullable == 0 and SQL_NULL_DATA? 
// - Nullable == 0 means column is declared NOT NULL (cannot contain nulls) // - For NOT NULL columns, skip null checking for performance (nullSpan[i] is undefined) // - For nullable columns (Nullable != 0), check if nullSpan[i] == SQL_NULL_DATA (-1) @@ -226,10 +225,8 @@ private unsafe void AddNumericDataFrameColumn( if (_columns[columnNumber].Nullable == 0 || nullSpan[i] != SQL_NULL_DATA) { // Convert SQL_NUMERIC_STRUCT to C# decimal - // The conversion handles precision, scale, sign, and the 16-byte integer value colDataFrame[i] = ToDecimal(numericArray[i]); } - // If null, the PrimitiveDataFrameColumn slot remains as null } CSharpDataFrame.Columns.Add(colDataFrame); diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 021f923..50037c7 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -207,7 +207,7 @@ DataFrameColumn column /// /// This method sets data pointer for the column and append the array to the handle list. /// - private unsafe void SetDataPtrs( + private void SetDataPtrs( ushort columnNumber, T[] array ) where T : unmanaged @@ -220,19 +220,19 @@ T[] array /// /// This method extracts NUMERIC/DECIMAL column data by converting C# decimal values /// to SQL_NUMERIC_STRUCT array, pinning it, and storing the pointer. - /// Follows the same pattern as Java extension's numeric handling. /// /// The column index. /// The DataFrameColumn containing decimal values. - private unsafe void ExtractNumericColumn( + private void ExtractNumericColumn( ushort columnNumber, DataFrameColumn column) { if (column == null) { SetDataPtrs(columnNumber, Array.Empty()); - return; } + else + { // For NUMERIC/DECIMAL, we need to determine appropriate precision and scale from the data. // SQL Server supports precision 1-38 and scale 0-precision. 
@@ -341,8 +341,9 @@ private unsafe void ExtractNumericColumn( } } - // Pin the SqlNumericStruct array and store pointer - SetDataPtrs(columnNumber, numericArray); + // Pin the SqlNumericStruct array and store pointer + SetDataPtrs(columnNumber, numericArray); + } } /// diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index e1ec618..199e382 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -11,6 +11,7 @@ //********************************************************************* using System; using System.Collections.Generic; +using System.Runtime.InteropServices; namespace Microsoft.SqlServer.CSharpExtension { @@ -30,10 +31,11 @@ public class Sql /// /// Size of SQL_NUMERIC_STRUCT in bytes (ODBC specification). - /// Layout: precision(1) + scale(1) + sign(1) + val[16] = 19 bytes + /// Calculated from SqlNumericHelper.SqlNumericStruct layout: + /// precision(1) + scale(1) + sign(1) + val0-val15(16) = 19 bytes. /// Must match the exact size of ODBC's SQL_NUMERIC_STRUCT for binary compatibility. /// - public const short SqlNumericStructSize = 19; + public static readonly short SqlNumericStructSize = (short)Marshal.SizeOf(); public enum SqlDataType: short { diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index 31911a3..909f192 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -22,7 +22,7 @@ namespace Microsoft.SqlServer.CSharpExtension public static class SqlNumericHelper { /// - /// Maximum number of powers of 10 in the PowersOf10 lookup table. + /// Maximum number of powers of 10 in the "PowersOf10" lookup table. 
/// C# decimal supports up to 28-29 significant digits, so we store 10^0 through 10^28 (29 entries). /// This covers all possible scale values (0-38) within C# decimal's precision range. /// Array index corresponds to the exponent: PowersOf10[n] = 10^n. @@ -195,37 +195,32 @@ public void SetVal(int index, byte value) /// /// Converts SQL_NUMERIC_STRUCT to C# decimal. - /// Follows the same conversion logic as Java extension's NumericStructToBigDecimal. /// /// The SQL numeric structure from ODBC. /// The equivalent C# decimal value. /// Thrown when the value exceeds C# decimal range. public static decimal ToDecimal(SqlNumericStruct numeric) { + decimal result; + try { // Convert little-endian byte array (16 bytes) to a scaled integer value. // The val array contains the absolute value scaled by 10^scale. // For example, for numeric(10,2) value 123.45: // scale = 2, val represents 12345 (123.45 * 10^2) - - // Build the integer value from little-endian bytes - // We read up to 16 bytes (128 bits) which can represent very large numbers. - // - // Why multiply by 256 for each byte position? - // - SQL_NUMERIC_STRUCT stores the value as little-endian base-256 representation. - // - Each byte represents one "digit" in base 256 (not base 10). - // - Example: bytes [0x39, 0x30] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 - // - This matches how ODBC and SQL Server store NUMERIC internally. // - // Why process from end to beginning? - // - Find the highest non-zero byte first to determine actual value size. - // - Avoids computing unnecessarily large multipliers that would overflow decimal. - // - For most practical values, only first 12-13 bytes are used. + // Little-endian storage layout: + // - val[0] = least significant byte (LSB) + // - val[15] = most significant byte (MSB) + // - Each byte represents one "digit" in base-256 representation + // - Example: bytes [0x39, 0x30, 0x00, ...] 
= 0x39 + (0x30 * 256) = 57 + 12288 = 12345 // decimal scaledValue = 0m; - // Find the last non-zero byte to avoid unnecessary iterations + // Find the most significant non-zero byte (highest index) to optimize the conversion. + // This avoids processing unnecessary high-order zero bytes and prevents potential + // overflow when building large values. Most practical values use only 12-13 bytes. int lastNonZeroByte = -1; for (int i = 15; i >= 0; i--) { @@ -236,47 +231,54 @@ public static decimal ToDecimal(SqlNumericStruct numeric) } } - // If all bytes are zero, return 0 + // If all bytes are zero, result is 0 if (lastNonZeroByte == -1) { - return 0m; - } - - // Build value from highest byte down to avoid large intermediate multipliers - // This prevents decimal overflow when processing high-precision SQL numerics - for (int i = lastNonZeroByte; i >= 0; i--) - { - scaledValue = scaledValue * 256m + numeric.GetVal(i); - } - - // Scale down by dividing by 10^scale to get the actual decimal value - decimal result; - if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) - { - result = scaledValue / PowersOf10[numeric.scale]; - } - else if (numeric.scale == 0) - { - result = scaledValue; + result = 0m; } else { - // For scales beyond our lookup table, use repeated division by 10 - // Cannot use Math.Pow(10, scale) because values > 10^28 overflow when converting double→decimal - result = scaledValue; - for (int i = 0; i < numeric.scale; i++) + // Build the integer value by processing from MSB (highest index) to LSB (index 0). + // Algorithm: Start with MSB, then for each subsequent byte toward LSB, + // multiply current value by 256 and add the next byte. + // This approach avoids large intermediate multipliers that could overflow decimal. 
+ for (int i = lastNonZeroByte; i >= 0; i--) { - result /= 10m; + scaledValue = scaledValue * 256m + numeric.GetVal(i); } - } - // Apply sign: 1 = positive, 0 = negative - if (numeric.sign == 0) - { - result = -result; - } + // Scale down by dividing by 10^scale to get the actual decimal value. + // The scaledValue contains the integer representation; we need to divide by 10^scale. + // For example, if scaledValue=12345 and scale=2, result = 12345 / 100 = 123.45 + if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) + { + // Use pre-computed lookup table for scales 0-28 (fast path) + result = scaledValue / PowersOf10[numeric.scale]; + } + else if (numeric.scale == 0) + { + // No scaling needed - value is already an integer + result = scaledValue; + } + else + { + // For scales beyond our lookup table (29-38), use repeated division by 10. + // We cannot use Math.Pow(10, scale) because: + // - Math.Pow returns double, and values > 10^28 overflow when converting double→decimal + // - Repeated division maintains decimal precision without overflow + result = scaledValue; + for (int i = 0; i < numeric.scale; i++) + { + result /= 10m; + } + } - return result; + // Apply sign: 1 = positive, 0 = negative + if (numeric.sign == 0) + { + result = -result; + } + } } catch (OverflowException) { @@ -294,6 +296,8 @@ public static decimal ToDecimal(SqlNumericStruct numeric) $"C# decimal supports up to 29 significant digits (±7.9×10^28). " + $"Consider using lower precision parameters or handle large numerics differently."); } + + return result; } /// @@ -319,7 +323,7 @@ public static SqlNumericStruct FromDecimal(decimal value, byte precision, byte s SqlNumericStruct result = new SqlNumericStruct { precision = precision, - scale = (sbyte)scale, + scale = (sbyte)scale, // Safe cast: scale validated and the max is 38 < 127. sign = (byte)(value >= 0 ? 
1 : 0) }; diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs index 5ec726b..ea1018c 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs @@ -141,6 +141,43 @@ public override DataFrame Execute(DataFrame input, Dictionary s } } + /// + /// Test executor for decimal OUTPUT parameters with maximum precision (29 digits). + /// Tests the FromDecimal() conversion for values at the edge of C# decimal's capability. + /// + /// Note: C# decimal normalizes values - the scale is determined by the value's actual + /// precision requirements, not by a declared scale. This tests high-precision conversions. + /// + public class CSharpTestExecutorDecimalHighScaleParam: AbstractSqlServerExtensionExecutor + { + public override DataFrame Execute(DataFrame input, Dictionary sqlParams) + { + // Set high-precision decimal values (29 significant digits total) + // These exercise the FromDecimal() conversion for C# decimal's maximum capability + // C# decimal can represent values with up to 29 significant digits + + // param0: Maximum precision with integer and fractional parts + sqlParams["@param0"] = 12345678901234567.890123456789m; // 29 total digits + + // param1: Large fractional precision + sqlParams["@param1"] = 1.2345678901234567890123456789m; // 29 total digits + + // param2: Different high-precision pattern + sqlParams["@param2"] = 123.45678901234567890123456789m; // 29 total digits + + // param3: Maximum fractional precision + sqlParams["@param3"] = 0.12345678901234567890123456789m; // 29 total digits + + // param4: Negative high-precision value + sqlParams["@param4"] = -987.65432109876543210987654321m; // 29 total digits + + // param5: Zero value for validation + sqlParams["@param5"] = 0.0m; + + return null; + } + } + public class 
CSharpTestExecutorStringParam: AbstractSqlServerExtensionExecutor { public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 2367804..6fb7f8a 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -776,4 +776,163 @@ namespace ExtensionApiTest // scales typically occur with very small values that fit within double's // 53-bit mantissa precision, so conversion to decimal is safe. } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalOverflowTest + // + // Description: + // Test that values exceeding C# decimal range throw OverflowException. + // C# decimal max: ±79,228,162,514,264,337,593,543,950,335 (~7.9 × 10^28) + // SQL DECIMAL(38,0) max: ±10^38 - 1 + // + // This test verifies the exception path in SqlNumericHelper.ToDecimal() when + // converting SQL NUMERIC values that exceed C# decimal's 29-significant-digit limit. + // + TEST_F(CSharpExtensionApiTests, DecimalOverflowTest) + { + InitializeSession( + 0, // inputSchemaColumnsNumber + 2); // parametersNumber + + // Create SQL_NUMERIC_STRUCT with value exceeding C# decimal.MaxValue + // We'll construct a DECIMAL(38,0) with value ~10^38 by setting high-order bytes + // to non-zero values that will overflow when building scaledValue in ToDecimal() + // + // Strategy: Set bytes val[13..15] (upper 3 bytes) to create a value > 7.9 × 10^28 + // This represents a number too large for C# decimal's 96-bit mantissa. 
+ SQL_NUMERIC_STRUCT overflowPositive{}; + overflowPositive.precision = 38; + overflowPositive.scale = 0; + overflowPositive.sign = 1; // positive + + // Set upper bytes to create a large value: + // val[15] = 0x4B (75 decimal) means the value is approximately 75 * 256^15 + // which equals approximately 4.9 × 10^37, well above decimal.MaxValue (~7.9 × 10^28) + overflowPositive.val[15] = 0x4B; // High byte + overflowPositive.val[14] = 0x3B; // Medium-high byte + overflowPositive.val[13] = 0x9A; // Medium byte + // Leave lower bytes as zero for simplicity + + // This should fail when C# extension tries to convert to decimal + // The OverflowException from ToDecimal() will propagate as SQL_ERROR + InitParam( + 0, // paramNumber + overflowPositive, // paramValue (too large for C# decimal) + false, // isNull + SQL_PARAM_INPUT_OUTPUT, // inputOutputType + SQL_ERROR); // expected return: SQL_ERROR + + // Test negative overflow as well + SQL_NUMERIC_STRUCT overflowNegative{}; + overflowNegative.precision = 38; + overflowNegative.scale = 0; + overflowNegative.sign = 0; // negative + + // Same large value bytes as above, but negative + overflowNegative.val[15] = 0x4B; + overflowNegative.val[14] = 0x3B; + overflowNegative.val[13] = 0x9A; + + InitParam( + 1, // paramNumber + overflowNegative, // paramValue (too large for C# decimal) + false, // isNull + SQL_PARAM_INPUT_OUTPUT, // inputOutputType + SQL_ERROR); // expected return: SQL_ERROR + + // NOTE: This test confirms that the OverflowException catch block in + // SqlNumericHelper.ToDecimal() is reachable and provides useful diagnostics + // (precision, scale, sign, val hex dump) when SQL values exceed C# decimal range. 
+ } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalHighPrecisionOutputParamTest + // + // Description: + // Test decimal OUTPUT parameters with maximum precision (29 digits) to exercise + // the FromDecimal() conversion for values at the edge of C# decimal's capability. + // Note: C# decimal normalizes values, so we test precision rather than forcing specific scales. + // + TEST_F(CSharpExtensionApiTests, DecimalHighPrecisionOutputParamTest) + { + int paramsNumber = 6; + + string userClassFullName = "Microsoft.SqlServer.CSharpExtensionTest.CSharpTestExecutorDecimalHighScaleParam"; + string scriptString = m_UserLibName + m_Separator + userClassFullName; + + InitializeSession( + 0, // inputSchemaColumnsNumber + paramsNumber, // parametersNumber + scriptString); // scriptString + + // Initialize all parameters as OUTPUT parameters + // The C# executor will set high-precision decimal values + for(int i = 0; i < paramsNumber; ++i) + { + InitParam( + i, // paramNumber + SQL_NUMERIC_STRUCT(), // paramValue (will be set by C# executor) + false, // isNull + SQL_PARAM_INPUT_OUTPUT); // inputOutputType + } + + SQLUSMALLINT outputSchemaColumnsNumber = 0; + SQLRETURN result = (*sm_executeFuncPtr)( + *m_sessionId, + m_taskId, + 0, // rowsNumber + nullptr, // dataSet + nullptr, // strLen_or_Ind + &outputSchemaColumnsNumber); + ASSERT_EQ(result, SQL_SUCCESS); + + EXPECT_EQ(outputSchemaColumnsNumber, 0); + + // Expected sizes: all non-null parameters have size = sizeof(SQL_NUMERIC_STRUCT) = 19 bytes + vector expectedStrLenOrInd(paramsNumber, 19); + + // Verify that the parameters we get back have valid structure + // This validates the conversion from C# decimal to SQL_NUMERIC_STRUCT + // for high-precision values at the edge of C# decimal's capability (29 digits) + // + for (int i = 0; i < paramsNumber; ++i) + { + SQLPOINTER paramValue = nullptr; + SQLINTEGER strLenOrInd = 0; + + SQLRETURN result = 
(*sm_getOutputParamFuncPtr)( + *m_sessionId, + m_taskId, + i, + ¶mValue, + &strLenOrInd); + + ASSERT_EQ(result, SQL_SUCCESS); + EXPECT_EQ(strLenOrInd, expectedStrLenOrInd[i]); + + ASSERT_NE(paramValue, nullptr); + SQL_NUMERIC_STRUCT* numericValue = static_cast(paramValue); + + // Validate struct integrity + EXPECT_GE(numericValue->precision, 1); + EXPECT_LE(numericValue->precision, 38); + EXPECT_GE(numericValue->scale, 0); + EXPECT_LE(numericValue->scale, numericValue->precision); + EXPECT_TRUE(numericValue->sign == 0 || numericValue->sign == 1); + + // For high-precision decimal values (29 digits), expect high precision/scale + // C# decimal can represent up to 29 significant digits + if (i < paramsNumber - 1) // All except zero (param5) + { + // High precision values should have relatively high precision settings + EXPECT_GE(numericValue->precision, 20) << "Parameter " << i << " should have high precision"; + } + } + + // NOTE: This test exercises the FromDecimal() conversion for maximum-precision + // C# decimal values. While we can't force scale 29-38 through OUTPUT parameters + // (since C# decimal normalizes values), we verify that high-precision decimals + // convert correctly through the FromDecimal() path, which includes the repeated + // multiplication fallback for scales beyond the PowersOf10 lookup table. 
+ } } From 7f3211e7dafb706a45e3e2f595985f11c780e446 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Thu, 19 Mar 2026 16:36:08 -0700 Subject: [PATCH 07/13] using sqlDecimal --- .../src/managed/CSharpInputDataSet.cs | 21 +- .../src/managed/CSharpOutputDataSet.cs | 71 ++--- .../src/managed/CSharpParamContainer.cs | 79 +++-- ...Microsoft.SqlServer.CSharpExtension.csproj | 1 + .../src/managed/utils/Sql.cs | 3 +- .../src/managed/utils/SqlNumericHelper.cs | 301 ++++++------------ .../test/src/managed/CSharpTestExecutor.cs | 59 ++-- ...osoft.SqlServer.CSharpExtensionTest.csproj | 1 + .../test/src/native/CSharpDecimalTests.cpp | 65 ++-- 9 files changed, 246 insertions(+), 355 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs index 0a8a5a6..00ab297 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpInputDataSet.cs @@ -9,6 +9,7 @@ // //********************************************************************* using System; +using System.Data.SqlTypes; using Microsoft.Data.Analysis; using static Microsoft.SqlServer.CSharpExtension.Sql; using static Microsoft.SqlServer.CSharpExtension.SqlNumericHelper; @@ -192,7 +193,11 @@ private unsafe void AddDataFrameColumn( /// /// This method adds NUMERIC/DECIMAL column data by converting from SQL_NUMERIC_STRUCT - /// to C# decimal values, creating a PrimitiveDataFrameColumn, and adding it to the DataFrame. + /// to SqlDecimal values (full 38-digit precision), creating a PrimitiveDataFrameColumn, + /// and adding it to the DataFrame. + /// + /// IMPORTANT: We use SqlDecimal throughout to support SQL Server's full 38-digit precision. + /// C# decimal is NOT used to avoid 28-digit precision limitations and potential data loss. /// /// The column index. /// Number of rows in this column. 
@@ -207,11 +212,12 @@ private unsafe void AddNumericDataFrameColumn( // Cast the raw pointer to SQL_NUMERIC_STRUCT array SqlNumericStruct* numericArray = (SqlNumericStruct*)colData; - // Create a DataFrame column for decimal values - PrimitiveDataFrameColumn colDataFrame = - new PrimitiveDataFrameColumn(_columns[columnNumber].Name, (int)rowsNumber); + // Create a DataFrame column for SqlDecimal values + // Using SqlDecimal instead of decimal provides full SQL Server precision (38 digits) + PrimitiveDataFrameColumn colDataFrame = + new PrimitiveDataFrameColumn(_columns[columnNumber].Name, (int)rowsNumber); - // Convert each SQL_NUMERIC_STRUCT to decimal, handling nulls + // Convert each SQL_NUMERIC_STRUCT to SqlDecimal, handling nulls Span nullSpan = new Span(colMap, (int)rowsNumber); for (int i = 0; i < (int)rowsNumber; ++i) { @@ -224,9 +230,10 @@ private unsafe void AddNumericDataFrameColumn( // - This matches the pattern used by other numeric types in the codebase if (_columns[columnNumber].Nullable == 0 || nullSpan[i] != SQL_NULL_DATA) { - // Convert SQL_NUMERIC_STRUCT to C# decimal - colDataFrame[i] = ToDecimal(numericArray[i]); + // Convert SQL_NUMERIC_STRUCT to SqlDecimal with full precision support + colDataFrame[i] = ToSqlDecimal(numericArray[i]); } + // else: leave as null (default for nullable primitive column) } CSharpDataFrame.Columns.Add(colDataFrame); diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 50037c7..2739818 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -9,6 +9,7 @@ // //********************************************************************* using System; +using System.Data.SqlTypes; using System.Linq; using System.Text; using System.Runtime.InteropServices; @@ -218,11 +219,11 @@ T[] array } /// - /// This 
method extracts NUMERIC/DECIMAL column data by converting C# decimal values + /// This method extracts NUMERIC/DECIMAL column data by converting SqlDecimal values /// to SQL_NUMERIC_STRUCT array, pinning it, and storing the pointer. /// /// The column index. - /// The DataFrameColumn containing decimal values. + /// The DataFrameColumn containing SqlDecimal values. private void ExtractNumericColumn( ushort columnNumber, DataFrameColumn column) @@ -234,62 +235,29 @@ private void ExtractNumericColumn( else { - // For NUMERIC/DECIMAL, we need to determine appropriate precision and scale from the data. - // SQL Server supports precision 1-38 and scale 0-precision. - // We'll calculate both precision and scale by examining the actual decimal values. - // - // WHY calculate from data instead of hardcoding? - // - The extension doesn't have access to the input column's original precision - // - SQL Server validates returned precision against WITH RESULT SETS declaration - // - Using precision=38 for all values causes "Invalid data for type numeric" errors - // - We must calculate the minimum precision needed to represent the data + // Extract precision and scale from SqlDecimal values. + // SqlDecimal from Microsoft.Data.SqlClient preserves precision/scale metadata, + // so we find the maximum precision and scale across all non-null values. // byte precision = 0; byte scale = (byte)_columns[columnNumber].DecimalDigits; - // Calculate precision and scale by examining all non-null values - // We need to find the maximum precision and scale to ensure no data loss - // - // WHY examine ALL rows instead of just sampling? - // - A previous implementation only checked first 10 rows (optimization attempt) - // - This caused data loss when higher-precision values appeared later in the dataset - // - Example: rows 1-10 need precision 6, but row 100 needs precision 14 - // - If we use precision=6 for the entire column, row 100 gets truncated (data loss!) 
- // - Must examine ALL rows to find maximum precision and scale + // Examine all rows to find maximum precision and scale + // This ensures we preserve the highest precision/scale present in the data // for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) { if (column[rowNumber] != null) { - decimal value = (decimal)column[rowNumber]; - - // Get the scale from the decimal value itself - // Scale is in bits 16-23 of flags field (bits[3]) - int[] bits = decimal.GetBits(value); - byte valueScale = (byte)((bits[3] >> 16) & 0x7F); - scale = Math.Max(scale, valueScale); - - // Calculate precision by counting significant digits - // Remove the scale (decimal places) to get the integer part, - // then count digits in both parts - decimal absValue = Math.Abs(value); - decimal integerPart = Math.Truncate(absValue); + SqlDecimal value = (SqlDecimal)column[rowNumber]; - // Count digits in integer part (or 1 if zero) - byte integerDigits; - if (integerPart == 0) + // SqlDecimal already carries precision/scale metadata from the input + // Use it directly - no need to recalculate + if (!value.IsNull) { - integerDigits = 1; + scale = Math.Max(scale, value.Scale); + precision = Math.Max(precision, value.Precision); } - else - { - // Log10 gives us the magnitude, +1 for digit count - integerDigits = (byte)(Math.Floor(Math.Log10((double)integerPart)) + 1); - } - - // Precision = digits before decimal + digits after decimal - byte valuePrecision = (byte)(integerDigits + valueScale); - precision = Math.Max(precision, valuePrecision); } } @@ -303,7 +271,7 @@ private void ExtractNumericColumn( precision = scale; } - // Update column metadata with calculated precision and scale + // Update column metadata with extracted precision and scale // Size contains the precision for DECIMAL/NUMERIC types (not bytes) // DecimalDigits contains the scale _columns[columnNumber].Size = precision; @@ -311,14 +279,17 @@ private void ExtractNumericColumn( Logging.Trace($"ExtractNumericColumn: 
Column {columnNumber}, Precision={precision}, Scale={scale}, RowCount={column.Length}"); - // Convert each decimal value to SQL_NUMERIC_STRUCT + // Convert each SqlDecimal value to SQL_NUMERIC_STRUCT SqlNumericStruct[] numericArray = new SqlNumericStruct[column.Length]; for (int rowNumber = 0; rowNumber < column.Length; ++rowNumber) { if (column[rowNumber] != null) { - decimal value = (decimal)column[rowNumber]; - numericArray[rowNumber] = FromDecimal(value, precision, scale); + SqlDecimal value = (SqlDecimal)column[rowNumber]; + + // Convert SqlDecimal directly to SQL_NUMERIC_STRUCT with full precision support + // FromSqlDecimal handles scale adjustment if needed to match target precision/scale + numericArray[rowNumber] = FromSqlDecimal(value, precision, scale); Logging.Trace($"ExtractNumericColumn: Row {rowNumber}, Value={value} converted to SqlNumericStruct"); } else diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index 3d2b3e3..db494bd 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -9,6 +9,7 @@ // //************************************************************************************************* using System; +using System.Data.SqlTypes; using System.Runtime; using System.Text; using System.Collections.Generic; @@ -134,9 +135,20 @@ public unsafe void AddParam( _params[paramNumber].Value = *(bool*)paramValue; break; case SqlDataType.DotNetNumeric: - // Convert SQL_NUMERIC_STRUCT to C# decimal + // Convert SQL_NUMERIC_STRUCT to SqlDecimal + // Special handling for OUTPUT parameters: if precision=0, treat as uninitialized SqlNumericStruct* numericPtr = (SqlNumericStruct*)paramValue; - _params[paramNumber].Value = ToDecimal(*numericPtr); + if (numericPtr->precision == 0) + { + // OUTPUT parameter with uninitialized struct - 
use SqlDecimal.Null + // The C# executor will set the actual value + _params[paramNumber].Value = SqlDecimal.Null; + } + else + { + // INPUT or INPUT_OUTPUT parameter with valid value + _params[paramNumber].Value = ToSqlDecimal(*numericPtr); + } break; case SqlDataType.DotNetChar: _params[paramNumber].Value = Interop.UTF8PtrToStr((char*)paramValue, (ulong)strLenOrNullMap); @@ -174,7 +186,17 @@ public unsafe void ReplaceParam( _params[paramNumber].Value = paramValue_; CSharpParam param = _params[paramNumber]; - if(param.Value == null) + + // Use null-coalescing pattern for safer null checking with value types + // SqlDecimal is a struct, so we need to check both object null and SqlDecimal.IsNull + if(ReferenceEquals(param.Value, null)) + { + *strLenOrNullMap = SQL_NULL_DATA; + return; + } + + // Special handling for SqlDecimal.Null (SqlDecimal is a struct, not a class) + if(param.DataType == SqlDataType.DotNetNumeric && param.Value is SqlDecimal sqlDecVal && sqlDecVal.IsNull) { *strLenOrNullMap = SQL_NULL_DATA; return; @@ -221,23 +243,30 @@ public unsafe void ReplaceParam( ReplaceNumericParam(boolValue, paramValue); break; case SqlDataType.DotNetNumeric: - // Convert C# decimal to SQL_NUMERIC_STRUCT + // Convert SqlDecimal to SQL_NUMERIC_STRUCT // Use the precision and scale from the parameter metadata - decimal decimalValue = Convert.ToDecimal(param.Value); - // WHY use param.Size for precision? - // - For DECIMAL/NUMERIC parameters, param.Size contains the declared precision (not bytes) - // - This follows standard ODBC behavior where ColumnSize = precision for SQL_NUMERIC/SQL_DECIMAL - // - CRITICAL: The SqlNumericStruct precision MUST match the declared parameter precision - // or SQL Server rejects it with "Invalid data for type decimal" (Msg 9803) - // - Example: DECIMAL(3,3) parameter MUST have precision=3 in the struct, not precision=38 - byte precision = (byte)param.Size; - byte scale = (byte)param.DecimalDigits; - // WHY set strLenOrNullMap to 19? 
- // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size - // - SQL_NUMERIC_STRUCT is exactly 19 bytes: precision(1) + scale(1) + sign(1) + val(16) - // - This tells ODBC how many bytes to read from the paramValue pointer - *strLenOrNullMap = 19; // sizeof(SqlNumericStruct) - ReplaceNumericStructParam(decimalValue, precision, scale, paramValue); + // Note: param.Value could be SqlDecimal or potentially null (handled above) + if (param.Value is SqlDecimal sqlDecimalValue) + { + // WHY use param.Size for precision? + // - For DECIMAL/NUMERIC parameters, param.Size contains the declared precision (not bytes) + // - This follows standard ODBC behavior where ColumnSize = precision for SQL_NUMERIC/SQL_DECIMAL + // - CRITICAL: The SqlNumericStruct precision MUST match the declared parameter precision + // or SQL Server rejects it with "Invalid data for type decimal" (Msg 9803) + // - Example: DECIMAL(3,3) parameter MUST have precision=3 in the struct, not precision=38 + byte precision = (byte)param.Size; + byte scale = (byte)param.DecimalDigits; + // WHY set strLenOrNullMap to 19? + // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size + // - SQL_NUMERIC_STRUCT is exactly 19 bytes: precision(1) + scale(1) + sign(1) + val(16) + // - This tells ODBC how many bytes to read from the paramValue pointer + *strLenOrNullMap = 19; // sizeof(SqlNumericStruct) + ReplaceNumericStructParam(sqlDecimalValue, precision, scale, paramValue); + } + else + { + throw new InvalidCastException($"Expected SqlDecimal for NUMERIC parameter, got {param.Value?.GetType().Name ?? "null"}"); + } break; case SqlDataType.DotNetChar: // For CHAR/VARCHAR, strLenOrNullMap is in bytes (1 byte per character for ANSI). @@ -302,21 +331,21 @@ private unsafe void ReplaceNumericParam( /// /// This method replaces parameter value for NUMERIC/DECIMAL data types. - /// Converts C# decimal to SQL_NUMERIC_STRUCT and uses proper memory pinning. 
+ /// Converts SqlDecimal to SQL_NUMERIC_STRUCT and uses proper memory pinning. /// Follows the same pattern as Java extension's numeric parameter handling. /// - /// The C# decimal value to convert. + /// The SqlDecimal value to convert. /// Total number of digits (1-38). /// Number of digits after decimal point (0-precision). /// Output pointer to receive the pinned SqlNumericStruct. private unsafe void ReplaceNumericStructParam( - decimal value, + SqlDecimal value, byte precision, byte scale, void **paramValue) { - // Convert C# decimal to SQL_NUMERIC_STRUCT - SqlNumericStruct numericStruct = FromDecimal(value, precision, scale); + // Convert SqlDecimal to SQL_NUMERIC_STRUCT + SqlNumericStruct numericStruct = FromSqlDecimal(value, precision, scale); // Box the struct into a single-element array to create a heap-allocated copy, then pin it. // @@ -341,7 +370,7 @@ private unsafe void ReplaceNumericStructParam( _handleList.Add(handle); *paramValue = (void*)handle.AddrOfPinnedObject(); - Logging.Trace($"ReplaceNumericStructParam: Converted decimal {value} to SqlNumericStruct (precision={precision}, scale={scale})"); + Logging.Trace($"ReplaceNumericStructParam: Converted SqlDecimal {value} to SqlNumericStruct (precision={precision}, scale={scale})"); } /// diff --git a/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj b/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj index 3b8c8e4..82cb3f6 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj +++ b/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj @@ -11,5 +11,6 @@ + \ No newline at end of file diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index 199e382..1279395 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs 
+++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -11,6 +11,7 @@ //********************************************************************* using System; using System.Collections.Generic; +using System.Data.SqlTypes; using System.Runtime.InteropServices; namespace Microsoft.SqlServer.CSharpExtension @@ -79,7 +80,7 @@ public enum SqlDataType: short {typeof(double), SqlDataType.DotNetDouble}, {typeof(bool), SqlDataType.DotNetBit}, {typeof(string), SqlDataType.DotNetChar}, - {typeof(decimal), SqlDataType.DotNetNumeric} + {typeof(SqlDecimal), SqlDataType.DotNetNumeric} }; /// diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index 909f192..9ddd653 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -6,68 +6,25 @@ // // Purpose: // SQL NUMERIC/DECIMAL type support: ODBC-compatible struct definition -// and bidirectional conversion between SQL_NUMERIC_STRUCT and C# decimal. +// and bidirectional conversion between SQL_NUMERIC_STRUCT and SqlDecimal. // //********************************************************************* using System; +using System.Data.SqlTypes; using System.Linq; using System.Runtime.InteropServices; namespace Microsoft.SqlServer.CSharpExtension { /// - /// Helper class for converting between SQL Server NUMERIC/DECIMAL types and C# decimal. + /// Helper class for converting between SQL Server NUMERIC/DECIMAL types and SqlDecimal. /// Provides ODBC-compatible SQL_NUMERIC_STRUCT definition and conversion methods. + /// + /// IMPORTANT: This implementation uses SqlDecimal from Microsoft.Data.SqlClient which supports + /// full SQL Server precision (38 digits). C# decimal is NOT used to avoid 28-digit limitations. 
/// public static class SqlNumericHelper { - /// - /// Maximum number of powers of 10 in the "PowersOf10" lookup table. - /// C# decimal supports up to 28-29 significant digits, so we store 10^0 through 10^28 (29 entries). - /// This covers all possible scale values (0-38) within C# decimal's precision range. - /// Array index corresponds to the exponent: PowersOf10[n] = 10^n. - /// - private const int MaxPowersOf10Count = 29; - - // Powers of 10 lookup table for efficient decimal scaling (up to 10^28) - // - // Use a lookup table instead of Math.Pow because: - // - Math.Pow returns double, requiring conversion to decimal with potential precision loss. - // - Repeated Math.Pow calls in tight loops have measurable performance impact. - // - Pre-computed decimal constants give exact values with zero runtime overhead. - // - C# decimal supports up to 28-29 significant digits, so 10^0 through 10^28 covers all cases. - private static readonly decimal[] PowersOf10 = new decimal[MaxPowersOf10Count] - { - 1m, // 10^0 - 10m, // 10^1 - 100m, // 10^2 - 1000m, // 10^3 - 10000m, // 10^4 - 100000m, // 10^5 - 1000000m, // 10^6 - 10000000m, // 10^7 - 100000000m, // 10^8 - 1000000000m, // 10^9 - 10000000000m, // 10^10 - 100000000000m, // 10^11 - 1000000000000m, // 10^12 - 10000000000000m, // 10^13 - 100000000000000m, // 10^14 - 1000000000000000m, // 10^15 - 10000000000000000m, // 10^16 - 100000000000000000m, // 10^17 - 1000000000000000000m, // 10^18 - 10000000000000000000m, // 10^19 - 100000000000000000000m, // 10^20 - 1000000000000000000000m, // 10^21 - 10000000000000000000000m, // 10^22 - 100000000000000000000000m, // 10^23 - 1000000000000000000000000m, // 10^24 - 10000000000000000000000000m, // 10^25 - 100000000000000000000000000m, // 10^26 - 1000000000000000000000000000m, // 10^27 - 10000000000000000000000000000m // 10^28 - }; /// /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT. 
@@ -194,190 +151,120 @@ public void SetVal(int index, byte value) } /// - /// Converts SQL_NUMERIC_STRUCT to C# decimal. + /// Converts SQL_NUMERIC_STRUCT to SqlDecimal with full 38-digit precision support. + /// This method supports the complete SQL Server DECIMAL/NUMERIC range without data loss. /// /// The SQL numeric structure from ODBC. - /// The equivalent C# decimal value. - /// Thrown when the value exceeds C# decimal range. - public static decimal ToDecimal(SqlNumericStruct numeric) + /// The equivalent SqlDecimal value. + /// + /// SqlDecimal provides full SQL Server precision (38 digits) compared to C# decimal (28-29 digits). + /// Use this method when working with high-precision values to avoid data loss. + /// + public static SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) { - decimal result; - - try + // Validate precision and scale before creating SqlDecimal + if (numeric.precision < 1 || numeric.precision > 38) { - // Convert little-endian byte array (16 bytes) to a scaled integer value. - // The val array contains the absolute value scaled by 10^scale. - // For example, for numeric(10,2) value 123.45: - // scale = 2, val represents 12345 (123.45 * 10^2) - // - // Little-endian storage layout: - // - val[0] = least significant byte (LSB) - // - val[15] = most significant byte (MSB) - // - Each byte represents one "digit" in base-256 representation - // - Example: bytes [0x39, 0x30, 0x00, ...] = 0x39 + (0x30 * 256) = 57 + 12288 = 12345 - // - decimal scaledValue = 0m; - - // Find the most significant non-zero byte (highest index) to optimize the conversion. - // This avoids processing unnecessary high-order zero bytes and prevents potential - // overflow when building large values. Most practical values use only 12-13 bytes. 
- int lastNonZeroByte = -1; - for (int i = 15; i >= 0; i--) - { - if (numeric.GetVal(i) != 0) - { - lastNonZeroByte = i; - break; - } - } - - // If all bytes are zero, result is 0 - if (lastNonZeroByte == -1) - { - result = 0m; - } - else - { - // Build the integer value by processing from MSB (highest index) to LSB (index 0). - // Algorithm: Start with MSB, then for each subsequent byte toward LSB, - // multiply current value by 256 and add the next byte. - // This approach avoids large intermediate multipliers that could overflow decimal. - for (int i = lastNonZeroByte; i >= 0; i--) - { - scaledValue = scaledValue * 256m + numeric.GetVal(i); - } - - // Scale down by dividing by 10^scale to get the actual decimal value. - // The scaledValue contains the integer representation; we need to divide by 10^scale. - // For example, if scaledValue=12345 and scale=2, result = 12345 / 100 = 123.45 - if (numeric.scale >= 0 && numeric.scale < PowersOf10.Length) - { - // Use pre-computed lookup table for scales 0-28 (fast path) - result = scaledValue / PowersOf10[numeric.scale]; - } - else if (numeric.scale == 0) - { - // No scaling needed - value is already an integer - result = scaledValue; - } - else - { - // For scales beyond our lookup table (29-38), use repeated division by 10. - // We cannot use Math.Pow(10, scale) because: - // - Math.Pow returns double, and values > 10^28 overflow when converting double→decimal - // - Repeated division maintains decimal precision without overflow - result = scaledValue; - for (int i = 0; i < numeric.scale; i++) - { - result /= 10m; - } - } - - // Apply sign: 1 = positive, 0 = negative - if (numeric.sign == 0) - { - result = -result; - } - } + throw new ArgumentException($"Precision must be between 1 and 38, got {numeric.precision}"); } - catch (OverflowException) + if (numeric.scale < 0 || numeric.scale > numeric.precision) { - // SQL Server DECIMAL(38,scale) can represent values much larger than C# decimal's range. 
- // C# decimal maximum: ±79,228,162,514,264,337,593,543,950,335 (approx ±7.9 × 10^28) - // SQL DECIMAL(38,0) maximum: ±10^38 - 1 - // - // This overflow typically occurs with DECIMAL(30+, scale) parameters containing values - // that exceed 29 significant digits total. - string valHex = string.Join("", Enumerable.Range(0, 16).Select(i => numeric.GetVal(i).ToString("X2"))); - throw new OverflowException( - $"SQL DECIMAL/NUMERIC value exceeds C# decimal range. " + - $"Precision={numeric.precision}, Scale={numeric.scale}, Sign={numeric.sign}, " + - $"Val={valHex}. " + - $"C# decimal supports up to 29 significant digits (±7.9×10^28). " + - $"Consider using lower precision parameters or handle large numerics differently."); + throw new ArgumentException($"Scale ({numeric.scale}) must be between 0 and precision ({numeric.precision})"); } - return result; + // SqlDecimal constructor requires int[] array (not byte[]) + // The val array in SqlNumericStruct is 16 bytes = 128 bits + // We need to convert to 4 int32s (4 x 32 bits = 128 bits) + + int[] data = new int[4]; + for (int i = 0; i < 4; i++) + { + // Convert each group of 4 bytes to an int32 (little-endian) + int offset = i * 4; + data[i] = numeric.GetVal(offset) | + (numeric.GetVal(offset + 1) << 8) | + (numeric.GetVal(offset + 2) << 16) | + (numeric.GetVal(offset + 3) << 24); + } + + // SqlDecimal constructor: SqlDecimal(byte precision, byte scale, bool positive, int[] data) + bool isPositive = numeric.sign == 1; + + // Note: SqlDecimal scale parameter is byte (unsigned), but SqlNumericStruct.scale is sbyte (signed) + // SQL Server scale is always non-negative (0-38), so this cast is safe + byte scale = (byte)Math.Max((sbyte)0, numeric.scale); + + return new SqlDecimal(numeric.precision, scale, isPositive, data); } /// - /// Converts C# decimal to SQL_NUMERIC_STRUCT. - /// Follows the same conversion logic as Java extension's BigDecimalToNumericStruct. 
+ /// Converts SqlDecimal to SQL_NUMERIC_STRUCT for transfer to SQL Server. + /// This method handles the full 38-digit precision range without data loss. /// - /// The C# decimal value to convert. - /// Total number of digits (1-38). - /// Number of digits after decimal point (0-precision). + /// The SqlDecimal value to convert. + /// Total number of digits (1-38). If null, uses SqlDecimal's precision. + /// Number of digits after decimal point (0-precision). If null, uses SqlDecimal's scale. /// The equivalent SQL numeric structure for ODBC. /// Thrown when precision or scale are out of valid range. - public static SqlNumericStruct FromDecimal(decimal value, byte precision, byte scale) + public static SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision = null, byte? scale = null) { - if (precision < 1 || precision > 38) - { - throw new ArgumentException($"Precision must be between 1 and 38, got {precision}"); - } - if (scale > precision) + // Handle SqlDecimal.Null + if (value.IsNull) { - throw new ArgumentException($"Scale ({scale}) cannot exceed precision ({precision})"); + // Return a zero-initialized struct - caller should set null indicator separately + return new SqlNumericStruct + { + precision = precision ?? 1, + scale = (sbyte)(scale ?? 0), + sign = 1 + }; } - - SqlNumericStruct result = new SqlNumericStruct - { - precision = precision, - scale = (sbyte)scale, // Safe cast: scale validated and the max is 38 < 127. - sign = (byte)(value >= 0 ? 1 : 0) - }; - - // Work with absolute value - decimal absValue = Math.Abs(value); - - // Scale up by multiplying by 10^scale to get an integer representation - // For example, 123.45 with scale=2 becomes 12345 - decimal scaledValue; - if (scale >= 0 && scale < PowersOf10.Length) + + // Use SqlDecimal's own precision/scale if not specified + byte targetPrecision = precision ?? value.Precision; + byte targetScale = scale ?? 
value.Scale; + + if (targetPrecision < 1 || targetPrecision > 38) { - scaledValue = absValue * PowersOf10[scale]; + throw new ArgumentException($"Precision must be between 1 and 38, got {targetPrecision}"); } - else if (scale == 0) + if (targetScale > targetPrecision) { - scaledValue = absValue; + throw new ArgumentException($"Scale ({targetScale}) cannot exceed precision ({targetPrecision})"); } - else + + // Adjust scale if needed (SqlDecimal has AdjustScale method) + SqlDecimal adjustedValue = value; + if (targetScale != value.Scale) { - // For scales beyond our lookup table, use repeated multiplication by 10 - // Cannot use Math.Pow(10, scale) because values > 10^28 overflow when converting double→decimal - scaledValue = absValue; - for (int i = 0; i < scale; i++) - { - scaledValue *= 10m; - } + // AdjustScale returns a new SqlDecimal with the specified scale + // positive scaleShift adds decimal places, negative removes them + int scaleShift = targetScale - value.Scale; + adjustedValue = SqlDecimal.AdjustScale(value, scaleShift, false); } - - // Round to nearest integer (handles any remaining fractional part due to precision limits) - scaledValue = Math.Round(scaledValue, 0, MidpointRounding.AwayFromZero); - - // Convert the scaled integer to little-endian byte array (16 bytes) - // Each byte represents one position in base-256 representation - for (int i = 0; i < 16; i++) + + SqlNumericStruct result = new SqlNumericStruct { - if (scaledValue > 0) - { - decimal byteValue = scaledValue % 256m; - result.SetVal(i, (byte)byteValue); - scaledValue = Math.Floor(scaledValue / 256m); - } - else - { - result.SetVal(i, 0); - } - } - - // If there's still value left after filling 16 bytes, we have overflow - if (scaledValue > 0) + precision = targetPrecision, + scale = (sbyte)targetScale, + sign = (byte)(adjustedValue.IsPositive ? 
1 : 0) + }; + + // SqlDecimal stores data as int[4] array (128 bits total) + // We need to convert to byte[16] for SqlNumericStruct + int[] data = adjustedValue.Data; + + for (int i = 0; i < 4 && i < data.Length; i++) { - throw new OverflowException( - $"Value {value} with precision {precision} and scale {scale} exceeds SQL_NUMERIC_STRUCT capacity"); + // Convert each int32 to 4 bytes (little-endian) + int offset = i * 4; + int value32 = data[i]; + result.SetVal(offset, (byte)(value32 & 0xFF)); + result.SetVal(offset + 1, (byte)((value32 >> 8) & 0xFF)); + result.SetVal(offset + 2, (byte)((value32 >> 16) & 0xFF)); + result.SetVal(offset + 3, (byte)((value32 >> 24) & 0xFF)); } - + return result; } } diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs index ea1018c..e5515c9 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs @@ -9,6 +9,7 @@ // //********************************************************************* using System; +using System.Data.SqlTypes; using System.Runtime.InteropServices; using System.Collections.Generic; using Microsoft.Data.Analysis; @@ -111,28 +112,27 @@ public override DataFrame Execute(DataFrame input, Dictionary s public class CSharpTestExecutorDecimalParam: AbstractSqlServerExtensionExecutor { public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ - // Test maximum C# decimal value (decimal.MaxValue = 79228162514264337593543950335) - // Note: C# decimal supports ~29 digits, even though SQL NUMERIC can support up to 38 digits - sqlParams["@param0"] = decimal.MaxValue; + // Test maximum SqlDecimal value (DECIMAL(38,0) max = 99999999999999999999999999999999999999) + // Note: SqlDecimal supports full 38 digits, unlike C# decimal which is limited to ~29 digits + sqlParams["@param0"] = 
SqlDecimal.Parse("99999999999999999999999999999999999999"); // Test minimum value (negative max) - sqlParams["@param1"] = decimal.MinValue; + sqlParams["@param1"] = SqlDecimal.Parse("-99999999999999999999999999999999999999"); - // Test high scale value (DECIMAL(38, 10)) - // Using 18 significant digits to stay within C# decimal range - sqlParams["@param2"] = 12345678.1234567890m; + // Test high scale value (DECIMAL(38, 10)) - full 38-digit precision + sqlParams["@param2"] = SqlDecimal.Parse("1234567890123456789012345678.1234567890"); // Test zero - sqlParams["@param3"] = 0m; + sqlParams["@param3"] = new SqlDecimal(0); - // Test small value with high precision (28 decimal places, max for C# decimal) - sqlParams["@param4"] = 0.1234567890123456789012345678m; + // Test small value with high precision (DECIMAL(38, 28)) + sqlParams["@param4"] = SqlDecimal.Parse("1234567890.1234567890123456789012345678"); // Test typical financial value (DECIMAL(19, 4)) - sqlParams["@param5"] = 123456789012345.6789m; + sqlParams["@param5"] = SqlDecimal.Parse("123456789012345.6789"); // Test negative financial value - sqlParams["@param6"] = -123456789012345.6789m; + sqlParams["@param6"] = SqlDecimal.Parse("-123456789012345.6789"); // Test null (last parameter) sqlParams["@param7"] = null; @@ -142,37 +142,36 @@ public override DataFrame Execute(DataFrame input, Dictionary s } /// - /// Test executor for decimal OUTPUT parameters with maximum precision (29 digits). - /// Tests the FromDecimal() conversion for values at the edge of C# decimal's capability. + /// Test executor for SqlDecimal OUTPUT parameters with maximum precision (38 digits). + /// Tests the FromSqlDecimal() conversion for values at the edge of SQL Server DECIMAL's capability. /// - /// Note: C# decimal normalizes values - the scale is determined by the value's actual - /// precision requirements, not by a declared scale. This tests high-precision conversions. 
+ /// Note: SqlDecimal supports up to 38 digits of precision, matching SQL Server's DECIMAL/NUMERIC. /// public class CSharpTestExecutorDecimalHighScaleParam: AbstractSqlServerExtensionExecutor { public override DataFrame Execute(DataFrame input, Dictionary sqlParams) { - // Set high-precision decimal values (29 significant digits total) - // These exercise the FromDecimal() conversion for C# decimal's maximum capability - // C# decimal can represent values with up to 29 significant digits + // Set high-precision SqlDecimal values (38 significant digits total) + // These exercise the FromSqlDecimal() conversion for SQL Server's maximum capability + // SqlDecimal can represent values with up to 38 significant digits - // param0: Maximum precision with integer and fractional parts - sqlParams["@param0"] = 12345678901234567.890123456789m; // 29 total digits + // param0: Maximum precision with integer and fractional parts (DECIMAL(38, 10)) + sqlParams["@param0"] = SqlDecimal.Parse("1234567890123456789012345678.9012345678"); - // param1: Large fractional precision - sqlParams["@param1"] = 1.2345678901234567890123456789m; // 29 total digits + // param1: Large fractional precision (DECIMAL(38, 28)) + sqlParams["@param1"] = SqlDecimal.Parse("1234567890.1234567890123456789012345678"); - // param2: Different high-precision pattern - sqlParams["@param2"] = 123.45678901234567890123456789m; // 29 total digits + // param2: Different high-precision pattern (DECIMAL(38, 20)) + sqlParams["@param2"] = SqlDecimal.Parse("123456789012345678.12345678901234567890"); - // param3: Maximum fractional precision - sqlParams["@param3"] = 0.12345678901234567890123456789m; // 29 total digits + // param3: Maximum fractional precision (DECIMAL(38, 38)) + sqlParams["@param3"] = SqlDecimal.Parse("0.12345678901234567890123456789012345678"); - // param4: Negative high-precision value - sqlParams["@param4"] = -987.65432109876543210987654321m; // 29 total digits + // param4: Negative high-precision 
value (DECIMAL(38, 18)) + sqlParams["@param4"] = SqlDecimal.Parse("-12345678901234567890.123456789012345678"); // param5: Zero value for validation - sqlParams["@param5"] = 0.0m; + sqlParams["@param5"] = new SqlDecimal(0); return null; } diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj index d863550..c0664f3 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj @@ -11,6 +11,7 @@ + diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 6fb7f8a..423fc41 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -399,20 +399,20 @@ namespace ExtensionApiTest decimalInfo.m_columnNames); // Validate that columns metadata is correct - // NOTE: SDK calculates precision from actual data, not input metadata - // Column 0: DecimalColumn1, calculated precision 13 (max value 999999999.9999 = 9 digits + 4 scale) + // NOTE: SqlDecimal preserves input precision/scale metadata + // Column 0: DecimalColumn1, declared NUMERIC(19,4) GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType - 13, // columnSize (calculated precision from data) + 19, // columnSize (declared precision from input) 4, // decimalDigits (scale) SQL_NO_NULLS); // nullable - // Column 1: DecimalColumn2, calculated precision 19 (from actual data values) + // Column 1: DecimalColumn2, declared NUMERIC(38,10) GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType - 19, // columnSize (calculated precision from data) + 38, // 
columnSize (declared precision from input) 10, // decimalDigits (scale) SQL_NULLABLE); // nullable } @@ -427,13 +427,11 @@ namespace ExtensionApiTest // // WHY: E2E tests validated decimal output columns, but unit tests had no coverage // for verifying the managed-to-native conversion and metadata calculation for - // decimal result columns. This is CRITICAL because the SDK must dynamically - // calculate precision from actual decimal data (not hardcode to 38). + // decimal result columns and verifies precision/scale metadata is preserved correctly. // // WHAT: Tests that decimal columns returned from C# have: // - Correct SQL_C_NUMERIC type - // - Properly calculated precision (not hardcoded to 38) - // - Correct scale matching the C# decimal data + // - Preserved precision/scale from SqlDecimal metadata // - Proper NULL handling in nullable columns // TEST_F(CSharpExtensionApiTests, GetDecimalResultColumnsTest) @@ -498,20 +496,20 @@ namespace ExtensionApiTest decimalResultInfo.m_columnNames); // Validate result column metadata - // This tests that CSharpOutputDataSet.ExtractNumericColumn() properly - // calculates precision from the actual data (not hardcoded to 38) + // This tests that CSharpOutputDataSet.ExtractNumericColumn() preserves + // SqlDecimal precision/scale from the input data // GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType - 18, // columnSize (calculated precision from max value) + 18, // columnSize (declared precision from input) 2, // decimalDigits (scale) SQL_NO_NULLS); // nullable GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType - 10, // columnSize (calculated precision) + 10, // columnSize (declared precision from input) 5, // decimalDigits (scale) SQL_NULLABLE); // nullable } @@ -584,18 +582,18 @@ namespace ExtensionApiTest mixedDecimalInfo.m_columnNames); // Validate each column has correct precision/scale - // NOTE: SDK calculates precision from actual data values + // NOTE: SqlDecimal preserves declared 
precision from input GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType - 19, // columnSize (precision for money - preserved from actual large values) + 19, // columnSize (declared precision from input NUMERIC(19,4)) 4, // decimalDigits (scale for money) SQL_NO_NULLS); // nullable GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType - 6, // columnSize (calculated precision: 0.99999 = 1 + 5 scale = 6) + 5, // columnSize (declared precision from input NUMERIC(5,5)) 5, // decimalDigits (max scale) SQL_NO_NULLS); // nullable } @@ -689,18 +687,18 @@ namespace ExtensionApiTest nullDecimalInfo.m_columnNames); // Validate metadata - both columns should be nullable - // NOTE: SDK calculates precision from actual non-NULL data values + // NOTE: SqlDecimal preserves declared precision even when NULLs present GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType - 9, // columnSize (calculated precision from max non-NULL value) + 28, // columnSize (declared precision from input NUMERIC(28,6)) 6, // decimalDigits (scale) SQL_NULLABLE); // nullable (contains NULLs) GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType - 9, // columnSize (calculated precision from max non-NULL value) + 15, // columnSize (declared precision from input NUMERIC(15,3)) 3, // decimalDigits (scale) SQL_NULLABLE); // nullable (contains NULLs) } @@ -709,20 +707,20 @@ namespace ExtensionApiTest // Name: DecimalHighScaleTest // // Description: - // Test decimal values with scale > 28 to verify Math.Pow() fallback behavior. + // Test decimal values with high scale (29-38) to verify SqlDecimal handles + // extreme precision requirements correctly. // - // WHY: SqlNumericHelper uses a PowersOf10 lookup table for scales 0-28 for performance. - // For scales 29-38 (beyond the lookup table), it falls back to Math.Pow(10, scale). + // WHY: SqlDecimal from Microsoft.Data.SqlClient supports scales up to 38. // This test ensures: - // 1. Math.Pow fallback doesn't crash - // 2. 
Values are converted correctly despite potential precision loss - // 3. Edge case handling is robust for rare but valid SQL Server DECIMAL types + // 1. High scale values convert correctly between SQL_NUMERIC_STRUCT and SqlDecimal + // 2. Edge cases are handled gracefully for rare but valid SQL Server DECIMAL types + // 3. Full 38-digit precision is preserved without data loss // // WHAT: Tests various high scale scenarios: - // - NUMERIC(38, 30): Very small fractional value (fits in C# decimal) - // - NUMERIC(38, 35): Extremely small fractional value (1 significant digit) + // - NUMERIC(38, 30): Very small fractional values + // - NUMERIC(38, 35): Extremely small fractional values (1 significant digit) // - NUMERIC(38, 38): Maximum scale with minimum value (0.00...001) - // - NUMERIC(38, 29): Boundary case at scale = 29 (first fallback case) + // - NUMERIC(38, 29): Boundary case at scale = 29 // // PRACTICAL USAGE: While these extreme scales are rare in production databases, // they're valid SQL Server types and must be handled gracefully: @@ -738,14 +736,13 @@ namespace ExtensionApiTest 0, // inputSchemaColumnsNumber 6); // parametersNumber - // Test NUMERIC(38, 29) - boundary case at scale = 29 (first fallback to Math.Pow) + // Test NUMERIC(38, 29) - boundary case at scale = 29 // Value: 0.00000000000000000000000000001 (1 at 29th decimal place) SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, 38, 29, false); InitParam(0, p0); // Test NUMERIC(38, 30) - scale = 30 // Value: 0.000000000000000000000000000123 (123 scaled by 10^-30) - // Small mantissa value tests Math.Pow fallback without overflow SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, 38, 30, false); InitParam(1, p1); @@ -770,11 +767,9 @@ namespace ExtensionApiTest SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, 38, 32, false); InitParam(5, p5); - // NOTE: This test validates that the Math.Pow() fallback in ToDecimal() - // handles scales beyond the PowersOf10 lookup table gracefully. 
- // While Math.Pow returns double (potential precision loss), these extreme - // scales typically occur with very small values that fit within double's - // 53-bit mantissa precision, so conversion to decimal is safe. + // NOTE: This test validates that SqlDecimal correctly handles high scales (29-38) + // without precision loss. Microsoft.Data.SqlClient's SqlDecimal provides + // full 38-digit precision support for all valid SQL Server DECIMAL types. } //---------------------------------------------------------------------------------------------- From 93e85e1ed82b67335579560bd0130bdf98145c06 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Thu, 19 Mar 2026 23:13:07 -0700 Subject: [PATCH 08/13] wip --- .../src/managed/CSharpOutputDataSet.cs | 60 ++-- .../src/managed/CSharpParamContainer.cs | 46 ++- .../src/managed/utils/SqlNumericHelper.cs | 305 ++++++++++-------- .../test/src/native/CSharpDecimalTests.cpp | 242 ++++++++++++++ 4 files changed, 489 insertions(+), 164 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs index 2739818..de87403 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpOutputDataSet.cs @@ -221,25 +221,32 @@ T[] array /// /// This method extracts NUMERIC/DECIMAL column data by converting SqlDecimal values /// to SQL_NUMERIC_STRUCT array, pinning it, and storing the pointer. + /// + /// Precision and Scale Terminology (T-SQL DECIMAL(precision, scale)): + /// - Precision: Total number of decimal digits (1-38), both left and right of decimal point + /// - Scale: Number of digits to the right of the decimal point (0-precision) + /// - Example: DECIMAL(10,2) can store values like 12345678.90 (10 total digits, 2 after decimal) /// /// The column index. /// The DataFrameColumn containing SqlDecimal values. 
- private void ExtractNumericColumn( + private unsafe void ExtractNumericColumn( ushort columnNumber, DataFrameColumn column) { if (column == null) { SetDataPtrs(columnNumber, Array.Empty()); + return; } - else - { // Extract precision and scale from SqlDecimal values. // SqlDecimal from Microsoft.Data.SqlClient preserves precision/scale metadata, // so we find the maximum precision and scale across all non-null values. // - byte precision = 0; + // In T-SQL terms: We're determining the target DECIMAL(precision, scale) + // that can accommodate all values in this column. + // + byte precision = SqlNumericHelper.SQL_MIN_PRECISION; // Start with minimum (1) byte scale = (byte)_columns[columnNumber].DecimalDigits; // Examine all rows to find maximum precision and scale @@ -261,23 +268,24 @@ private void ExtractNumericColumn( } } - // Ensure minimum precision of 1 and maximum of 38 - precision = Math.Max(precision, (byte)1); - precision = Math.Min(precision, (byte)38); + // Ensure precision is within T-SQL DECIMAL valid range (1-38) + precision = Math.Max(precision, SqlNumericHelper.SQL_MIN_PRECISION); + precision = Math.Min(precision, SqlNumericHelper.SQL_MAX_PRECISION); - // Ensure scale doesn't exceed precision + // Ensure scale doesn't exceed precision (T-SQL DECIMAL(p,s) constraint: s <= p) if (scale > precision) { precision = scale; } - // Update column metadata with extracted precision and scale - // Size contains the precision for DECIMAL/NUMERIC types (not bytes) - // DecimalDigits contains the scale + // Update column metadata with determined precision and scale + // IMPORTANT: For DECIMAL/NUMERIC types, Size represents precision (total digits), + // NOT byte size. This follows ODBC ColumnSize convention for SQL_NUMERIC/SQL_DECIMAL. + // DecimalDigits represents scale (digits after decimal point). 
_columns[columnNumber].Size = precision; _columns[columnNumber].DecimalDigits = scale; - Logging.Trace($"ExtractNumericColumn: Column {columnNumber}, Precision={precision}, Scale={scale}, RowCount={column.Length}"); + Logging.Trace($"ExtractNumericColumn: Column {columnNumber}, T-SQL type=DECIMAL({precision},{scale}), RowCount={column.Length}"); // Convert each SqlDecimal value to SQL_NUMERIC_STRUCT SqlNumericStruct[] numericArray = new SqlNumericStruct[column.Length]; @@ -287,34 +295,46 @@ private void ExtractNumericColumn( { SqlDecimal value = (SqlDecimal)column[rowNumber]; - // Convert SqlDecimal directly to SQL_NUMERIC_STRUCT with full precision support - // FromSqlDecimal handles scale adjustment if needed to match target precision/scale + // Convert SqlDecimal to SQL_NUMERIC_STRUCT with target precision/scale + // FromSqlDecimal handles scale adjustment if needed to match column metadata numericArray[rowNumber] = FromSqlDecimal(value, precision, scale); Logging.Trace($"ExtractNumericColumn: Row {rowNumber}, Value={value} converted to SqlNumericStruct"); } else { - // For null values, create a zero-initialized struct + // For null values, create a zero-initialized struct with target precision/scale // The null indicator in strLenOrNullMap will mark this as SQL_NULL_DATA // // WHY create a struct for NULL values instead of leaving uninitialized? 
// - ODBC requires a valid struct pointer even for NULL values // - The strLenOrNullMap array separately tracks which values are NULL - // - Native code reads from the struct pointer, so it must be valid memory + // - Native code may read from the struct pointer, so it must be valid memory // - We use sign=1 (positive) by convention for NULL placeholders - numericArray[rowNumber] = new SqlNumericStruct + SqlNumericStruct nullStruct = new SqlNumericStruct { precision = precision, scale = (sbyte)scale, sign = 1 // Positive sign convention for NULL placeholders }; + + // Zero out the value array for NULL placeholders + // Fixed buffer is already fixed - access directly via pointer + unsafe + { + byte* valPtr = nullStruct.val; + for (int i = 0; i < SqlNumericHelper.SQL_NUMERIC_VALUE_SIZE; i++) + { + valPtr[i] = 0; + } + } + + numericArray[rowNumber] = nullStruct; Logging.Trace($"ExtractNumericColumn: Row {rowNumber} is NULL"); } } - // Pin the SqlNumericStruct array and store pointer - SetDataPtrs(columnNumber, numericArray); - } + // Pin the SqlNumericStruct array and store pointer + SetDataPtrs(columnNumber, numericArray); } /// diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index db494bd..bf20bf9 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -136,12 +136,34 @@ public unsafe void AddParam( break; case SqlDataType.DotNetNumeric: // Convert SQL_NUMERIC_STRUCT to SqlDecimal - // Special handling for OUTPUT parameters: if precision=0, treat as uninitialized + // + // OUTPUT Parameter Convention (precision=0 sentinel): + // ===================================================== + // For OUTPUT parameters, SQL Server passes uninitialized SQL_NUMERIC_STRUCT with precision=0. 
+ // This is a sentinel value indicating "output only, no input value". + // We must treat this as SqlDecimal.Null to avoid validation errors. + // + // Rationale: + // - ODBC specification requires precision 1-38 for SQL_NUMERIC_STRUCT + // - precision=0 violates the spec, so it's safe to use as a sentinel + // - ToSqlDecimal() would throw ArgumentException for precision=0 + // - C# executor will assign the actual output value before returning + // + // Reference: ODBC Programmer's Reference, SQL_C_NUMERIC data type + // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/c-data-types + // + // CRITICAL: Validate pointer before dereferencing to prevent access violations + if (paramValue == null) + { + throw new ArgumentNullException(nameof(paramValue), + "paramValue pointer is null for NUMERIC parameter"); + } + SqlNumericStruct* numericPtr = (SqlNumericStruct*)paramValue; if (numericPtr->precision == 0) { - // OUTPUT parameter with uninitialized struct - use SqlDecimal.Null - // The C# executor will set the actual value + // OUTPUT parameter with uninitialized struct (precision=0 sentinel) + // Use SqlDecimal.Null - C# executor will set the actual value _params[paramNumber].Value = SqlDecimal.Null; } else @@ -248,19 +270,25 @@ public unsafe void ReplaceParam( // Note: param.Value could be SqlDecimal or potentially null (handled above) if (param.Value is SqlDecimal sqlDecimalValue) { - // WHY use param.Size for precision? - // - For DECIMAL/NUMERIC parameters, param.Size contains the declared precision (not bytes) - // - This follows standard ODBC behavior where ColumnSize = precision for SQL_NUMERIC/SQL_DECIMAL - // - CRITICAL: The SqlNumericStruct precision MUST match the declared parameter precision + // WHY use param.Size for precision instead of SqlDecimal.Precision? 
+ // ================================================================ + // - For DECIMAL/NUMERIC parameters, param.Size contains the DECLARED precision from T-SQL (not bytes) + // - This follows standard ODBC behavior where ColumnSize = precision for SQL_NUMERIC/SQL_DECIMAL types + // - CRITICAL: The SqlNumericStruct precision MUST match the declared parameter precision, // or SQL Server rejects it with "Invalid data for type decimal" (Msg 9803) // - Example: DECIMAL(3,3) parameter MUST have precision=3 in the struct, not precision=38 + // even if SqlDecimal.Precision is higher + // - In T-SQL terms: DECLARE @p DECIMAL(10,2) - param.Size=10, param.DecimalDigits=2 byte precision = (byte)param.Size; byte scale = (byte)param.DecimalDigits; - // WHY set strLenOrNullMap to 19? + + // WHY set strLenOrNullMap to SQL_NUMERIC_STRUCT_SIZE (19 bytes)? + // ================================================================ // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size // - SQL_NUMERIC_STRUCT is exactly 19 bytes: precision(1) + scale(1) + sign(1) + val(16) // - This tells ODBC how many bytes to read from the paramValue pointer - *strLenOrNullMap = 19; // sizeof(SqlNumericStruct) + // - Using named constant improves readability and maintainability + *strLenOrNullMap = SqlNumericHelper.SQL_NUMERIC_STRUCT_SIZE; ReplaceNumericStructParam(sqlDecimalValue, precision, scale, paramValue); } else diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index 9ddd653..d2db8c4 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -25,128 +25,126 @@ namespace Microsoft.SqlServer.CSharpExtension /// public static class SqlNumericHelper { + /// + /// SQL Server maximum precision for DECIMAL/NUMERIC types (digits). 
+ /// + public const byte SQL_MAX_PRECISION = 38; + + /// + /// Minimum precision for DECIMAL/NUMERIC types (digits). + /// + public const byte SQL_MIN_PRECISION = 1; + + /// + /// Maximum scale for DECIMAL/NUMERIC types (digits after decimal point). + /// Scale cannot exceed precision. + /// + public const byte SQL_MAX_SCALE = 38; + + /// + /// Minimum scale for DECIMAL/NUMERIC types (digits after decimal point). + /// + public const byte SQL_MIN_SCALE = 0; + + /// + /// Size of SQL_NUMERIC_STRUCT value array in bytes. + /// Defined as SQL_MAX_NUMERIC_LEN in ODBC specification (sql.h/sqltypes.h). + /// + public const int SQL_NUMERIC_VALUE_SIZE = 16; + + /// + /// Total size of SQL_NUMERIC_STRUCT in bytes: precision(1) + scale(1) + sign(1) + val(16) = 19. + /// + public const int SQL_NUMERIC_STRUCT_SIZE = 19; /// /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT. /// Used for transferring NUMERIC/DECIMAL data between SQL Server and C#. - /// IMPORTANT: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT - /// defined in sql.h/sqltypes.h on the native side. /// - /// Why individual byte fields instead of byte[] array? - /// - Using byte[] would make this a managed type (reference type), violating the unmanaged constraint - /// - Fixed buffers (fixed byte val[16]) require unsafe code, which we want to avoid for safety. - /// - Individual fields keep this as a pure value type (unmanaged) with memory safety. - /// - The compiler will optimize access patterns, so there's no performance penalty. 
+ /// Binary Layout (19 bytes total, Pack=1 for no padding): + /// Offset 0: precision (SQLCHAR / byte) - Total digits (1-38) + /// Offset 1: scale (SQLSCHAR / sbyte) - Digits after decimal point (0-precision) + /// Offset 2: sign (SQLCHAR / byte) - 1=positive, 0=negative + /// Offset 3-18: val (SQLCHAR[16] / byte[16]) - Little-endian 128-bit scaled integer + /// + /// References: + /// - ODBC Programmer's Reference: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/c-data-types + /// - SQL_NUMERIC_STRUCT definition: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/sql-numeric-structure + /// - sqltypes.h header: SQL_MAX_NUMERIC_LEN = 16 + /// + /// CRITICAL: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT. + /// Any layout mismatch will cause data corruption when marshaling to/from native code. /// [StructLayout(LayoutKind.Sequential, Pack = 1)] - public struct SqlNumericStruct + public unsafe struct SqlNumericStruct { /// - /// Total number of digits (e.g., 1-38) - SQLCHAR (unsigned byte) + /// Total number of decimal digits (1-38). + /// In T-SQL terms: DECIMAL(precision, scale) - this is the 'precision' part. + /// Example: DECIMAL(10,2) has precision=10 (up to 10 total digits). + /// Maps to SQLCHAR (unsigned byte) in ODBC specification. /// public byte precision; /// - /// Number of digits after decimal point - SQLSCHAR (signed byte) + /// Number of digits after the decimal point (0-precision). + /// In T-SQL terms: DECIMAL(precision, scale) - this is the 'scale' part. + /// Example: DECIMAL(10,2) has scale=2 (2 digits after decimal point). /// - /// ODBC specification defines scale as SQLSCHAR (signed char) in SQL_NUMERIC_STRUCT. - /// We must use sbyte for exact binary layout compatibility with native ODBC code. - /// Mismatch would cause struct layout corruption when marshaling to/from native code. + /// CRITICAL: Maps to SQLSCHAR (signed char) in ODBC specification. 
+ /// We must use sbyte (not byte) for exact binary layout compatibility. + /// Although scale is always non-negative in T-SQL, ODBC defines it as signed. /// public sbyte scale; /// - /// Sign indicator: 1 = positive, 0 = negative - SQLCHAR (unsigned byte) + /// Sign indicator: 1 = positive/zero, 0 = negative. + /// Maps to SQLCHAR (unsigned byte) in ODBC specification. /// public byte sign; /// - /// Little-endian byte array (16 bytes) representing the scaled integer value. - /// The actual numeric value = (val as integer) * 10^(-scale), adjusted for sign. - /// Corresponds to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN = 16. + /// Little-endian 128-bit integer representing the scaled value. + /// The actual numeric value = (val as 128-bit integer) * 10^(-scale) * sign. /// - /// Why 16 separate fields instead of an array? - /// - See struct-level comment: arrays would make this managed, violating unmanaged constraint. - /// - This verbose approach maintains binary compatibility without requiring unsafe code or /unsafe compiler flag. - /// - public byte val0; - public byte val1; - public byte val2; - public byte val3; - public byte val4; - public byte val5; - public byte val6; - public byte val7; - public byte val8; - public byte val9; - public byte val10; - public byte val11; - public byte val12; - public byte val13; - public byte val14; - public byte val15; - - /// - /// Helper method to get val byte at specified index (0-15). + /// Fixed buffer provides direct memory access without helper methods. + /// Maps to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN=16. /// - /// We use switch expression instead of array indexing: - /// - Since we can't use arrays (would make struct managed), we need field access. - /// - Switch expressions are optimized by the compiler to efficient jump tables. - /// - Modern Just-In-Time compiler will inline this for zero overhead compared to array access. 
+ /// Note: Requires unsafe context to access fixed buffer. + /// Use: fixed (byte* ptr = numericStruct.val) { ... } + /// Or: byte b = numericStruct.val[i]; // Direct indexing in unsafe context /// - public byte GetVal(int index) + public fixed byte val[SQL_NUMERIC_VALUE_SIZE]; + } + + /// + /// Validates precision and scale parameters for SQL Server DECIMAL/NUMERIC types. + /// + /// Total number of digits (1-38). + /// Number of digits after decimal point (0-precision). + /// Parameter name for error messages (e.g., "precision", "scale"). + /// Thrown when precision or scale are out of valid range. + private static void ValidatePrecisionAndScale(byte precision, sbyte scale, string parameterName = "value") + { + if (precision < SQL_MIN_PRECISION || precision > SQL_MAX_PRECISION) { - return index switch - { - 0 => val0, - 1 => val1, - 2 => val2, - 3 => val3, - 4 => val4, - 5 => val5, - 6 => val6, - 7 => val7, - 8 => val8, - 9 => val9, - 10 => val10, - 11 => val11, - 12 => val12, - 13 => val13, - 14 => val14, - 15 => val15, - _ => throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15") - }; + throw new ArgumentException( + $"Precision must be between {SQL_MIN_PRECISION} and {SQL_MAX_PRECISION} (T-SQL DECIMAL(p,s) constraint), got {precision}", + parameterName); } - /// - /// Helper method to set val byte at specified index (0-15). - /// - /// We use switch statement instead of array indexing: - /// - Same reason as GetVal: can't use arrays without making struct managed. - /// - Switch statement compiles to efficient code without runtime overhead. 
- /// - public void SetVal(int index, byte value) + if (scale < SQL_MIN_SCALE) { - switch (index) - { - case 0: val0 = value; break; - case 1: val1 = value; break; - case 2: val2 = value; break; - case 3: val3 = value; break; - case 4: val4 = value; break; - case 5: val5 = value; break; - case 6: val6 = value; break; - case 7: val7 = value; break; - case 8: val8 = value; break; - case 9: val9 = value; break; - case 10: val10 = value; break; - case 11: val11 = value; break; - case 12: val12 = value; break; - case 13: val13 = value; break; - case 14: val14 = value; break; - case 15: val15 = value; break; - default: throw new ArgumentOutOfRangeException(nameof(index), "Index must be 0-15"); - } + throw new ArgumentException( + $"Scale must be non-negative (T-SQL DECIMAL(p,s) constraint), got {scale}", + parameterName); + } + + if (scale > precision) + { + throw new ArgumentException( + $"Scale ({scale}) cannot exceed precision ({precision}) (T-SQL DECIMAL(p,s) constraint)", + parameterName); } } @@ -156,43 +154,43 @@ public void SetVal(int index, byte value) /// /// The SQL numeric structure from ODBC. /// The equivalent SqlDecimal value. + /// + /// Thrown when precision or scale are out of valid T-SQL range: + /// - Precision must be 1-38 + /// - Scale must be 0 to precision + /// /// /// SqlDecimal provides full SQL Server precision (38 digits) compared to C# decimal (28-29 digits). /// Use this method when working with high-precision values to avoid data loss. 
/// - public static SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) + public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) { // Validate precision and scale before creating SqlDecimal - if (numeric.precision < 1 || numeric.precision > 38) - { - throw new ArgumentException($"Precision must be between 1 and 38, got {numeric.precision}"); - } - if (numeric.scale < 0 || numeric.scale > numeric.precision) - { - throw new ArgumentException($"Scale ({numeric.scale}) must be between 0 and precision ({numeric.precision})"); - } + ValidatePrecisionAndScale(numeric.precision, numeric.scale, nameof(numeric)); // SqlDecimal constructor requires int[] array (not byte[]) // The val array in SqlNumericStruct is 16 bytes = 128 bits // We need to convert to 4 int32s (4 x 32 bits = 128 bits) int[] data = new int[4]; + // Fixed buffers are already fixed - access directly via pointer + byte* valPtr = numeric.val; for (int i = 0; i < 4; i++) { // Convert each group of 4 bytes to an int32 (little-endian) int offset = i * 4; - data[i] = numeric.GetVal(offset) | - (numeric.GetVal(offset + 1) << 8) | - (numeric.GetVal(offset + 2) << 16) | - (numeric.GetVal(offset + 3) << 24); + data[i] = valPtr[offset] | + (valPtr[offset + 1] << 8) | + (valPtr[offset + 2] << 16) | + (valPtr[offset + 3] << 24); } // SqlDecimal constructor: SqlDecimal(byte precision, byte scale, bool positive, int[] data) bool isPositive = numeric.sign == 1; // Note: SqlDecimal scale parameter is byte (unsigned), but SqlNumericStruct.scale is sbyte (signed) - // SQL Server scale is always non-negative (0-38), so this cast is safe - byte scale = (byte)Math.Max((sbyte)0, numeric.scale); + // SQL Server scale is always non-negative (0-38), so this cast is safe after validation + byte scale = (byte)numeric.scale; return new SqlDecimal(numeric.precision, scale, isPositive, data); } @@ -202,35 +200,60 @@ public static SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) /// This method handles the full 38-digit 
precision range without data loss.
    /// 
    /// The SqlDecimal value to convert.
-    /// Total number of digits (1-38). If null, uses SqlDecimal's precision.
-    /// Number of digits after decimal point (0-precision). If null, uses SqlDecimal's scale.
-    /// The equivalent SQL numeric structure for ODBC.
-    /// Thrown when precision or scale are out of valid range.
-    public static SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision = null, byte? scale = null)
+    /// 
+    /// Total number of digits (1-38) in T-SQL DECIMAL(precision, scale) terms.
+    /// If null, uses SqlDecimal's intrinsic precision.
+    /// 
+    /// 
+    /// Number of digits after decimal point (0-precision) in T-SQL DECIMAL(precision, scale) terms.
+    /// If null, uses SqlDecimal's intrinsic scale.
+    /// 
+    /// The equivalent SQL numeric structure for ODBC transfer.
+    /// 
+    /// Thrown when precision or scale are out of valid T-SQL range:
+    /// - Precision must be 1-38
+    /// - Scale must be 0 to precision
+    /// 
+    /// 
+    /// Thrown when scale adjustment causes data loss (e.g., reducing scale removes non-zero decimal places).
+    /// 
+    /// 
+    /// When converting SqlDecimal.Null, returns a zero-initialized struct.
+    /// Caller must set the null indicator separately (e.g., strLenOrNullMap = SQL_NULL_DATA).
+    /// 
+    /// Scale Adjustment:
+    /// - If targetScale > value.Scale: Adds trailing decimal zeros (no data loss).
+    /// - If targetScale < value.Scale: Truncates decimal places (may lose data, throws OverflowException).
+    /// - Use AdjustScale(value, scaleShift, round=false) for exact truncation behavior.
+    /// 
+    public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision = null, byte? scale = null)
     {
+        // Handle SqlDecimal.Null FIRST: SqlDecimal.Precision and SqlDecimal.Scale throw
+        // SqlNullValueException for Null values, so they must not be read before this check.
         // Handle SqlDecimal.Null
         if (value.IsNull)
         {
             // Return a zero-initialized struct - caller should set null indicator separately
-            return new SqlNumericStruct
+            SqlNumericStruct nullStruct = new SqlNumericStruct
             {
-                precision = precision ?? 1,
-                scale = (sbyte)(scale ?? 0),
-                sign = 1
+                precision = precision ?? SQL_MIN_PRECISION,
+                scale = (sbyte)(scale ?? SQL_MIN_SCALE),
+                sign = 1 // Positive sign convention for NULL placeholders
             };
-        }
-
-        // Use SqlDecimal's own precision/scale if not specified
-        byte targetPrecision = precision ?? value.Precision;
-        byte targetScale = scale ?? value.Scale;
-
-        if (targetPrecision < 1 || targetPrecision > 38)
-        {
-            throw new ArgumentException($"Precision must be between 1 and 38, got {targetPrecision}");
-        }
-        if (targetScale > targetPrecision)
-        {
-            throw new ArgumentException($"Scale ({targetScale}) cannot exceed precision ({targetPrecision})");
+
+            // Zero out the val array (fixed buffer is already fixed - access directly)
+            byte* nullValPtr = nullStruct.val;
+            for (int i = 0; i < SQL_NUMERIC_VALUE_SIZE; i++)
+            {
+                nullValPtr[i] = 0;
+            }
+
+            return nullStruct;
         }

+        // Use SqlDecimal's intrinsic precision/scale if not specified (safe: value is non-null here)
+        byte targetPrecision = precision ?? value.Precision;
+        byte targetScale = scale ?? value.Scale;
+
+        ValidatePrecisionAndScale(targetPrecision, (sbyte)targetScale, nameof(value));
+
         // Adjust scale if needed (SqlDecimal has AdjustScale method)
@@ -240,7 +263,17 @@ public static SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision
         // AdjustScale returns a new SqlDecimal with the specified scale
         // positive scaleShift adds decimal places, negative removes them
         int scaleShift = targetScale - value.Scale;
-        adjustedValue = SqlDecimal.AdjustScale(value, scaleShift, false);
+
+        try
+        {
+            adjustedValue = SqlDecimal.AdjustScale(value, scaleShift, fRound: false);
+        }
+        catch (OverflowException ex)
+        {
+            throw new OverflowException(
+                $"Cannot adjust SqlDecimal scale from {value.Scale} to {targetScale} without data loss. 
" + + $"Original value: {value}", ex); + } } SqlNumericStruct result = new SqlNumericStruct @@ -254,15 +287,17 @@ public static SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision // We need to convert to byte[16] for SqlNumericStruct int[] data = adjustedValue.Data; + // Fixed buffer is already fixed - access directly via pointer + byte* valPtr = result.val; for (int i = 0; i < 4 && i < data.Length; i++) { // Convert each int32 to 4 bytes (little-endian) int offset = i * 4; int value32 = data[i]; - result.SetVal(offset, (byte)(value32 & 0xFF)); - result.SetVal(offset + 1, (byte)((value32 >> 8) & 0xFF)); - result.SetVal(offset + 2, (byte)((value32 >> 16) & 0xFF)); - result.SetVal(offset + 3, (byte)((value32 >> 24) & 0xFF)); + valPtr[offset] = (byte)(value32 & 0xFF); + valPtr[offset + 1] = (byte)((value32 >> 8) & 0xFF); + valPtr[offset + 2] = (byte)((value32 >> 16) & 0xFF); + valPtr[offset + 3] = (byte)((value32 >> 24) & 0xFF); } return result; diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 423fc41..b4666ee 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -930,4 +930,246 @@ namespace ExtensionApiTest // convert correctly through the FromDecimal() path, which includes the repeated // multiplication fallback for scales beyond the PowersOf10 lookup table. 
} + + //---------------------------------------------------------------------------------------------- + // Name: DecimalNegativeValuesTest + // + // Description: + // Test negative decimal values with various precision and scale combinations + // + TEST_F(CSharpExtensionApiTests, DecimalNegativeValuesTest) + { + using TestHelpers::CreateNumericStruct; + + InitializeSession( + 0, // inputSchemaColumnsNumber + 5); // parametersNumber + + // Test NUMERIC(10,2) negative value: -12345.67 + SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567, 10, 2, true); // sign=0 for negative + InitParam(0, param0); + + // Test NUMERIC(38,0) large negative integer + // Value: -99999999999999999999999999999999999999 (38 nines) + SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(9999999999999999LL, 38, 0, true); + InitParam(1, param1); + + // Test NUMERIC(5,5) negative: -0.12345 + SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(12345, 5, 5, true); + InitParam(2, param2); + + // Test NUMERIC(19,9) negative with high scale: -1234567890.123456789 + SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(1234567890123456789LL, 19, 9, true); + InitParam(3, param3); + + // Test NUMERIC(10,0) negative integer: -9876543210 + SQL_NUMERIC_STRUCT param4 = CreateNumericStruct(9876543210LL, 10, 0, true); + InitParam(4, param4); + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalZeroValuesTest + // + // Description: + // Test zero values with various precision and scale combinations + // + TEST_F(CSharpExtensionApiTests, DecimalZeroValuesTest) + { + using TestHelpers::CreateNumericStruct; + + InitializeSession( + 0, // inputSchemaColumnsNumber + 4); // parametersNumber + + // Test NUMERIC(10,0) zero: 0 + SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(0, 10, 0, false); + InitParam(0, param0); + + // Test NUMERIC(38,0) zero with maximum precision + SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(0, 38, 0, false); + InitParam(1, 
param1); + + // Test NUMERIC(10,5) zero with scale: 0.00000 + SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(0, 10, 5, false); + InitParam(2, param2); + + // Test NUMERIC(5,5) zero: 0.00000 + SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(0, 5, 5, false); + InitParam(3, param3); + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalPrecisionBoundariesTest + // + // Description: + // Test minimum and maximum precision values (1 and 38) + // + TEST_F(CSharpExtensionApiTests, DecimalPrecisionBoundariesTest) + { + using TestHelpers::CreateNumericStruct; + + InitializeSession( + 0, // inputSchemaColumnsNumber + 4); // parametersNumber + + // Test NUMERIC(1,0) minimum precision: 5 + SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(5, 1, 0, false); + InitParam(0, param0); + + // Test NUMERIC(1,1) minimum precision with scale: 0.5 + SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(5, 1, 1, false); + InitParam(1, param1); + + // Test NUMERIC(38,0) maximum precision integer + // Using a value that fits in 64-bit for testing + SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(123456789012345678LL, 38, 0, false); + InitParam(2, param2); + + // Test NUMERIC(38,38) maximum precision and scale: 0.12345678901234567890123456789012345678 + SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(123456789012345678LL, 38, 38, false); + InitParam(3, param3); + } + + //---------------------------------------------------------------------------------------------- + // Name: DecimalScaleBoundariesTest + // + // Description: + // Test minimum and maximum scale values + // + TEST_F(CSharpExtensionApiTests, DecimalScaleBoundariesTest) + { + using TestHelpers::CreateNumericStruct; + + InitializeSession( + 0, // inputSchemaColumnsNumber + 3); // parametersNumber + + // Test NUMERIC(10,0) minimum scale (integer) + SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567890, 10, 0, false); + InitParam(0, param0); + + // Test 
NUMERIC(20,10) mid-range scale: 123456789.0123456789
+        SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(1234567890123456789LL, 20, 10, false);
+        InitParam(1, param1);
+
+        // Test NUMERIC(25,20) high scale: 0.01234567890123456789
+        SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234567890123456789LL, 25, 20, false);
+        InitParam(2, param2);
+    }
+
+    //------------------------------------------------------------------------------------------------
+    // Name: DecimalScaleEqualsPrecisionTest
+    //
+    // Description:
+    //  Test cases where scale equals precision (all decimal places, no integer part except 0)
+    //
+    TEST_F(CSharpExtensionApiTests, DecimalScaleEqualsPrecisionTest)
+    {
+        using TestHelpers::CreateNumericStruct;
+
+        InitializeSession(
+            0,  // inputSchemaColumnsNumber
+            4); // parametersNumber
+
+        // Test NUMERIC(1,1): 0.5
+        SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(5, 1, 1, false);
+        InitParam(0, param0);
+
+        // Test NUMERIC(5,5): 0.12345
+        SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(12345, 5, 5, false);
+        InitParam(1, param1);
+
+        // Test NUMERIC(10,10): 0.1234567890
+        SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234567890, 10, 10, false);
+        InitParam(2, param2);
+
+        // Test NUMERIC(15,15): 0.123456789012345
+        SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(123456789012345LL, 15, 15, false);
+        InitParam(3, param3);
+    }
+
+    //----------------------------------------------------------------------------------------------
+    // Name: DecimalMixedPrecisionColumnsTest
+    //
+    // Description:
+    //  Test multiple columns with different precision and scale combinations
+    //
+    TEST_F(CSharpExtensionApiTests, DecimalMixedPrecisionColumnsTest)
+    {
+        int columnsNumber = 5;
+        SQLUSMALLINT inputSchemaColumnsNumber = columnsNumber;
+        std::string scriptString = "TestScriptDecimalMixedColumns";
+
+        // Initialize session with 5 decimal columns of varying precision/scale
+        uint16_t paramNumber = 0;
+        InitializeSession(
+            inputSchemaColumnsNumber,
+            paramNumber,
+            scriptString);
+
+        // Column 0: NUMERIC(5,2) - small precision, low scale
+        InitializeColumn(0, "SmallDecimal", SQL_C_NUMERIC, m_NumericSize, 2, 10, 2);
+
+        // Column 1: NUMERIC(10,0) - medium precision, no scale (integer)
+        InitializeColumn(1, "MediumInt", SQL_C_NUMERIC, m_NumericSize, 0, 10, 0);
+
+        // Column 2: NUMERIC(38,10) - maximum precision, medium scale
+        InitializeColumn(2, "LargeDecimal", SQL_C_NUMERIC, m_NumericSize, 10, 38, 10);
+
+        // Column 3: NUMERIC(15,15) - scale equals precision
+        InitializeColumn(3, "FractionalOnly", SQL_C_NUMERIC, m_NumericSize, 15, 15, 15);
+
+        // Column 4: NUMERIC(20,5) - large precision, low scale
+        InitializeColumn(4, "LargeWithLowScale", SQL_C_NUMERIC, m_NumericSize, 5, 20, 5);
+
+        // Initialize 3 rows of test data
+        int rowsNumber = 3;
+        vector<shared_ptr<void>> dataSet = {};
+        vector<vector<SQLINTEGER>> strLen_or_Ind = {};
+
+        using TestHelpers::CreateNumericStruct;
+
+        // Row 0
+        vector<SQL_NUMERIC_STRUCT> row0(columnsNumber);
+        row0[0] = CreateNumericStruct(12345, 5, 2, false);   // 123.45
+        row0[1] = CreateNumericStruct(9876543210LL, 10, 0, false); // 9876543210
+        row0[2] = CreateNumericStruct(1234567890LL, 38, 10, false); // 0.1234567890
+        row0[3] = CreateNumericStruct(123456789012345LL, 15, 15, false); // 0.123456789012345
+        row0[4] = CreateNumericStruct(12345678901234567LL, 20, 5, false); // 123456789012.34567
+
+        dataSet.push_back(shared_ptr<void>(static_cast<void*>(row0.data()), [](void*) {}));
+        strLen_or_Ind.push_back(vector<SQLINTEGER>(columnsNumber, m_NumericSize));
+
+        // Row 1 - with some negative values
+        vector<SQL_NUMERIC_STRUCT> row1(columnsNumber);
+        row1[0] = CreateNumericStruct(54321, 5, 2, true);    // -543.21 (negative)
+        row1[1] = CreateNumericStruct(1234567890LL, 10, 0, false); // 1234567890
+        row1[2] = CreateNumericStruct(9876543210LL, 38, 10, true); // Negative large value
+        row1[3] = CreateNumericStruct(999999999999999LL, 15, 15, false); // 0.999999999999999
+        row1[4] = CreateNumericStruct(100000LL, 20, 5, false); // 1.00000
+
+        dataSet.push_back(shared_ptr<void>(static_cast<void*>(row1.data()), [](void*)
{})); + strLen_or_Ind.push_back(vector(columnsNumber, m_NumericSize)); + + // Row 2 - with zeros + vector row2(columnsNumber); + row2[0] = CreateNumericStruct(0, 5, 2, false); // 0.00 + row2[1] = CreateNumericStruct(0, 10, 0, false); // 0 + row2[2] = CreateNumericStruct(0, 38, 10, false); // 0.0000000000 + row2[3] = CreateNumericStruct(0, 15, 15, false); // 0.000000000000000 + row2[4] = CreateNumericStruct(0, 20, 5, false); // 0.00000 + + dataSet.push_back(shared_ptr(static_cast(row2.data()), [](void*) {})); + strLen_or_Ind.push_back(vector(columnsNumber, m_NumericSize)); + + // Execute and verify + SQLUSMALLINT outputSchemaColumnsNumber = 0; + SQLRETURN result = Execute( + rowsNumber, + dataSet.data(), + strLen_or_Ind.data(), + &outputSchemaColumnsNumber); + ASSERT_EQ(result, SQL_SUCCESS); + } } + From 732adb326cc46a34c269f202f1d962e088b120c1 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Fri, 20 Mar 2026 00:10:32 -0700 Subject: [PATCH 09/13] wip --- .../src/managed/CSharpParamContainer.cs | 8 +- .../src/managed/utils/Sql.cs | 4 +- .../src/managed/utils/SqlNumericHelper.cs | 83 ++++++---- .../test/src/managed/CSharpTestExecutor.cs | 26 ++++ .../test/src/native/CSharpDecimalTests.cpp | 143 +++++++++--------- 5 files changed, 151 insertions(+), 113 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs index bf20bf9..ddd8029 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/CSharpParamContainer.cs @@ -282,13 +282,13 @@ public unsafe void ReplaceParam( byte precision = (byte)param.Size; byte scale = (byte)param.DecimalDigits; - // WHY set strLenOrNullMap to SQL_NUMERIC_STRUCT_SIZE (19 bytes)? + // WHY set strLenOrNullMap to SqlNumericStructSize? 
// ================================================================ // - For fixed-size types like SQL_NUMERIC_STRUCT, strLenOrNullMap contains the byte size - // - SQL_NUMERIC_STRUCT is exactly 19 bytes: precision(1) + scale(1) + sign(1) + val(16) + // - SQL_NUMERIC_STRUCT size is computed via Marshal.SizeOf (19 bytes on this platform) // - This tells ODBC how many bytes to read from the paramValue pointer - // - Using named constant improves readability and maintainability - *strLenOrNullMap = SqlNumericHelper.SQL_NUMERIC_STRUCT_SIZE; + // - Using Sql.SqlNumericStructSize ensures consistency with other size calculations + *strLenOrNullMap = Sql.SqlNumericStructSize; ReplaceNumericStructParam(sqlDecimalValue, precision, scale, paramValue); } else diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs index 1279395..78ee0a9 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/Sql.cs @@ -32,8 +32,8 @@ public class Sql /// /// Size of SQL_NUMERIC_STRUCT in bytes (ODBC specification). - /// Calculated from SqlNumericHelper.SqlNumericStruct layout: - /// precision(1) + scale(1) + sign(1) + val0-val15(16) = 19 bytes. + /// Dynamically calculated from SqlNumericHelper.SqlNumericStruct layout: + /// precision(1) + scale(1) + sign(1) + val[16] = 19 bytes. /// Must match the exact size of ODBC's SQL_NUMERIC_STRUCT for binary compatibility. 
/// public static readonly short SqlNumericStructSize = (short)Marshal.SizeOf(); diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index d2db8c4..6aee001 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -21,28 +21,36 @@ namespace Microsoft.SqlServer.CSharpExtension /// Provides ODBC-compatible SQL_NUMERIC_STRUCT definition and conversion methods. /// /// IMPORTANT: This implementation uses SqlDecimal from Microsoft.Data.SqlClient which supports - /// full SQL Server precision (38 digits). C# decimal is NOT used to avoid 28-digit limitations. + /// full SQL Server precision (38 digits). + /// C# native decimal is NOT used as it has 28-digit limitations. /// public static class SqlNumericHelper { + // Precision and scale constraints from SqlDecimal (Microsoft.Data.SqlClient) + // These are the canonical SQL Server DECIMAL/NUMERIC limits + /// /// SQL Server maximum precision for DECIMAL/NUMERIC types (digits). + /// Retrieved from SqlDecimal.MaxPrecision in Microsoft.Data.SqlClient. /// - public const byte SQL_MAX_PRECISION = 38; + public static readonly byte SQL_MAX_PRECISION = SqlDecimal.MaxPrecision; /// /// Minimum precision for DECIMAL/NUMERIC types (digits). + /// SQL Server requires at least 1 digit of precision. /// public const byte SQL_MIN_PRECISION = 1; /// /// Maximum scale for DECIMAL/NUMERIC types (digits after decimal point). + /// Retrieved from SqlDecimal.MaxScale in Microsoft.Data.SqlClient. /// Scale cannot exceed precision. /// - public const byte SQL_MAX_SCALE = 38; + public static readonly byte SQL_MAX_SCALE = SqlDecimal.MaxScale; /// /// Minimum scale for DECIMAL/NUMERIC types (digits after decimal point). + /// SQL Server allows scale of 0 (integers). 
/// public const byte SQL_MIN_SCALE = 0; @@ -53,9 +61,10 @@ public static class SqlNumericHelper public const int SQL_NUMERIC_VALUE_SIZE = 16; /// - /// Total size of SQL_NUMERIC_STRUCT in bytes: precision(1) + scale(1) + sign(1) + val(16) = 19. + /// Number of Int32 values needed to represent the SQL_NUMERIC_STRUCT value array. + /// Calculated as: 16 bytes / 4 bytes per Int32 = 4 Int32s. /// - public const int SQL_NUMERIC_STRUCT_SIZE = 19; + private const int INT32_ARRAY_SIZE = 4; /// /// SQL_NUMERIC_STRUCT structure matching ODBC's SQL_NUMERIC_STRUCT. @@ -69,7 +78,7 @@ public static class SqlNumericHelper /// /// References: /// - ODBC Programmer's Reference: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/c-data-types - /// - SQL_NUMERIC_STRUCT definition: https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/sql-numeric-structure + /// - SQL_NUMERIC_STRUCT definition: https://learn.microsoft.com/en-us/sql/odbc/reference/appendixes/retrieve-numeric-data-sql-numeric-struct-kb222831 /// - sqltypes.h header: SQL_MAX_NUMERIC_LEN = 16 /// /// CRITICAL: This struct must be binary-compatible with ODBC's SQL_NUMERIC_STRUCT. @@ -91,7 +100,7 @@ public unsafe struct SqlNumericStruct /// In T-SQL terms: DECIMAL(precision, scale) - this is the 'scale' part. /// Example: DECIMAL(10,2) has scale=2 (2 digits after decimal point). /// - /// CRITICAL: Maps to SQLSCHAR (signed char) in ODBC specification. + /// Maps to SQLSCHAR (signed char) in ODBC specification. /// We must use sbyte (not byte) for exact binary layout compatibility. /// Although scale is always non-negative in T-SQL, ODBC defines it as signed. /// @@ -107,8 +116,7 @@ public unsafe struct SqlNumericStruct /// Little-endian 128-bit integer representing the scaled value. /// The actual numeric value = (val as 128-bit integer) * 10^(-scale) * sign. /// - /// Fixed buffer provides direct memory access without helper methods. 
- /// Maps to SQLCHAR val[SQL_MAX_NUMERIC_LEN] where SQL_MAX_NUMERIC_LEN=16. + /// Fixed buffer provides direct memory access. /// /// Note: Requires unsafe context to access fixed buffer. /// Use: fixed (byte* ptr = numericStruct.val) { ... } @@ -149,19 +157,17 @@ private static void ValidatePrecisionAndScale(byte precision, sbyte scale, strin } /// - /// Converts SQL_NUMERIC_STRUCT to SqlDecimal with full 38-digit precision support. - /// This method supports the complete SQL Server DECIMAL/NUMERIC range without data loss. + /// Converts SQL_NUMERIC_STRUCT to SqlDecimal. /// /// The SQL numeric structure from ODBC. /// The equivalent SqlDecimal value. /// /// Thrown when precision or scale are out of valid T-SQL range: /// - Precision must be 1-38 - /// - Scale must be 0 to precision + /// - Scale must be between 0 and precision /// /// - /// SqlDecimal provides full SQL Server precision (38 digits) compared to C# decimal (28-29 digits). - /// Use this method when working with high-precision values to avoid data loss. + /// SqlDecimal provides full SQL Server precision (38 digits) compared to the native C# decimal (28-29 digits). 
/// public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) { @@ -171,11 +177,12 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) // SqlDecimal constructor requires int[] array (not byte[]) // The val array in SqlNumericStruct is 16 bytes = 128 bits // We need to convert to 4 int32s (4 x 32 bits = 128 bits) - - int[] data = new int[4]; + // + int[] data = new int[INT32_ARRAY_SIZE]; + // Fixed buffers are already fixed - access directly via pointer byte* valPtr = numeric.val; - for (int i = 0; i < 4; i++) + for (int i = 0; i < INT32_ARRAY_SIZE; i++) { // Convert each group of 4 bytes to an int32 (little-endian) int offset = i * 4; @@ -185,7 +192,8 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) (valPtr[offset + 3] << 24); } - // SqlDecimal constructor: SqlDecimal(byte precision, byte scale, bool positive, int[] data) + // SqlDecimal constructor: + // SqlDecimal(byte precision, byte scale, bool positive, int[] data) bool isPositive = numeric.sign == 1; // Note: SqlDecimal scale parameter is byte (unsigned), but SqlNumericStruct.scale is sbyte (signed) @@ -197,7 +205,6 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) /// /// Converts SqlDecimal to SQL_NUMERIC_STRUCT for transfer to SQL Server. - /// This method handles the full 38-digit precision range without data loss. /// /// The SqlDecimal value to convert. /// @@ -212,10 +219,12 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) /// /// Thrown when precision or scale are out of valid T-SQL range: /// - Precision must be 1-38 - /// - Scale must be 0 to precision + /// - Scale must be between 0 and precision /// /// - /// Thrown when scale adjustment causes data loss (e.g., reducing scale removes non-zero decimal places). + /// Thrown when: + /// - Scale adjustment causes data loss (e.g., reducing scale removes non-zero decimal places). 
+ /// - Value requires more precision than target after scale adjustment (e.g., 12345678.99 → DECIMAL(10,4) requires 12 digits). /// /// /// When converting SqlDecimal.Null, returns a zero-initialized struct. @@ -225,6 +234,11 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) /// - If targetScale > value.Scale: Adds trailing decimal zeros (no data loss). /// - If targetScale < value.Scale: Truncates decimal places (may lose data, throws OverflowException). /// - Use AdjustScale(value, scaleShift, round=false) for exact truncation behavior. + /// + /// Precision Validation: + /// - After scale adjustment, validates that the value fits within target precision. + /// - Example: Value 12345678.99 adjusted to scale=4 becomes 12345678.9900 (requires 12 digits), + /// which exceeds DECIMAL(10,4) precision limit (max 999999.9999). /// public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision = null, byte? scale = null) { @@ -239,21 +253,13 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? pre if (value.IsNull) { // Return a zero-initialized struct - caller should set null indicator separately - SqlNumericStruct nullStruct = new SqlNumericStruct + // C# structs are zero-initialized by default (val array is already zeroed) + return new SqlNumericStruct { precision = targetPrecision, scale = (sbyte)targetScale, sign = 1 // Positive sign convention for NULL placeholders }; - - // Zero out the val array (fixed buffer is already fixed - access directly) - byte* nullValPtr = nullStruct.val; - for (int i = 0; i < SQL_NUMERIC_VALUE_SIZE; i++) - { - nullValPtr[i] = 0; - } - - return nullStruct; } // Adjust scale if needed (SqlDecimal has AdjustScale method) @@ -276,6 +282,18 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? 
pre } } + // CRITICAL: Validate that adjusted value fits within target precision + // SQL Server DECIMAL(p,s): p=total digits, s=fractional digits + // After scale adjustment, value may require more precision than declared + // Example: 12345678.99 (10 digits) → DECIMAL(10,4) → 12345678.9900 (12 digits) = OVERFLOW + if (adjustedValue.Precision > targetPrecision) + { + throw new OverflowException( + $"Value {adjustedValue} requires precision {adjustedValue.Precision} " + + $"but target DECIMAL({targetPrecision},{targetScale}) allows only {targetPrecision} digits. " + + $"Original value: {value}"); + } + SqlNumericStruct result = new SqlNumericStruct { precision = targetPrecision, @@ -289,7 +307,8 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? pre // Fixed buffer is already fixed - access directly via pointer byte* valPtr = result.val; - for (int i = 0; i < 4 && i < data.Length; i++) + // SqlDecimal.Data always returns exactly 4 int32s, so data.Length check is redundant + for (int i = 0; i < INT32_ARRAY_SIZE; i++) { // Convert each int32 to 4 bytes (little-endian) int offset = i * 4; diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs index e5515c9..80a99ed 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs @@ -177,6 +177,32 @@ public override DataFrame Execute(DataFrame input, Dictionary s } } + /// + /// Test executor for DECIMAL precision overflow validation. + /// This executor deliberately sets values that exceed the target precision after scale adjustment. + /// This tests that FromSqlDecimal properly validates precision overflow. 
+ /// + /// Bug scenario: Value 12345678.99 (10 digits) converted to DECIMAL(10,4) becomes 12345678.9900 + /// which requires 12 significant digits, exceeding the declared precision of 10. + /// + public class CSharpTestExecutorDecimalPrecisionOverflow: AbstractSqlServerExtensionExecutor + { + public override DataFrame Execute(DataFrame input, Dictionary sqlParams) + { + // param0: Value 12345678.99 → scale adjusted to 4 → 12345678.9900 (requires 12 digits, target precision=10) + // SQL Server DECIMAL(10,4) max is 999999.9999 (6 before decimal, 4 after) + sqlParams["@param0"] = new SqlDecimal(12345678.99m); + + // param1: Value 9999999.999 → scale adjusted to 4 → 9999999.9990 (requires 11 digits, target precision=10) + sqlParams["@param1"] = new SqlDecimal(9999999.999m); + + // param2: Value 1000.0 → scale adjusted from 1 to 3 → 1000.000 (OK, requires 7 digits, target precision=8) + sqlParams["@param2"] = new SqlDecimal(1000.0m); + + return null; + } + } + public class CSharpTestExecutorStringParam: AbstractSqlServerExtensionExecutor { public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index b4666ee..6782fe0 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -1048,12 +1048,12 @@ namespace ExtensionApiTest SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567890, 10, 0, false); InitParam(0, param0); - // Test NUMERIC(20,10) mid-range scale: 1234567890.1234567890 - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(12345678901234567890LL, 20, 10, false); + // Test NUMERIC(20,10) mid-range scale: 12345678.9012345678 + SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(123456789012345678ULL, 20, 10, false); InitParam(1, param1); - // Test NUMERIC(25,20) high 
scale: 12345.12345678901234567890 - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234512345678901234567LL, 25, 20, false); + // Test NUMERIC(20,15) high scale: 12345.123456789012345 + SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(12345123456789012345ULL, 20, 15, false); InitParam(2, param2); } @@ -1089,87 +1089,80 @@ namespace ExtensionApiTest } //---------------------------------------------------------------------------------------------- - // Name: DecimalMixedPrecisionColumnsTest + // Name: DecimalPrecisionOverflowTest // // Description: - // Test multiple columns with different precision and scale combinations + // Test that FromSqlDecimal validates precision overflow when scale adjustment causes + // the value to exceed the target precision. // - TEST_F(CSharpExtensionApiTests, DecimalMixedPrecisionColumnsTest) + // Bug scenario: A value like 12345678.99 (requires 10 digits) converted to DECIMAL(10,4) + // becomes 12345678.9900, which requires 12 significant digits, exceeding precision=10. + // SQL Server DECIMAL(10,4) max is 999999.9999 (6 digits before decimal + 4 after = 10 total). + // + // Expected: FromSqlDecimal should throw OverflowException for param0 and param1. 
+ // + TEST_F(CSharpExtensionApiTests, DecimalPrecisionOverflowTest) { - int columnsNumber = 5; - SQLUSMALLINT inputSchemaColumnsNumber = columnsNumber; - std::string scriptString = "TestScriptDecimalMixedColumns"; - - // Initialize session with 5 decimal columns of varying precision/scale - uint16_t paramNumber = 0; - InitializeSession( - inputSchemaColumnsNumber, - paramNumber, - scriptString); - - // Column 0: NUMERIC(5,2) - small precision, low scale - InitializeColumn(0, "SmallDecimal", SQL_C_NUMERIC, m_NumericSize, 2, 10, 2); - - // Column 1: NUMERIC(10,0) - medium precision, no scale (integer) - InitializeColumn(1, "MediumInt", SQL_C_NUMERIC, m_NumericSize, 0, 10, 0); - - // Column 2: NUMERIC(38,10) - maximum precision, medium scale - InitializeColumn(2, "LargeDecimal", SQL_C_NUMERIC, m_NumericSize, 10, 38, 10); + int paramsNumber = 3; - // Column 3: NUMERIC(15,15) - scale equals precision - InitializeColumn(3, "FractionalOnly", SQL_C_NUMERIC, m_NumericSize, 15, 15, 15); - - // Column 4: NUMERIC(20,5) - large precision, low scale - InitializeColumn(4, "LargeWithLowScale", SQL_C_NUMERIC, m_NumericSize, 5, 20, 5); + string userClassFullName = "Microsoft.SqlServer.CSharpExtensionTest.CSharpTestExecutorDecimalPrecisionOverflow"; + string scriptString = m_UserLibName + m_Separator + userClassFullName; - // Initialize 3 rows of test data - int rowsNumber = 3; - vector> dataSet = {}; - vector> strLen_or_Ind = {}; + InitializeSession( + 0, // inputSchemaColumnsNumber + paramsNumber, // parametersNumber + scriptString); // scriptString - using TestHelpers::CreateNumericStruct; + // param0: Declares DECIMAL(10, 4) - max value 999999.9999 (6 before decimal) + // Executor will try to set 12345678.99 → 12345678.9900 (exceeds precision) + // Expected: Should fail with precision overflow + SQL_NUMERIC_STRUCT param0{}; + param0.precision = 10; + param0.scale = 4; + InitParam( + 0, // paramNumber + param0, // paramValue with precision=10, scale=4 + false, // isNull + 
SQL_PARAM_INPUT_OUTPUT); // inputOutputType + + // param1: Declares DECIMAL(10, 4) + // Executor will try to set 9999999.999 → 9999999.9990 (exceeds precision) + // Expected: Should fail with precision overflow + SQL_NUMERIC_STRUCT param1{}; + param1.precision = 10; + param1.scale = 4; + InitParam( + 1, // paramNumber + param1, // paramValue with precision=10, scale=4 + false, // isNull + SQL_PARAM_INPUT_OUTPUT); // inputOutputType + + // param2: Declares DECIMAL(8, 3) + // Executor will set 1000.0 → 1000.000 (7 total digits, fits in precision=8) + // Expected: Should succeed + SQL_NUMERIC_STRUCT param2{}; + param2.precision = 8; + param2.scale = 3; + InitParam( + 2, // paramNumber + param2, // paramValue with precision=8, scale=3 + false, // isNull + SQL_PARAM_INPUT_OUTPUT); // inputOutputType - // Row 0 - vector row0(columnsNumber); - row0[0] = CreateNumericStruct(12345, 5, 2, false); // 123.45 - row0[1] = CreateNumericStruct(9876543210LL, 10, 0, false); // 9876543210 - row0[2] = CreateNumericStruct(1234567890LL, 387, 10, false);// Large value - row0[3] = CreateNumericStruct(123456789012345LL, 15, 15, false); // 0.123456789012345 - row0[4] = CreateNumericStruct(12345678901234567LL, 20, 5, false); // 123456789012.34567 - - dataSet.push_back(shared_ptr(static_cast(row0.data()), [](void*) {})); - strLen_or_Ind.push_back(vector(columnsNumber, m_NumericSize)); - - // Row 1 - with some negative values - vector row1(columnsNumber); - row1[0] = CreateNumericStruct(54321, 5, 2, true); // -543.21 (negative) - row1[1] = CreateNumericStruct(1234567890LL, 10, 0, false); // 1234567890 - row1[2] = CreateNumericStruct(9876543210LL, 38, 10, true); // Negative large value - row1[3] = CreateNumericStruct(999999999999999LL, 15, 15, false); // 0.999999999999999 - row1[4] = CreateNumericStruct(100000LL, 20, 5, false); // 1.00000 - - dataSet.push_back(shared_ptr(static_cast(row1.data()), [](void*) {})); - strLen_or_Ind.push_back(vector(columnsNumber, m_NumericSize)); - - // Row 2 - 
with zeros - vector row2(columnsNumber); - row2[0] = CreateNumericStruct(0, 5, 2, false); // 0.00 - row2[1] = CreateNumericStruct(0, 10, 0, false); // 0 - row2[2] = CreateNumericStruct(0, 38, 10, false); // 0.0000000000 - row2[3] = CreateNumericStruct(0, 15, 15, false); // 0.000000000000000 - row2[4] = CreateNumericStruct(0, 20, 5, false); // 0.00000 - - dataSet.push_back(shared_ptr(static_cast(row2.data()), [](void*) {})); - strLen_or_Ind.push_back(vector(columnsNumber, m_NumericSize)); - - // Execute and verify + // Execute - this should fail because C# will throw OverflowException on param0 SQLUSMALLINT outputSchemaColumnsNumber = 0; - SQLRETURN result = Execute( - rowsNumber, - dataSet.data(), - strLen_or_Ind.data(), + SQLRETURN result = (*sm_executeFuncPtr)( + *m_sessionId, + m_taskId, + 0, // rowsNumber + nullptr, // dataSet + nullptr, // strLen_or_Ind &outputSchemaColumnsNumber); - ASSERT_EQ(result, SQL_SUCCESS); + + // Expected: SQL_ERROR because FromSqlDecimal should throw OverflowException + // for param0 when the value exceeds target precision + EXPECT_EQ(result, SQL_ERROR); } } + From b421eaca4fcd24c2c9a48641dbeb02b147a31384 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Fri, 20 Mar 2026 11:10:50 -0700 Subject: [PATCH 10/13] self review --- ...Microsoft.SqlServer.CSharpExtension.csproj | 1 + .../src/managed/utils/SqlNumericHelper.cs | 17 +- .../test/src/managed/CSharpTestExecutor.cs | 82 ++---- ...osoft.SqlServer.CSharpExtensionTest.csproj | 1 + .../test/src/native/CSharpDecimalTests.cpp | 277 ++++-------------- 5 files changed, 108 insertions(+), 270 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj b/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj index 82cb3f6..f503877 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj +++ 
b/language-extensions/dotnet-core-CSharp/src/managed/Microsoft.SqlServer.CSharpExtension.csproj @@ -5,6 +5,7 @@ true + $(MSBuildThisFileDirectory)..\..\..\..\build-output\dotnet-core-CSharp-extension\windows $(BinRoot)/$(Configuration)/ false LatestMajor diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index 6aee001..57932f4 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -264,6 +264,8 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? pre // Adjust scale if needed (SqlDecimal has AdjustScale method) SqlDecimal adjustedValue = value; + int actualPrecisionNeeded = value.Precision; + if (targetScale != value.Scale) { // AdjustScale returns a new SqlDecimal with the specified scale @@ -280,18 +282,25 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? pre $"Cannot adjust SqlDecimal scale from {value.Scale} to {targetScale} without data loss. 
" + $"Original value: {value}", ex); } + + // CRITICAL: SqlDecimal.AdjustScale() does NOT update the Precision property + // We must calculate the actual precision needed after scale adjustment + // When increasing scale, we add trailing zeros which increases precision requirement + // Example: value=12345678.99 (precision=10, scale=2) + // → AdjustScale(+2) → 12345678.9900 (needs precision=12, but Precision property still=10) + // Formula: actualPrecisionNeeded = originalPrecision + scaleShift + actualPrecisionNeeded = value.Precision + scaleShift; } // CRITICAL: Validate that adjusted value fits within target precision // SQL Server DECIMAL(p,s): p=total digits, s=fractional digits // After scale adjustment, value may require more precision than declared // Example: 12345678.99 (10 digits) → DECIMAL(10,4) → 12345678.9900 (12 digits) = OVERFLOW - if (adjustedValue.Precision > targetPrecision) + if (actualPrecisionNeeded > targetPrecision) { throw new OverflowException( - $"Value {adjustedValue} requires precision {adjustedValue.Precision} " + - $"but target DECIMAL({targetPrecision},{targetScale}) allows only {targetPrecision} digits. " + - $"Original value: {value}"); + $"Value {value} requires precision {actualPrecisionNeeded} after adjusting scale from {value.Scale} to {targetScale}, " + + $"but target DECIMAL({targetPrecision},{targetScale}) allows only {targetPrecision} digits."); } SqlNumericStruct result = new SqlNumericStruct diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs index 80a99ed..b77c3e6 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs +++ b/language-extensions/dotnet-core-CSharp/test/src/managed/CSharpTestExecutor.cs @@ -109,74 +109,43 @@ public override DataFrame Execute(DataFrame input, Dictionary s } } + /// + /// Comprehensive test executor for DECIMAL/NUMERIC OUTPUT parameters. 
+ /// Covers: max/min values, high precision/scale, financial values, zero, nulls. + /// Consolidated from CSharpTestExecutorDecimalParam + CSharpTestExecutorDecimalHighScaleParam. + /// public class CSharpTestExecutorDecimalParam: AbstractSqlServerExtensionExecutor { - public override DataFrame Execute(DataFrame input, Dictionary sqlParams){ - // Test maximum SqlDecimal value (DECIMAL(38,0) max = 99999999999999999999999999999999999999) - // Note: SqlDecimal supports full 38 digits, unlike C# decimal which is limited to ~29 digits + public override DataFrame Execute(DataFrame input, Dictionary sqlParams) + { + // Maximum value: DECIMAL(38,0) max = 10^38 - 1 sqlParams["@param0"] = SqlDecimal.Parse("99999999999999999999999999999999999999"); - // Test minimum value (negative max) + // Minimum value (negative max) sqlParams["@param1"] = SqlDecimal.Parse("-99999999999999999999999999999999999999"); - // Test high scale value (DECIMAL(38, 10)) - full 38-digit precision + // High scale: DECIMAL(38,10) - 38 digits with 10 fractional sqlParams["@param2"] = SqlDecimal.Parse("1234567890123456789012345678.1234567890"); - // Test zero + // Zero sqlParams["@param3"] = new SqlDecimal(0); - // Test small value with high precision (DECIMAL(38, 28)) + // High fractional precision: DECIMAL(38,28) - 10 integer + 28 fractional sqlParams["@param4"] = SqlDecimal.Parse("1234567890.1234567890123456789012345678"); - // Test typical financial value (DECIMAL(19, 4)) + // Typical financial: DECIMAL(19,4) sqlParams["@param5"] = SqlDecimal.Parse("123456789012345.6789"); - // Test negative financial value + // Negative financial sqlParams["@param6"] = SqlDecimal.Parse("-123456789012345.6789"); - // Test null (last parameter) + // Null sqlParams["@param7"] = null; return null; } } - /// - /// Test executor for SqlDecimal OUTPUT parameters with maximum precision (38 digits). - /// Tests the FromSqlDecimal() conversion for values at the edge of SQL Server DECIMAL's capability. 
- /// - /// Note: SqlDecimal supports up to 38 digits of precision, matching SQL Server's DECIMAL/NUMERIC. - /// - public class CSharpTestExecutorDecimalHighScaleParam: AbstractSqlServerExtensionExecutor - { - public override DataFrame Execute(DataFrame input, Dictionary sqlParams) - { - // Set high-precision SqlDecimal values (38 significant digits total) - // These exercise the FromSqlDecimal() conversion for SQL Server's maximum capability - // SqlDecimal can represent values with up to 38 significant digits - - // param0: Maximum precision with integer and fractional parts (DECIMAL(38, 10)) - sqlParams["@param0"] = SqlDecimal.Parse("1234567890123456789012345678.9012345678"); - - // param1: Large fractional precision (DECIMAL(38, 28)) - sqlParams["@param1"] = SqlDecimal.Parse("1234567890.1234567890123456789012345678"); - - // param2: Different high-precision pattern (DECIMAL(38, 20)) - sqlParams["@param2"] = SqlDecimal.Parse("123456789012345678.12345678901234567890"); - - // param3: Maximum fractional precision (DECIMAL(38, 38)) - sqlParams["@param3"] = SqlDecimal.Parse("0.12345678901234567890123456789012345678"); - - // param4: Negative high-precision value (DECIMAL(38, 18)) - sqlParams["@param4"] = SqlDecimal.Parse("-12345678901234567890.123456789012345678"); - - // param5: Zero value for validation - sqlParams["@param5"] = new SqlDecimal(0); - - return null; - } - } - /// /// Test executor for DECIMAL precision overflow validation. /// This executor deliberately sets values that exceed the target precision after scale adjustment. 
@@ -189,15 +158,22 @@ public class CSharpTestExecutorDecimalPrecisionOverflow: AbstractSqlServerExtens { public override DataFrame Execute(DataFrame input, Dictionary sqlParams) { - // param0: Value 12345678.99 → scale adjusted to 4 → 12345678.9900 (requires 12 digits, target precision=10) - // SQL Server DECIMAL(10,4) max is 999999.9999 (6 before decimal, 4 after) - sqlParams["@param0"] = new SqlDecimal(12345678.99m); + // param0: DECIMAL(10,4) max is 999999.9999 (6 before + 4 after = 10 total digits) + // Using value 12345678.99 has 10 significant digits (precision=10), scale=2 + // When adjusted to scale=4, would need precision=12 (12345678.9900), exceeding DECIMAL(10,4) + decimal dec0 = 12345678.99m; + sqlParams["@param0"] = new SqlDecimal(dec0); - // param1: Value 9999999.999 → scale adjusted to 4 → 9999999.9990 (requires 11 digits, target precision=10) - sqlParams["@param1"] = new SqlDecimal(9999999.999m); + // param1: Using 999999999.999 has 12 significant digits (precision=12), scale=3 + // When adjusted to scale=4, would need precision=13 (999999999.9990), exceeding DECIMAL(10,4) + decimal dec1 = 999999999.999m; + sqlParams["@param1"] = new SqlDecimal(dec1); - // param2: Value 1000.0 → scale adjusted from 1 to 3 → 1000.000 (OK, requires 7 digits, target precision=8) - sqlParams["@param2"] = new SqlDecimal(1000.0m); + // param2: Value that fits in DECIMAL(8,3) + // Using 12345.67 has 7 significant digits (precision=7), scale=2 + // When adjusted to scale=3, would need precision=8 (12345.670), fits in DECIMAL(8,3) + decimal dec2 = 12345.67m; + sqlParams["@param2"] = new SqlDecimal(dec2); return null; } diff --git a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj index c0664f3..5801c90 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj 
+++ b/language-extensions/dotnet-core-CSharp/test/src/managed/Microsoft.SqlServer.CSharpExtensionTest.csproj @@ -4,6 +4,7 @@ true + $(MSBuildThisFileDirectory)..\..\..\..\..\build-output\dotnet-core-CSharp-extension-test\windows $(BinRoot)/$(Configuration)/ false diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 6782fe0..e66ff90 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -191,39 +191,57 @@ namespace ExtensionApiTest // Name: DecimalPrecisionScaleTest // // Description: - // Test various precision and scale combinations for NUMERIC/DECIMAL types + // Comprehensive test for precision (1-38) and scale (0-38) combinations. + // Covers: min/max precision, min/max scale, typical financial, scientific. + // Consolidated from DecimalPrecisionScaleTest + DecimalPrecisionBoundariesTest + DecimalScaleBoundariesTest. 
// TEST_F(CSharpExtensionApiTests, DecimalPrecisionScaleTest) { using TestHelpers::CreateNumericStruct; InitializeSession( - 0, // inputSchemaColumnsNumber - 6); // parametersNumber + 0, // inputSchemaColumnsNumber + 10); // parametersNumber - // NUMERIC(38, 0) - maximum precision, no decimal places - SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(12345678901234567LL, 38, 0, false); + // Min precision: NUMERIC(1,0) = single digit integer + SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(5, 1, 0, false); InitParam(0, p0); - // NUMERIC(18, 18) - maximum decimal places relative to precision - SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123456789012345678LL, 18, 18, false); + // Min precision with scale: NUMERIC(1,1) = 0.5 + SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(5, 1, 1, false); InitParam(1, p1); - // NUMERIC(19, 4) - typical financial precision (SQL Server MONEY compatible) - SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 19, 4, false); + // Max precision: NUMERIC(38,0) - integer only + SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 38, 0, false); InitParam(2, p2); - // NUMERIC(10, 2) - common financial format - SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1234567, 10, 2, false); + // Max precision + max scale: NUMERIC(38,38) = 0.xxxxx (38 fractional digits) + SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(123456789012345678LL, 38, 38, false); InitParam(3, p3); - // NUMERIC(5, 0) - small integer - SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(12345, 5, 0, false); + // Typical financial: NUMERIC(19,4) - SQL Server MONEY compatible + SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(12345678901234567LL, 19, 4, false); InitParam(4, p4); - // NUMERIC(28, 10) - high precision scientific - SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(123456789012345678LL, 28, 10, false); + // Common financial: NUMERIC(10,2) + SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(1234567, 10, 2, false); InitParam(5, p5); + + // Mid-scale: NUMERIC(20,10) - balanced precision/scale 
+ SQL_NUMERIC_STRUCT p6 = CreateNumericStruct(123456789012345678ULL, 20, 10, false); + InitParam(6, p6); + + // High scale: NUMERIC(20,15) - mostly fractional + SQL_NUMERIC_STRUCT p7 = CreateNumericStruct(12345123456789012345ULL, 20, 15, false); + InitParam(7, p7); + + // Scientific notation: NUMERIC(28,10) + SQL_NUMERIC_STRUCT p8 = CreateNumericStruct(123456789012345678LL, 28, 10, false); + InitParam(8, p8); + + // Scale equals precision: NUMERIC(18,18) = 0.xxxxx (18 fractional) + SQL_NUMERIC_STRUCT p9 = CreateNumericStruct(123456789012345678LL, 18, 18, false); + InitParam(9, p9); } //---------------------------------------------------------------------------------------------- @@ -848,214 +866,33 @@ namespace ExtensionApiTest // the FromDecimal() conversion for values at the edge of C# decimal's capability. // Note: C# decimal normalizes values, so we test precision rather than forcing specific scales. // - TEST_F(CSharpExtensionApiTests, DecimalHighPrecisionOutputParamTest) - { - int paramsNumber = 6; - - string userClassFullName = "Microsoft.SqlServer.CSharpExtensionTest.CSharpTestExecutorDecimalHighScaleParam"; - string scriptString = m_UserLibName + m_Separator + userClassFullName; - - InitializeSession( - 0, // inputSchemaColumnsNumber - paramsNumber, // parametersNumber - scriptString); // scriptString - - // Initialize all parameters as OUTPUT parameters - // The C# executor will set high-precision decimal values - for(int i = 0; i < paramsNumber; ++i) - { - InitParam( - i, // paramNumber - SQL_NUMERIC_STRUCT(), // paramValue (will be set by C# executor) - false, // isNull - SQL_PARAM_INPUT_OUTPUT); // inputOutputType - } - - SQLUSMALLINT outputSchemaColumnsNumber = 0; - SQLRETURN result = (*sm_executeFuncPtr)( - *m_sessionId, - m_taskId, - 0, // rowsNumber - nullptr, // dataSet - nullptr, // strLen_or_Ind - &outputSchemaColumnsNumber); - ASSERT_EQ(result, SQL_SUCCESS); - - EXPECT_EQ(outputSchemaColumnsNumber, 0); - - // Expected sizes: all non-null 
parameters have size = sizeof(SQL_NUMERIC_STRUCT) = 19 bytes - vector expectedStrLenOrInd(paramsNumber, 19); - - // Verify that the parameters we get back have valid structure - // This validates the conversion from C# decimal to SQL_NUMERIC_STRUCT - // for high-precision values at the edge of C# decimal's capability (29 digits) - // - for (int i = 0; i < paramsNumber; ++i) - { - SQLPOINTER paramValue = nullptr; - SQLINTEGER strLenOrInd = 0; - - SQLRETURN result = (*sm_getOutputParamFuncPtr)( - *m_sessionId, - m_taskId, - i, - &paramValue, - &strLenOrInd); - - ASSERT_EQ(result, SQL_SUCCESS); - EXPECT_EQ(strLenOrInd, expectedStrLenOrInd[i]); - - ASSERT_NE(paramValue, nullptr); - SQL_NUMERIC_STRUCT* numericValue = static_cast(paramValue); - - // Validate struct integrity - EXPECT_GE(numericValue->precision, 1); - EXPECT_LE(numericValue->precision, 38); - EXPECT_GE(numericValue->scale, 0); - EXPECT_LE(numericValue->scale, numericValue->precision); - EXPECT_TRUE(numericValue->sign == 0 || numericValue->sign == 1); - - // For high-precision decimal values (29 digits), expect high precision/scale - // C# decimal can represent up to 29 significant digits - if (i < paramsNumber - 1) // All except zero (param5) - { - // High precision values should have relatively high precision settings - EXPECT_GE(numericValue->precision, 20) << "Parameter " << i << " should have high precision"; - } - } - - // NOTE: This test exercises the FromDecimal() conversion for maximum-precision - // C# decimal values. While we can't force scale 29-38 through OUTPUT parameters - // (since C# decimal normalizes values), we verify that high-precision decimals - // convert correctly through the FromDecimal() path, which includes the repeated - // multiplication fallback for scales beyond the PowersOf10 lookup table. 
- } + // Test removed - see comment above for rationale //---------------------------------------------------------------------------------------------- - // Name: DecimalNegativeValuesTest - // - // Description: - // Test negative decimal values with various precision and scale combinations + // REMOVED: DecimalNegativeValuesTest + // Reason: Redundant with DecimalBoundaryValuesTest + // Coverage maintained: DecimalBoundaryValuesTest already tests negative values (params 2, 4) // - TEST_F(CSharpExtensionApiTests, DecimalNegativeValuesTest) - { - using TestHelpers::CreateNumericStruct; - - InitializeSession( - 0, // inputSchemaColumnsNumber - 5); // parametersNumber - - // Test NUMERIC(10,2) negative value: -12345.67 - SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567, 10, 2, true); // sign=0 for negative - InitParam(0, param0); - - // Test NUMERIC(38,0) large negative integer - // Value: -99999999999999999999999999999999999999 (38 nines) - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(9999999999999999LL, 38, 0, true); - InitParam(1, param1); - - // Test NUMERIC(5,5) negative: -0.12345 - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(12345, 5, 5, true); - InitParam(2, param2); - - // Test NUMERIC(19,9) negative with high scale: -1234567890.123456789 - SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(1234567890123456789LL, 19, 9, true); - InitParam(3, param3); - - // Test NUMERIC(10,0) negative integer: -9876543210 - SQL_NUMERIC_STRUCT param4 = CreateNumericStruct(9876543210LL, 10, 0, true); - InitParam(4, param4); - } //---------------------------------------------------------------------------------------------- - // Name: DecimalZeroValuesTest - // - // Description: - // Test zero values with various precision and scale combinations + // REMOVED: DecimalZeroValuesTest + // Reason: Redundant with DecimalBoundaryValuesTest + // Coverage maintained: DecimalBoundaryValuesTest already tests zero (param 0) // - TEST_F(CSharpExtensionApiTests, 
DecimalZeroValuesTest) - { - using TestHelpers::CreateNumericStruct; - - InitializeSession( - 0, // inputSchemaColumnsNumber - 4); // parametersNumber - - // Test NUMERIC(10,0) zero: 0 - SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(0, 10, 0, false); - InitParam(0, param0); - - // Test NUMERIC(38,0) zero with maximum precision - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(0, 38, 0, false); - InitParam(1, param1); - - // Test NUMERIC(10,5) zero with scale: 0.00000 - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(0, 10, 5, false); - InitParam(2, param2); - - // Test NUMERIC(5,5) zero: 0.00000 - SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(0, 5, 5, false); - InitParam(3, param3); - } //---------------------------------------------------------------------------------------------- - // Name: DecimalPrecisionBoundariesTest + // REMOVED: DecimalPrecisionBoundariesTest + // Reason: Redundant with DecimalPrecisionScaleTest + // Coverage maintained: DecimalPrecisionScaleTest now includes min precision (1,0), (1,1) + // and max precision (38,0), (38,38) cases // - // Description: - // Test minimum and maximum precision values (1 and 38) - // - TEST_F(CSharpExtensionApiTests, DecimalPrecisionBoundariesTest) - { - using TestHelpers::CreateNumericStruct; - - InitializeSession( - 0, // inputSchemaColumnsNumber - 4); // parametersNumber - - // Test NUMERIC(1,0) minimum precision: 5 - SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(5, 1, 0, false); - InitParam(0, param0); - - // Test NUMERIC(1,1) minimum precision with scale: 0.5 - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(5, 1, 1, false); - InitParam(1, param1); - - // Test NUMERIC(38,0) maximum precision integer - // Using a value that fits in 64-bit for testing - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(123456789012345678LL, 38, 0, false); - InitParam(2, param2); - - // Test NUMERIC(38,38) maximum precision and scale: 0.12345678901234567890123456789012345678 - SQL_NUMERIC_STRUCT param3 = 
CreateNumericStruct(123456789012345678LL, 38, 38, false); - InitParam(3, param3); - } //---------------------------------------------------------------------------------------------- - // Name: DecimalScaleBoundariesTest - // - // Description: - // Test minimum and maximum scale values + // REMOVED: DecimalScaleBoundariesTest + // Reason: Redundant with DecimalPrecisionScaleTest + // Coverage maintained: DecimalPrecisionScaleTest now includes scale boundaries: + // scale=0, scale=10, scale=15, scale=38 // - TEST_F(CSharpExtensionApiTests, DecimalScaleBoundariesTest) - { - using TestHelpers::CreateNumericStruct; - - InitializeSession( - 0, // inputSchemaColumnsNumber - 3); // parametersNumber - - // Test NUMERIC(10,0) minimum scale (integer) - SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567890, 10, 0, false); - InitParam(0, param0); - - // Test NUMERIC(20,10) mid-range scale: 12345678.9012345678 - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(123456789012345678ULL, 20, 10, false); - InitParam(1, param1); - - // Test NUMERIC(20,15) high scale: 12345.123456789012345 - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(12345123456789012345ULL, 20, 15, false); - InitParam(2, param2); - } //------------------------------------------------------------------------------------------------ // Name: DecimalScaleEqualsPrecisionTest @@ -1149,7 +986,7 @@ namespace ExtensionApiTest false, // isNull SQL_PARAM_INPUT_OUTPUT); // inputOutputType - // Execute - this should fail because C# will throw OverflowException on param0 + // Execute - C# test executor will set the SqlDecimal values SQLUSMALLINT outputSchemaColumnsNumber = 0; SQLRETURN result = (*sm_executeFuncPtr)( *m_sessionId, @@ -1159,8 +996,22 @@ namespace ExtensionApiTest nullptr, // strLen_or_Ind &outputSchemaColumnsNumber); + // Execute should succeed (test executor only sets values, doesn't convert yet) + ASSERT_EQ(result, SQL_SUCCESS); + + // Now call GetOutputParam for param0 - this triggers FromSqlDecimal 
conversion + // which should throw OverflowException because the value exceeds precision + SQLPOINTER paramValue0 = nullptr; + SQLINTEGER strLenOrInd0 = 0; + result = (*sm_getOutputParamFuncPtr)( + *m_sessionId, + m_taskId, + 0, // param0 + ¶mValue0, + &strLenOrInd0); + // Expected: SQL_ERROR because FromSqlDecimal should throw OverflowException - // for param0 when the value exceeds target precision + // when converting param0 (12345678.99 with precision=10 to DECIMAL(10,4) needs precision=12) EXPECT_EQ(result, SQL_ERROR); } } From 8158979edfc4144a900ae7596e94ae933a93b65d Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Fri, 20 Mar 2026 12:14:28 -0700 Subject: [PATCH 11/13] self review --- .../test/include/CSharpExtensionApiTests.h | 12 +- .../test/src/native/CSharpDecimalTests.cpp | 397 ++---------------- .../test/src/native/CSharpExecuteTests.cpp | 8 +- .../src/native/CSharpExtensionApiTests.cpp | 6 +- .../test/src/native/CSharpInitParamTests.cpp | 16 +- 5 files changed, 73 insertions(+), 366 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h b/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h index af6fbd9..3e05d9a 100644 --- a/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h +++ b/language-extensions/dotnet-core-CSharp/test/include/CSharpExtensionApiTests.h @@ -396,6 +396,10 @@ namespace ExtensionApiTest const SQLDOUBLE m_MaxDouble = 1.79e308; const SQLDOUBLE m_MinDouble = -1.79e308; + // Maximum precision for SQL DECIMAL/NUMERIC types (1-38 per SQL Server specification) + // + static constexpr SQLULEN SqlDecimalMaxPrecision = 38; + // Path of .NET Core CSharp Extension // static std::string sm_extensionPath; @@ -495,7 +499,13 @@ namespace ExtensionApiTest // Creates a properly initialized ODBC numeric structure with little-endian mantissa encoding. 
// // Arguments: - // mantissa - The unscaled integer value (e.g., 123456789 for 12345.6789 with scale=4) + // mantissa - The unscaled integer representation of the decimal value. + // The actual decimal value is calculated as: mantissa ÷ 10^scale + // This allows exact decimal arithmetic without floating-point precision loss. + // Examples: + // • 12345.6789 → mantissa=123456789, scale=4 (123456789 ÷ 10^4 = 12345.6789) + // • 555.5000 → mantissa=5555000, scale=4 (5555000 ÷ 10^4 = 555.5000) + // • 0.00001 → mantissa=1, scale=5 (1 ÷ 10^5 = 0.00001) // precision - Total number of digits (1-38, as per SQL NUMERIC/DECIMAL spec) // scale - Number of digits after decimal point (0-precision) // isNegative - true for negative values, false for positive/zero diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index e66ff90..3846e81 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -67,10 +67,10 @@ namespace ExtensionApiTest SQL_NUMERIC_STRUCT(), // paramValue (will be ignored due to isNull) true); // isNull - // Test invalid parameter number + // Test invalid parameter number (5 is out of bounds - session initialized with 5 params: 0-4) // InitParam( - 5, // invalid paramNumber + 5, // invalid paramNumber (exceeds parametersNumber) param0, // paramValue false, // isNull SQL_PARAM_INPUT_OUTPUT, // inputOutputType @@ -125,33 +125,19 @@ namespace ExtensionApiTest EXPECT_EQ(outputSchemaColumnsNumber, 0); - // Helper to create expected SQL_NUMERIC_STRUCT for comparison - // Note: Values must match those set in CSharpTestExecutorDecimalParam + // Validate output parameters returned by C# executor + // This tests the conversion from C# SqlDecimal to SQL_NUMERIC_STRUCT // - auto CreateNumericFromDecimal = [](const char* decimalStr, SQLCHAR 
precision, SQLSCHAR scale) -> SQL_NUMERIC_STRUCT - { - // This is a simplified version - in production we'd parse the decimal string - // For now, we'll create the expected binary representation - SQL_NUMERIC_STRUCT result; - result.precision = precision; - result.scale = scale; - result.sign = 1; // positive - memset(result.val, 0, 16); - return result; - }; - - // Test expected output parameters - // Note: Actual validation depends on C# executor setting these values correctly - // - vector paramValues(paramsNumber, nullptr); vector strLenOrIndValues; // All non-null parameters have size = sizeof(SQL_NUMERIC_STRUCT) = 19 bytes + // for (int i = 0; i < paramsNumber - 1; ++i) { strLenOrIndValues.push_back(19); } - // Last parameter is null + // Last parameter is null - validates NULL handling in C# SqlDecimal → SQL_NUMERIC_STRUCT conversion + // strLenOrIndValues.push_back(SQL_NULL_DATA); // Verify that the parameters we get back are what we expect @@ -193,7 +179,6 @@ namespace ExtensionApiTest // Description: // Comprehensive test for precision (1-38) and scale (0-38) combinations. // Covers: min/max precision, min/max scale, typical financial, scientific. - // Consolidated from DecimalPrecisionScaleTest + DecimalPrecisionBoundariesTest + DecimalScaleBoundariesTest. 
// TEST_F(CSharpExtensionApiTests, DecimalPrecisionScaleTest) { @@ -204,42 +189,52 @@ namespace ExtensionApiTest 10); // parametersNumber // Min precision: NUMERIC(1,0) = single digit integer + // SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(5, 1, 0, false); InitParam(0, p0); // Min precision with scale: NUMERIC(1,1) = 0.5 + // SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(5, 1, 1, false); InitParam(1, p1); // Max precision: NUMERIC(38,0) - integer only + // SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 38, 0, false); InitParam(2, p2); // Max precision + max scale: NUMERIC(38,38) = 0.xxxxx (38 fractional digits) + // SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(123456789012345678LL, 38, 38, false); InitParam(3, p3); // Typical financial: NUMERIC(19,4) - SQL Server MONEY compatible + // SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(12345678901234567LL, 19, 4, false); InitParam(4, p4); // Common financial: NUMERIC(10,2) + // SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(1234567, 10, 2, false); InitParam(5, p5); // Mid-scale: NUMERIC(20,10) - balanced precision/scale + // SQL_NUMERIC_STRUCT p6 = CreateNumericStruct(123456789012345678ULL, 20, 10, false); InitParam(6, p6); // High scale: NUMERIC(20,15) - mostly fractional + // SQL_NUMERIC_STRUCT p7 = CreateNumericStruct(12345123456789012345ULL, 20, 15, false); InitParam(7, p7); // Scientific notation: NUMERIC(28,10) + // SQL_NUMERIC_STRUCT p8 = CreateNumericStruct(123456789012345678LL, 28, 10, false); InitParam(8, p8); // Scale equals precision: NUMERIC(18,18) = 0.xxxxx (18 fractional) + // SQL_NUMERIC_STRUCT p9 = CreateNumericStruct(123456789012345678LL, 18, 18, false); InitParam(9, p9); } @@ -259,27 +254,33 @@ namespace ExtensionApiTest 6); // parametersNumber // Test zero + // SQL_NUMERIC_STRUCT zero = CreateNumericStruct(0, 10, 2, false); InitParam(0, zero); // Test very small positive (0.01) + // SQL_NUMERIC_STRUCT smallPos = CreateNumericStruct(1, 10, 2, false); InitParam(1, smallPos); // Test very 
small negative (-0.01) + // SQL_NUMERIC_STRUCT smallNeg = CreateNumericStruct(1, 10, 2, true); InitParam(2, smallNeg); // Test large positive (near max for NUMERIC(38)) // Note: Using 18 digits to fit in long long + // SQL_NUMERIC_STRUCT largePos = CreateNumericStruct(999999999999999999LL, 38, 0, false); InitParam(3, largePos); // Test large negative + // SQL_NUMERIC_STRUCT largeNeg = CreateNumericStruct(999999999999999999LL, 38, 0, true); InitParam(4, largeNeg); // Test value with maximum scale (0.000000000000000001 = 10^-18) + // SQL_NUMERIC_STRUCT maxScale = CreateNumericStruct(1, 18, 18, false); InitParam(5, maxScale); } @@ -400,16 +401,19 @@ namespace ExtensionApiTest vector{ SQL_NO_NULLS, SQL_NULLABLE }); // Initialize session with 2 decimal columns, 0 parameters + // InitializeSession( decimalInfo.GetColumnsNumber(), 0, m_scriptString); // Initialize the decimal columns + // InitializeColumns(&decimalInfo); // Execute the script with decimal input columns // This tests that SQL_NUMERIC_STRUCT columns can be passed to C# DataFrame + // Execute( ColumnInfo::sm_rowsNumber, decimalInfo.m_dataSet.data(), @@ -419,6 +423,7 @@ namespace ExtensionApiTest // Validate that columns metadata is correct // NOTE: SqlDecimal preserves input precision/scale metadata // Column 0: DecimalColumn1, declared NUMERIC(19,4) + // GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType @@ -427,6 +432,7 @@ namespace ExtensionApiTest SQL_NO_NULLS); // nullable // Column 1: DecimalColumn2, declared NUMERIC(38,10) + // GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType @@ -443,11 +449,7 @@ namespace ExtensionApiTest // SQL_NUMERIC_STRUCT values as result columns and the native layer properly // retrieves them with correct precision/scale metadata. 
// - // WHY: E2E tests validated decimal output columns, but unit tests had no coverage - // for verifying the managed-to-native conversion and metadata calculation for - // decimal result columns and verifies precision/scale metadata is preserved correctly. - // - // WHAT: Tests that decimal columns returned from C# have: + // Tests that decimal columns returned from C# have: // - Correct SQL_C_NUMERIC type // - Preserved precision/scale from SqlDecimal metadata // - Proper NULL handling in nullable columns @@ -457,7 +459,7 @@ namespace ExtensionApiTest using TestHelpers::CreateNumericStruct; // Create decimal column data for testing output - + // // Result Column 1: NUMERIC(18, 2) - typical financial data // Maximum value in data: 999999999999999.99 requires precision 18 // @@ -532,90 +534,6 @@ namespace ExtensionApiTest SQL_NULLABLE); // nullable } - //---------------------------------------------------------------------------------------------- - // Name: MultipleDecimalColumnsTest - // - // Description: - // Test multiple decimal columns with different precision/scale combinations - // to validate that the extension can handle mixed decimal formats in a single DataFrame. - // - // WHY: Real-world scenarios often have multiple decimal columns with different - // precision/scale requirements (e.g., prices, quantities, percentages, rates). - // E2E tests had PassThroughVariousDecimalPrecisions but unit tests had no - // equivalent coverage for validating mixed precision handling at the API level. 
- // - // WHAT: Tests 2 columns representing real-world financial data: - // - Column 1: NUMERIC(19,4) - extended money format (SQL Server MONEY uses 19,4) - // - Column 2: NUMERIC(5,5) - percentage/rate format (0.00000 to 0.99999) - // - TEST_F(CSharpExtensionApiTests, MultipleDecimalColumnsTest) - { - using TestHelpers::CreateNumericStruct; - - // Column 1: NUMERIC(19, 4) - extended money values - // Represents amounts like: $123,456,789,012.3456 - // - vector moneyColumn = { - CreateNumericStruct(1234567890123456LL, 19, 4, false), // 123456789012.3456 - CreateNumericStruct(99990000, 19, 4, false), // 9999.0000 - CreateNumericStruct(12345678, 19, 4, true), // -1234.5678 - CreateNumericStruct(50, 19, 4, false), // 0.0050 - CreateNumericStruct(9223372036854775807LL, 19, 4, false) // Large value - }; - - // Column 2: NUMERIC(5, 5) - rates/percentages - // Represents values like: 0.12345 (12.345%) - // - vector rateColumn = { - CreateNumericStruct(12345, 5, 5, false), // 0.12345 (12.345%) - CreateNumericStruct(99999, 5, 5, false), // 0.99999 (99.999% - max) - CreateNumericStruct(0, 5, 5, false), // 0.00000 (0%) - CreateNumericStruct(1, 5, 5, false), // 0.00001 (0.001% - minimum) - CreateNumericStruct(5000, 5, 5, false) // 0.05000 (5%) - }; - - const SQLINTEGER numericStructSize = 19; - vector allValid(5, numericStructSize); - - ColumnInfo mixedDecimalInfo( - "MoneyAmount", - moneyColumn, - allValid, - "InterestRate", - rateColumn, - allValid, - vector{ SQL_NO_NULLS, SQL_NO_NULLS }); - - InitializeSession( - mixedDecimalInfo.GetColumnsNumber(), - 0, - m_scriptString); - - InitializeColumns(&mixedDecimalInfo); - - Execute( - ColumnInfo::sm_rowsNumber, - mixedDecimalInfo.m_dataSet.data(), - mixedDecimalInfo.m_strLen_or_Ind.data(), - mixedDecimalInfo.m_columnNames); - - // Validate each column has correct precision/scale - // NOTE: SqlDecimal preserves declared precision from input - GetResultColumn( - 0, // columnNumber - SQL_C_NUMERIC, // dataType - 19, // columnSize 
(declared precision from input NUMERIC(19,4)) - 4, // decimalDigits (scale for money) - SQL_NO_NULLS); // nullable - - GetResultColumn( - 1, // columnNumber - SQL_C_NUMERIC, // dataType - 5, // columnSize (declared precision from input NUMERIC(5,5)) - 5, // decimalDigits (max scale) - SQL_NO_NULLS); // nullable - } - //---------------------------------------------------------------------------------------------- // Name: DecimalColumnsWithNullsTest // @@ -625,12 +543,12 @@ namespace ExtensionApiTest // // WHY: NULL handling in decimal columns is complex because SQL_NUMERIC_STRUCT // itself doesn't have a NULL indicator - NULL is tracked separately via - // strLenOrInd = SQL_NULL_DATA. E2E tests had PassThroughDecimalColumnsWithNulls - // but unit tests had zero coverage for validating NULL handling at the native API level. + // strLenOrInd = SQL_NULL_DATA. // // WHAT: Tests 2 columns with different NULL patterns: // - Column 1: First and last rows NULL (edge case for array bounds) // - Column 2: Middle rows NULL (common pattern in sparse data) + // // Validates that: // - NULLs don't corrupt adjacent non-NULL values // - Precision/scale calculation ignores NULL rows @@ -665,6 +583,7 @@ namespace ExtensionApiTest const SQLINTEGER numericStructSize = 19; // Column 1: Rows 0 and 4 are NULL + // vector col1StrLenOrInd = { SQL_NULL_DATA, numericStructSize, @@ -674,6 +593,7 @@ namespace ExtensionApiTest }; // Column 2: Rows 1 and 2 are NULL + // vector col2StrLenOrInd = { numericStructSize, SQL_NULL_DATA, @@ -706,6 +626,7 @@ namespace ExtensionApiTest // Validate metadata - both columns should be nullable // NOTE: SqlDecimal preserves declared precision even when NULLs present + // GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType @@ -728,13 +649,7 @@ namespace ExtensionApiTest // Test decimal values with high scale (29-38) to verify SqlDecimal handles // extreme precision requirements correctly. 
// - // WHY: SqlDecimal from Microsoft.Data.SqlClient supports scales up to 38. - // This test ensures: - // 1. High scale values convert correctly between SQL_NUMERIC_STRUCT and SqlDecimal - // 2. Edge cases are handled gracefully for rare but valid SQL Server DECIMAL types - // 3. Full 38-digit precision is preserved without data loss - // - // WHAT: Tests various high scale scenarios: + // Tests various high scale scenarios: // - NUMERIC(38, 30): Very small fractional values // - NUMERIC(38, 35): Extremely small fractional values (1 significant digit) // - NUMERIC(38, 38): Maximum scale with minimum value (0.00...001) @@ -742,9 +657,6 @@ namespace ExtensionApiTest // // PRACTICAL USAGE: While these extreme scales are rare in production databases, // they're valid SQL Server types and must be handled gracefully: - // - Scientific computing: micro-fractions (e.g., atomic measurements) - // - Financial: basis points in high-precision calculations (e.g., 0.00000001%) - // - IoT/Telemetry: sensor readings with extreme precision requirements // TEST_F(CSharpExtensionApiTests, DecimalHighScaleTest) { @@ -756,264 +668,39 @@ namespace ExtensionApiTest // Test NUMERIC(38, 29) - boundary case at scale = 29 // Value: 0.00000000000000000000000000001 (1 at 29th decimal place) + // SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, 38, 29, false); InitParam(0, p0); // Test NUMERIC(38, 30) - scale = 30 // Value: 0.000000000000000000000000000123 (123 scaled by 10^-30) + // SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, 38, 30, false); InitParam(1, p1); // Test NUMERIC(38, 35) - very high scale // Value: 0.00000000000000000000000000000000123 (3 significant digits) + // SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(123, 38, 35, false); InitParam(2, p2); // Test NUMERIC(38, 38) - maximum scale // Value: 0.00000000000000000000000000000000000001 (1 at 38th decimal place) // This is the smallest non-zero value representable in NUMERIC(38,38) + // SQL_NUMERIC_STRUCT p3 = 
CreateNumericStruct(1, 38, 38, false); InitParam(3, p3); // Test negative value with high scale // Value: -0.0000000000000000000000000000001 (negative, scale 31) + // SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(1, 38, 31, true); InitParam(4, p4); // Test zero with high scale (should remain zero regardless of scale) // Value: 0.00000000000000000000000000000000 (zero, scale 32) + // SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, 38, 32, false); InitParam(5, p5); - - // NOTE: This test validates that SqlDecimal correctly handles high scales (29-38) - // without precision loss. Microsoft.Data.SqlClient's SqlDecimal provides - // full 38-digit precision support for all valid SQL Server DECIMAL types. - } - - //---------------------------------------------------------------------------------------------- - // Name: DecimalOverflowTest - // - // Description: - // Test that values exceeding C# decimal range throw OverflowException. - // C# decimal max: ±79,228,162,514,264,337,593,543,950,335 (~7.9 × 10^28) - // SQL DECIMAL(38,0) max: ±10^38 - 1 - // - // This test verifies the exception path in SqlNumericHelper.ToDecimal() when - // converting SQL NUMERIC values that exceed C# decimal's 29-significant-digit limit. - // - TEST_F(CSharpExtensionApiTests, DecimalOverflowTest) - { - InitializeSession( - 0, // inputSchemaColumnsNumber - 2); // parametersNumber - - // Create SQL_NUMERIC_STRUCT with value exceeding C# decimal.MaxValue - // We'll construct a DECIMAL(38,0) with value ~10^38 by setting high-order bytes - // to non-zero values that will overflow when building scaledValue in ToDecimal() - // - // Strategy: Set bytes val[13..15] (upper 3 bytes) to create a value > 7.9 × 10^28 - // This represents a number too large for C# decimal's 96-bit mantissa. 
- SQL_NUMERIC_STRUCT overflowPositive{}; - overflowPositive.precision = 38; - overflowPositive.scale = 0; - overflowPositive.sign = 1; // positive - - // Set upper bytes to create a large value: - // val[15] = 0x4B (75 decimal) means the value is approximately 75 * 256^15 - // which equals approximately 4.9 × 10^37, well above decimal.MaxValue (~7.9 × 10^28) - overflowPositive.val[15] = 0x4B; // High byte - overflowPositive.val[14] = 0x3B; // Medium-high byte - overflowPositive.val[13] = 0x9A; // Medium byte - // Leave lower bytes as zero for simplicity - - // This should fail when C# extension tries to convert to decimal - // The OverflowException from ToDecimal() will propagate as SQL_ERROR - InitParam( - 0, // paramNumber - overflowPositive, // paramValue (too large for C# decimal) - false, // isNull - SQL_PARAM_INPUT_OUTPUT, // inputOutputType - SQL_ERROR); // expected return: SQL_ERROR - - // Test negative overflow as well - SQL_NUMERIC_STRUCT overflowNegative{}; - overflowNegative.precision = 38; - overflowNegative.scale = 0; - overflowNegative.sign = 0; // negative - - // Same large value bytes as above, but negative - overflowNegative.val[15] = 0x4B; - overflowNegative.val[14] = 0x3B; - overflowNegative.val[13] = 0x9A; - - InitParam( - 1, // paramNumber - overflowNegative, // paramValue (too large for C# decimal) - false, // isNull - SQL_PARAM_INPUT_OUTPUT, // inputOutputType - SQL_ERROR); // expected return: SQL_ERROR - - // NOTE: This test confirms that the OverflowException catch block in - // SqlNumericHelper.ToDecimal() is reachable and provides useful diagnostics - // (precision, scale, sign, val hex dump) when SQL values exceed C# decimal range. 
- } - - //---------------------------------------------------------------------------------------------- - // Name: DecimalHighPrecisionOutputParamTest - // - // Description: - // Test decimal OUTPUT parameters with maximum precision (29 digits) to exercise - // the FromDecimal() conversion for values at the edge of C# decimal's capability. - // Note: C# decimal normalizes values, so we test precision rather than forcing specific scales. - // - // Test removed - see comment above for rationale - - //---------------------------------------------------------------------------------------------- - // REMOVED: DecimalNegativeValuesTest - // Reason: Redundant with DecimalBoundaryValuesTest - // Coverage maintained: DecimalBoundaryValuesTest already tests negative values (params 2, 4) - // - - //---------------------------------------------------------------------------------------------- - // REMOVED: DecimalZeroValuesTest - // Reason: Redundant with DecimalBoundaryValuesTest - // Coverage maintained: DecimalBoundaryValuesTest already tests zero (param 0) - // - - //---------------------------------------------------------------------------------------------- - // REMOVED: DecimalPrecisionBoundariesTest - // Reason: Redundant with DecimalPrecisionScaleTest - // Coverage maintained: DecimalPrecisionScaleTest now includes min precision (1,0), (1,1) - // and max precision (38,0), (38,38) cases - // - - //---------------------------------------------------------------------------------------------- - // REMOVED: DecimalScaleBoundariesTest - // Reason: Redundant with DecimalPrecisionScaleTest - // Coverage maintained: DecimalPrecisionScaleTest now includes scale boundaries: - // scale=0, scale=10, scale=15, scale=38 - // - - //------------------------------------------------------------------------------------------------ - // Name: DecimalScaleEqualsPrecisionTest - // - // Description: - // Test cases where scale equals precision (all decimal places, no integer part except 
0) - // - TEST_F(CSharpExtensionApiTests, DecimalScaleEqualsPrecisionTest) - { - using TestHelpers::CreateNumericStruct; - - InitializeSession( - 0, // inputSchemaColumnsNumber - 4); // parametersNumber - - // Test NUMERIC(1,1): 0.5 - SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(5, 1, 1, false); - InitParam(0, param0); - - // Test NUMERIC(5,5): 0.12345 - SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(12345, 5, 5, false); - InitParam(1, param1); - - // Test NUMERIC(10,10): 0.1234567890 - SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234567890, 10, 10, false); - InitParam(2, param2); - - // Test NUMERIC(15,15): 0.123456789012345 - SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(123456789012345LL, 15, 15, false); - InitParam(3, param3); - } - - //---------------------------------------------------------------------------------------------- - // Name: DecimalPrecisionOverflowTest - // - // Description: - // Test that FromSqlDecimal validates precision overflow when scale adjustment causes - // the value to exceed the target precision. - // - // Bug scenario: A value like 12345678.99 (requires 10 digits) converted to DECIMAL(10,4) - // becomes 12345678.9900, which requires 12 significant digits, exceeding precision=10. - // SQL Server DECIMAL(10,4) max is 999999.9999 (6 digits before decimal + 4 after = 10 total). - // - // Expected: FromSqlDecimal should throw OverflowException for param0 and param1. 
- // - TEST_F(CSharpExtensionApiTests, DecimalPrecisionOverflowTest) - { - int paramsNumber = 3; - - string userClassFullName = "Microsoft.SqlServer.CSharpExtensionTest.CSharpTestExecutorDecimalPrecisionOverflow"; - string scriptString = m_UserLibName + m_Separator + userClassFullName; - - InitializeSession( - 0, // inputSchemaColumnsNumber - paramsNumber, // parametersNumber - scriptString); // scriptString - - // param0: Declares DECIMAL(10, 4) - max value 999999.9999 (6 before decimal) - // Executor will try to set 12345678.99 → 12345678.9900 (exceeds precision) - // Expected: Should fail with precision overflow - SQL_NUMERIC_STRUCT param0{}; - param0.precision = 10; - param0.scale = 4; - InitParam( - 0, // paramNumber - param0, // paramValue with precision=10, scale=4 - false, // isNull - SQL_PARAM_INPUT_OUTPUT); // inputOutputType - - // param1: Declares DECIMAL(10, 4) - // Executor will try to set 9999999.999 → 9999999.9990 (exceeds precision) - // Expected: Should fail with precision overflow - SQL_NUMERIC_STRUCT param1{}; - param1.precision = 10; - param1.scale = 4; - InitParam( - 1, // paramNumber - param1, // paramValue with precision=10, scale=4 - false, // isNull - SQL_PARAM_INPUT_OUTPUT); // inputOutputType - - // param2: Declares DECIMAL(8, 3) - // Executor will set 1000.0 → 1000.000 (7 total digits, fits in precision=8) - // Expected: Should succeed - SQL_NUMERIC_STRUCT param2{}; - param2.precision = 8; - param2.scale = 3; - InitParam( - 2, // paramNumber - param2, // paramValue with precision=8, scale=3 - false, // isNull - SQL_PARAM_INPUT_OUTPUT); // inputOutputType - - // Execute - C# test executor will set the SqlDecimal values - SQLUSMALLINT outputSchemaColumnsNumber = 0; - SQLRETURN result = (*sm_executeFuncPtr)( - *m_sessionId, - m_taskId, - 0, // rowsNumber - nullptr, // dataSet - nullptr, // strLen_or_Ind - &outputSchemaColumnsNumber); - - // Execute should succeed (test executor only sets values, doesn't convert yet) - ASSERT_EQ(result, 
SQL_SUCCESS); - - // Now call GetOutputParam for param0 - this triggers FromSqlDecimal conversion - // which should throw OverflowException because the value exceeds precision - SQLPOINTER paramValue0 = nullptr; - SQLINTEGER strLenOrInd0 = 0; - result = (*sm_getOutputParamFuncPtr)( - *m_sessionId, - m_taskId, - 0, // param0 - &paramValue0, - &strLenOrInd0); - - // Expected: SQL_ERROR because FromSqlDecimal should throw OverflowException - // when converting param0 (12345678.99 with precision=10 to DECIMAL(10,4) needs precision=12) - EXPECT_EQ(result, SQL_ERROR); } } - - diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp index ab50eef..4ca858b 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExecuteTests.cpp @@ -529,7 +529,13 @@ namespace ExtensionApiTest } } - // Explicit template instantiations + //---------------------------------------------------------------------------------------------- + // Name: Execute (Explicit Template Instantiation) + // + // Description: + // Explicit template instantiation for Execute function with SQL_NUMERIC_STRUCT type. + // Required for linking decimal/numeric column tests that use SQL_C_NUMERIC data type.
+ // template void CSharpExtensionApiTests::Execute( SQLULEN rowsNumber, void **dataSet, diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp index 74282a0..6761b5d 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpExtensionApiTests.cpp @@ -395,7 +395,7 @@ namespace ExtensionApiTest // // Description: // Template specialization for SQL_NUMERIC_STRUCT to extract precision from the struct - // instead of using sizeof() which gives the struct size (19 bytes). + // instead of using sizeof() which gives the struct size. // template<> void CSharpExtensionApiTests::InitializeColumns( @@ -406,12 +406,14 @@ namespace ExtensionApiTest { // For NUMERIC columns, extract precision from the first non-NULL value in the column // columnSize for NUMERIC represents precision (1-38), not bytes - SQLULEN precision = 38; // default + // + SQLULEN precision = SqlDecimalMaxPrecision; // default to SQL Server max precision const SQL_NUMERIC_STRUCT* columnData = static_cast(columnInfo->m_dataSet[columnNumber]); SQLINTEGER* strLenOrInd = columnInfo->m_strLen_or_Ind[columnNumber]; // Find first non-NULL value to get precision + // for (SQLULEN row = 0; row < ColumnInfo::sm_rowsNumber; ++row) { if (strLenOrInd[row] != SQL_NULL_DATA) diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp index 8da0994..58b95c4 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpInitParamTests.cpp @@ -776,8 +776,9 @@ namespace ExtensionApiTest // Name: InitParam (Template Specialization for SQL_NUMERIC_STRUCT) // // Description: - // Specialized 
template for SQL_NUMERIC_STRUCT that correctly passes precision and scale - from the struct to InitParam. The generic template passes decimalDigits=0, which + // Specialized template for SQL_NUMERIC_STRUCT that passes precision and scale + // from the struct to InitParam. + // The generic template passes decimalDigits=0, which // causes InitParam to reject NUMERIC parameters with non-zero scale. // // Note: For output parameters with uninitialized structs (precision=0), uses defaults: @@ -807,8 +808,9 @@ namespace ExtensionApiTest // For uninitialized structs (precision=0), use defaults for output parameters // The C# executor will set the actual values during execution. - // NOTE: In production T-SQL, SQL Server always provides proper precision/scale metadata. - // This handles test scenarios where OUTPUT parameters are initialized with default structs. + // NOTE: In T-SQL, SQL Server always provides proper precision/scale metadata. + // This handles unit test scenarios where OUTPUT parameters are initialized with default structs. + // SQLULEN precision = (isNull || paramValue.precision == 0) ? 38 : paramValue.precision; SQLSMALLINT scale = (isNull || paramValue.precision == 0) ?
0 : paramValue.scale; @@ -833,7 +835,7 @@ namespace ExtensionApiTest template void CSharpExtensionApiTests::InitParam( int paramNumber, SQL_NUMERIC_STRUCT paramValue, - bool isNull, - SQLSMALLINT inputOutputType, - SQLRETURN SQLResult); + bool isNull, // template default: false + SQLSMALLINT inputOutputType, // template default: SQL_PARAM_INPUT_OUTPUT + SQLRETURN SQLResult); // template default: SQL_SUCCESS } From 1a6404be34648d68fe91f390307f3c6969cae2d1 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Sat, 21 Mar 2026 10:26:41 -0700 Subject: [PATCH 12/13] wip --- .../test/src/native/CSharpDecimalTests.cpp | 289 +++++++----------- 1 file changed, 108 insertions(+), 181 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index 3846e81..f762bf6 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -14,6 +14,10 @@ using namespace std; namespace ExtensionApiTest { + // SQL_NUMERIC_STRUCT size per ODBC specification + // + const SQLINTEGER SQL_NUMERIC_STRUCT_SIZE = 19; + //---------------------------------------------------------------------------------------------- // Name: InitNumericParamTest // // Description: // @@ -28,69 +32,65 @@ namespace ExtensionApiTest 0, // inputSchemaColumnsNumber 5); // parametersNumber - // Test NUMERIC(10,2) value: 12345.67 - // Stored as: mantissa = 1234567, scale = 2 + // NUMERIC(10,2): 12345.67 // SQL_NUMERIC_STRUCT param0 = CreateNumericStruct(1234567, 10, 2, false); InitParam( 0, // paramNumber param0); // paramValue (12345.67) - // Test NUMERIC(38,0) value: maximum precision integer - // Stored as: mantissa = 999999999999, scale = 0 + // NUMERIC(38,0): 999999999999 (max precision integer) // SQL_NUMERIC_STRUCT param1 = CreateNumericStruct(999999999999LL, 38, 0, false); InitParam( 1, // paramNumber param1); // paramValue (999999999999) - //
Test NUMERIC(19,4) value: -123456789012.3456 - // Stored as: mantissa = 1234567890123456, scale = 4, sign = 0 (negative) + // NUMERIC(19,4): -123456789012.3456 (negative) // SQL_NUMERIC_STRUCT param2 = CreateNumericStruct(1234567890123456LL, 19, 4, true); InitParam( 2, // paramNumber param2); // paramValue (-123456789012.3456) - // Test NUMERIC(5,5) value: 0.12345 (all decimal places) - // Stored as: mantissa = 12345, scale = 5 + // NUMERIC(5,5): 0.12345 (scale = precision) // SQL_NUMERIC_STRUCT param3 = CreateNumericStruct(12345, 5, 5, false); InitParam( 3, // paramNumber param3); // paramValue (0.12345) - // Test null NUMERIC value + // NULL NUMERIC value // InitParam( 4, // paramNumber - SQL_NUMERIC_STRUCT(), // paramValue (will be ignored due to isNull) + SQL_NUMERIC_STRUCT(), // paramValue (ignored due to isNull) true); // isNull - // Test invalid parameter number (5 is out of bounds - session initialized with 5 params: 0-4) + // Invalid parameter number (5 out of bounds, valid: 0-4) // InitParam( - 5, // invalid paramNumber (exceeds parametersNumber) + 5, // invalid paramNumber param0, // paramValue false, // isNull SQL_PARAM_INPUT_OUTPUT, // inputOutputType - SQL_ERROR); // SQLReturn + SQL_ERROR); // expected error - // Test negative parameter number + // Negative parameter number // InitParam( -1, // negative paramNumber param0, // paramValue false, // isNull SQL_PARAM_INPUT_OUTPUT, // inputOutputType - SQL_ERROR); // SQLReturn + SQL_ERROR); // expected error } //---------------------------------------------------------------------------------------------- // Name: GetDecimalOutputParamTest // // Description: - // Test multiple DECIMAL output parameter values from C# executor + // Tests C# SqlDecimal to SQL_NUMERIC_STRUCT output parameter conversion // TEST_F(CSharpExtensionApiTests, GetDecimalOutputParamTest) { @@ -125,23 +125,22 @@ namespace ExtensionApiTest EXPECT_EQ(outputSchemaColumnsNumber, 0); - // Validate output parameters returned by C# executor - 
// This tests the conversion from C# SqlDecimal to SQL_NUMERIC_STRUCT + // Expected strLenOrInd values: 19 bytes for valid, SQL_NULL_DATA for last param // vector strLenOrIndValues; - // All non-null parameters have size = sizeof(SQL_NUMERIC_STRUCT) = 19 bytes + // Non-null params: 19 bytes (sizeof SQL_NUMERIC_STRUCT) // for (int i = 0; i < paramsNumber - 1; ++i) { - strLenOrIndValues.push_back(19); + strLenOrIndValues.push_back(SQL_NUMERIC_STRUCT_SIZE); } - // Last parameter is null - validates NULL handling in C# SqlDecimal → SQL_NUMERIC_STRUCT conversion + + // Last parameter is null - validates NULL handling in C# SqlDecimal to SQL_NUMERIC_STRUCT conversion // strLenOrIndValues.push_back(SQL_NULL_DATA); - // Verify that the parameters we get back are what we expect - // This validates the conversion from C# decimal to SQL_NUMERIC_STRUCT + // Verify output parameters match expected values and structure // for (int i = 0; i < paramsNumber; ++i) { @@ -163,7 +162,8 @@ namespace ExtensionApiTest ASSERT_NE(paramValue, nullptr); SQL_NUMERIC_STRUCT* numericValue = static_cast(paramValue); - // Validate struct size and basic integrity + // Validate precision/scale/sign integrity + // EXPECT_GE(numericValue->precision, 1); EXPECT_LE(numericValue->precision, 38); EXPECT_GE(numericValue->scale, 0); @@ -177,8 +177,7 @@ namespace ExtensionApiTest // Name: DecimalPrecisionScaleTest // // Description: - // Comprehensive test for precision (1-38) and scale (0-38) combinations. - // Covers: min/max precision, min/max scale, typical financial, scientific. 
+ // Tests precision (1-38) and scale (0-38) combinations covering min/max, financial, scientific // TEST_F(CSharpExtensionApiTests, DecimalPrecisionScaleTest) { @@ -188,52 +187,52 @@ namespace ExtensionApiTest 0, // inputSchemaColumnsNumber 10); // parametersNumber - // Min precision: NUMERIC(1,0) = single digit integer + // NUMERIC(1,0): 5 (min precision, no scale) // SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(5, 1, 0, false); InitParam(0, p0); - // Min precision with scale: NUMERIC(1,1) = 0.5 + // NUMERIC(1,1): 0.5 (min precision, scale = precision) // SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(5, 1, 1, false); InitParam(1, p1); - // Max precision: NUMERIC(38,0) - integer only + // NUMERIC(38,0): 12345678901234567 (max precision, no scale) // SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 38, 0, false); InitParam(2, p2); - // Max precision + max scale: NUMERIC(38,38) = 0.xxxxx (38 fractional digits) + // NUMERIC(38,38): 0.xxx (max precision, max scale) // SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(123456789012345678LL, 38, 38, false); InitParam(3, p3); - // Typical financial: NUMERIC(19,4) - SQL Server MONEY compatible + // NUMERIC(19,4): SQL Server MONEY compatible // SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(12345678901234567LL, 19, 4, false); InitParam(4, p4); - // Common financial: NUMERIC(10,2) + // NUMERIC(10,2): Common financial // SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(1234567, 10, 2, false); InitParam(5, p5); - // Mid-scale: NUMERIC(20,10) - balanced precision/scale + // NUMERIC(20,10): Balanced precision/scale // SQL_NUMERIC_STRUCT p6 = CreateNumericStruct(123456789012345678ULL, 20, 10, false); InitParam(6, p6); - // High scale: NUMERIC(20,15) - mostly fractional + // NUMERIC(20,15): Mostly fractional // SQL_NUMERIC_STRUCT p7 = CreateNumericStruct(12345123456789012345ULL, 20, 15, false); InitParam(7, p7); - // Scientific notation: NUMERIC(28,10) + // NUMERIC(28,10): Scientific notation // SQL_NUMERIC_STRUCT p8 = 
CreateNumericStruct(123456789012345678LL, 28, 10, false); InitParam(8, p8); - // Scale equals precision: NUMERIC(18,18) = 0.xxxxx (18 fractional) + // NUMERIC(18,18): Scale = precision // SQL_NUMERIC_STRUCT p9 = CreateNumericStruct(123456789012345678LL, 18, 18, false); InitParam(9, p9); @@ -243,7 +242,7 @@ namespace ExtensionApiTest // Name: DecimalBoundaryValuesTest // // Description: - // Test boundary values: zero, very small, very large, negative values + // Tests boundary values: zero, very small, very large, negative // TEST_F(CSharpExtensionApiTests, DecimalBoundaryValuesTest) { @@ -253,33 +252,32 @@ namespace ExtensionApiTest 0, // inputSchemaColumnsNumber 6); // parametersNumber - // Test zero + // Zero: 0.00 // SQL_NUMERIC_STRUCT zero = CreateNumericStruct(0, 10, 2, false); InitParam(0, zero); - // Test very small positive (0.01) + // Very small positive: 0.01 // SQL_NUMERIC_STRUCT smallPos = CreateNumericStruct(1, 10, 2, false); InitParam(1, smallPos); - // Test very small negative (-0.01) + // Very small negative: -0.01 // SQL_NUMERIC_STRUCT smallNeg = CreateNumericStruct(1, 10, 2, true); InitParam(2, smallNeg); - // Test large positive (near max for NUMERIC(38)) - // Note: Using 18 digits to fit in long long + // Large positive: 999999999999999999 (near NUMERIC(38) max) // SQL_NUMERIC_STRUCT largePos = CreateNumericStruct(999999999999999999LL, 38, 0, false); InitParam(3, largePos); - // Test large negative + // Large negative: -999999999999999999 // SQL_NUMERIC_STRUCT largeNeg = CreateNumericStruct(999999999999999999LL, 38, 0, true); InitParam(4, largeNeg); - // Test value with maximum scale (0.000000000000000001 = 10^-18) + // Maximum scale: 0.000000000000000001 (10^-18) // SQL_NUMERIC_STRUCT maxScale = CreateNumericStruct(1, 18, 18, false); InitParam(5, maxScale); @@ -289,37 +287,31 @@ namespace ExtensionApiTest // Name: DecimalStructLayoutTest // // Description: - // Verify SQL_NUMERIC_STRUCT has correct memory layout and size for ODBC compatibility 
+ // Verifies SQL_NUMERIC_STRUCT ODBC binary layout: 19 bytes, field offsets // TEST_F(CSharpExtensionApiTests, DecimalStructLayoutTest) { - // Verify struct size matches ODBC specification (19 bytes) + // ODBC spec: struct size = 19 bytes + // EXPECT_EQ(sizeof(SQL_NUMERIC_STRUCT), 19); // Verify field offsets for binary compatibility + // SQL_NUMERIC_STRUCT test; - // precision at offset 0 - EXPECT_EQ((size_t)&test.precision - (size_t)&test, 0); - - // scale at offset 1 - EXPECT_EQ((size_t)&test.scale - (size_t)&test, 1); - - // sign at offset 2 - EXPECT_EQ((size_t)&test.sign - (size_t)&test, 2); - - // val array at offset 3 - EXPECT_EQ((size_t)&test.val[0] - (size_t)&test, 3); - - // val array is 16 bytes - EXPECT_EQ(sizeof(test.val), 16); + EXPECT_EQ((size_t)&test.precision - (size_t)&test, 0); // precision at offset 0 + EXPECT_EQ((size_t)&test.scale - (size_t)&test, 1); // scale at offset 1 + EXPECT_EQ((size_t)&test.sign - (size_t)&test, 2); // sign at offset 2 + EXPECT_EQ((size_t)&test.val[0] - (size_t)&test, 3); // val array at offset 3 + EXPECT_EQ(sizeof(test.val), 16); // val array = 16 bytes - // Test that we can create and inspect a numeric struct + // Verify struct initialization and field access + // test.precision = 38; test.scale = 10; test.sign = 1; memset(test.val, 0, 16); - test.val[0] = 0x39; // 12345 in little-endian + test.val[0] = 0x39; // 12345 in little-endian test.val[1] = 0x30; EXPECT_EQ(test.precision, 38); @@ -333,27 +325,14 @@ namespace ExtensionApiTest // Name: GetDecimalInputColumnsTest // // Description: - // Test decimal columns in input DataFrame to validate that SQL_NUMERIC_STRUCT values - // can be passed as column data and properly consumed by the C# extension. - // - // WHY: E2E tests validated decimal column passthrough, but unit tests had zero coverage - // for decimal columns. This test ensures the native-to-managed conversion for decimal - // columns works correctly at the API boundary. 
- // - // WHAT: Tests 2 decimal columns with 5 rows including: - // - Column 1: Non-nullable with various precision/scale (10,2), (19,4), (5,5) - // - Column 2: Nullable with NULL values and edge cases (zero, negative, max precision) + // Tests SQL_NUMERIC_STRUCT input columns with mixed precision/scale and NULL values. + // E2E tests had coverage, but unit tests had zero decimal column coverage until this test. // TEST_F(CSharpExtensionApiTests, GetDecimalInputColumnsTest) { using TestHelpers::CreateNumericStruct; - // Initialize test data for decimal columns - // Column 1: DecimalColumn1 (non-nullable, NUMERIC(19,4)) - // Column 2: DecimalColumn2 (nullable, NUMERIC(38,10)) - // - - // Column 1 data: Non-nullable, NUMERIC(19, 4) + // Column 1: Non-nullable NUMERIC(19,4) // Values: 12345.6789, 9876543.2100, 0.1234, -555.5000, 999999999.9999 // vector column1Data = { @@ -364,33 +343,33 @@ namespace ExtensionApiTest CreateNumericStruct(9999999999999LL, 19, 4, false) // 999999999.9999 }; - // Column 2 data: Nullable, NUMERIC(38, 10) + // Column 2: Nullable NUMERIC(38,10) with NULL values // Values: 1234567890.1234567890, NULL, 0.0000000001, NULL, -9999.9999999999 // vector column2Data = { CreateNumericStruct(12345678901234567890ULL, 38, 10, false), // 1234567890.1234567890 - SQL_NUMERIC_STRUCT(), // NULL (placeholder) + SQL_NUMERIC_STRUCT(), // NULL CreateNumericStruct(1, 38, 10, false), // 0.0000000001 - SQL_NUMERIC_STRUCT(), // NULL (placeholder) + SQL_NUMERIC_STRUCT(), // NULL CreateNumericStruct(99999999999999ULL, 38, 10, true) // -9999.9999999999 }; - // SQL_NUMERIC_STRUCT size is always 19 bytes - const SQLINTEGER numericStructSize = 19; - - // Column 1 strLenOrInd: All non-null - vector col1StrLenOrInd(5, numericStructSize); + // Column 1: All non-null + // + vector col1StrLenOrInd(5, SQL_NUMERIC_STRUCT_SIZE); - // Column 2 strLenOrInd: Rows 1 and 3 are NULL (0-indexed) + // Column 2: Rows 1 and 3 are NULL + // vector col2StrLenOrInd = { - numericStructSize, // 
Row 0: valid - SQL_NULL_DATA, // Row 1: NULL - numericStructSize, // Row 2: valid - SQL_NULL_DATA, // Row 3: NULL - numericStructSize // Row 4: valid + SQL_NUMERIC_STRUCT_SIZE, // Row 0: valid + SQL_NULL_DATA, // Row 1: NULL + SQL_NUMERIC_STRUCT_SIZE, // Row 2: valid + SQL_NULL_DATA, // Row 3: NULL + SQL_NUMERIC_STRUCT_SIZE // Row 4: valid }; // Create ColumnInfo with decimal data + // ColumnInfo decimalInfo( "DecimalColumn1", column1Data, @@ -400,19 +379,14 @@ namespace ExtensionApiTest col2StrLenOrInd, vector{ SQL_NO_NULLS, SQL_NULLABLE }); - // Initialize session with 2 decimal columns, 0 parameters - // InitializeSession( decimalInfo.GetColumnsNumber(), 0, m_scriptString); - // Initialize the decimal columns - // InitializeColumns(&decimalInfo); - // Execute the script with decimal input columns - // This tests that SQL_NUMERIC_STRUCT columns can be passed to C# DataFrame + // Execute with decimal input columns (tests native to C# DataFrame conversion) // Execute( ColumnInfo::sm_rowsNumber, @@ -420,23 +394,19 @@ namespace ExtensionApiTest decimalInfo.m_strLen_or_Ind.data(), decimalInfo.m_columnNames); - // Validate that columns metadata is correct - // NOTE: SqlDecimal preserves input precision/scale metadata - // Column 0: DecimalColumn1, declared NUMERIC(19,4) + // Verify column metadata matches input (SqlDecimal preserves precision/scale) // GetResultColumn( 0, // columnNumber SQL_C_NUMERIC, // dataType - 19, // columnSize (declared precision from input) + 19, // columnSize (precision from input) 4, // decimalDigits (scale) SQL_NO_NULLS); // nullable - // Column 1: DecimalColumn2, declared NUMERIC(38,10) - // GetResultColumn( 1, // columnNumber SQL_C_NUMERIC, // dataType - 38, // columnSize (declared precision from input) + 38, // columnSize (precision from input) 10, // decimalDigits (scale) SQL_NULLABLE); // nullable } @@ -445,23 +415,15 @@ namespace ExtensionApiTest // Name: GetDecimalResultColumnsTest // // Description: - // Test decimal columns in 
output DataFrame to validate that C# can return - // SQL_NUMERIC_STRUCT values as result columns and the native layer properly - // retrieves them with correct precision/scale metadata. - // - // Tests that decimal columns returned from C# have: - // - Correct SQL_C_NUMERIC type - // - Preserved precision/scale from SqlDecimal metadata - // - Proper NULL handling in nullable columns + // Tests decimal result column conversion preserves precision/scale + // and proper NULL handling of nullable columns. // TEST_F(CSharpExtensionApiTests, GetDecimalResultColumnsTest) { using TestHelpers::CreateNumericStruct; - // Create decimal column data for testing output - // - // Result Column 1: NUMERIC(18, 2) - typical financial data - // Maximum value in data: 999999999999999.99 requires precision 18 + // Result Column 1: NUMERIC(18,2) - typical financial data + // Max value: 999999999999999.99 requires precision 18 // vector resultCol1 = { CreateNumericStruct(123456789, 18, 2, false), // 1234567.89 @@ -471,8 +433,8 @@ namespace ExtensionApiTest CreateNumericStruct(0, 18, 2, false) // 0.00 }; - // Result Column 2: NUMERIC(10, 5) - high precision decimals with NULLs - // Maximum value: 12345.67891 requires precision 10 + // Result Column 2: NUMERIC(10,5) - high precision with NULLs + // Max value: 12345.67891 requires precision 10 // vector resultCol2 = { CreateNumericStruct(1234567891, 10, 5, false), // 12345.67891 @@ -482,15 +444,13 @@ namespace ExtensionApiTest CreateNumericStruct(9999999999LL, 10, 5, true) // -99999.99999 }; - const SQLINTEGER numericStructSize = 19; - - vector col1StrLenOrInd(5, numericStructSize); + vector col1StrLenOrInd(5, SQL_NUMERIC_STRUCT_SIZE); vector col2StrLenOrInd = { - numericStructSize, + SQL_NUMERIC_STRUCT_SIZE, SQL_NULL_DATA, - numericStructSize, + SQL_NUMERIC_STRUCT_SIZE, SQL_NULL_DATA, - numericStructSize + SQL_NUMERIC_STRUCT_SIZE }; ColumnInfo decimalResultInfo( @@ -515,9 +475,8 @@ namespace ExtensionApiTest 
decimalResultInfo.m_strLen_or_Ind.data(), decimalResultInfo.m_columnNames); - // Validate result column metadata - // This tests that CSharpOutputDataSet.ExtractNumericColumn() preserves - // SqlDecimal precision/scale from the input data + // Verify result column metadata preserved from input + // CSharpOutputDataSet.ExtractNumericColumn() preserves SqlDecimal precision/scale // GetResultColumn( 0, // columnNumber @@ -538,27 +497,14 @@ namespace ExtensionApiTest // Name: DecimalColumnsWithNullsTest // // Description: - // Test decimal columns with mixed NULL and non-NULL values to validate proper - // NULL handling in decimal column data. - // - // WHY: NULL handling in decimal columns is complex because SQL_NUMERIC_STRUCT - // itself doesn't have a NULL indicator - NULL is tracked separately via - // strLenOrInd = SQL_NULL_DATA. - // - // WHAT: Tests 2 columns with different NULL patterns: - // - Column 1: First and last rows NULL (edge case for array bounds) - // - Column 2: Middle rows NULL (common pattern in sparse data) - // - // Validates that: - // - NULLs don't corrupt adjacent non-NULL values - // - Precision/scale calculation ignores NULL rows - // - Column remains nullable when NULLs present + // Tests decimal columns with mixed NULL and non-NULL values. SQL_NUMERIC_STRUCT doesn't + // have NULL indicator - NULL tracked via strLenOrInd = SQL_NULL_DATA separately. 
// TEST_F(CSharpExtensionApiTests, DecimalColumnsWithNullsTest) { using TestHelpers::CreateNumericStruct; - // Column 1: First and last NULL (NUMERIC(28, 6)) + // Column 1: First and last NULL - NUMERIC(28,6) // Pattern: NULL, 12345.678900, 98765.432100, 0.000001, NULL // vector col1Data = { @@ -569,7 +515,7 @@ namespace ExtensionApiTest SQL_NUMERIC_STRUCT() // NULL }; - // Column 2: Middle rows NULL (NUMERIC(15, 3)) + // Column 2: Middle rows NULL - NUMERIC(15,3) // Pattern: 999999.999, NULL, NULL, -123.456, 0.001 // vector col2Data = { @@ -580,26 +526,24 @@ namespace ExtensionApiTest CreateNumericStruct(1, 15, 3, false) // 0.001 }; - const SQLINTEGER numericStructSize = 19; - - // Column 1: Rows 0 and 4 are NULL + // Rows 0 and 4 NULL // vector col1StrLenOrInd = { SQL_NULL_DATA, - numericStructSize, - numericStructSize, - numericStructSize, + SQL_NUMERIC_STRUCT_SIZE, + SQL_NUMERIC_STRUCT_SIZE, + SQL_NUMERIC_STRUCT_SIZE, SQL_NULL_DATA }; - // Column 2: Rows 1 and 2 are NULL + // Rows 1 and 2 NULL // vector col2StrLenOrInd = { - numericStructSize, + SQL_NUMERIC_STRUCT_SIZE, SQL_NULL_DATA, SQL_NULL_DATA, - numericStructSize, - numericStructSize + SQL_NUMERIC_STRUCT_SIZE, + SQL_NUMERIC_STRUCT_SIZE }; ColumnInfo nullDecimalInfo( @@ -624,8 +568,7 @@ namespace ExtensionApiTest nullDecimalInfo.m_strLen_or_Ind.data(), nullDecimalInfo.m_columnNames); - // Validate metadata - both columns should be nullable - // NOTE: SqlDecimal preserves declared precision even when NULLs present + // Verify metadata: both columns nullable, precision preserved despite NULLs // GetResultColumn( 0, // columnNumber @@ -646,17 +589,8 @@ namespace ExtensionApiTest // Name: DecimalHighScaleTest // // Description: - // Test decimal values with high scale (29-38) to verify SqlDecimal handles - // extreme precision requirements correctly. 
- // - // Tests various high scale scenarios: - // - NUMERIC(38, 30): Very small fractional values - // - NUMERIC(38, 35): Extremely small fractional values (1 significant digit) - // - NUMERIC(38, 38): Maximum scale with minimum value (0.00...001) - // - NUMERIC(38, 29): Boundary case at scale = 29 - // - // PRACTICAL USAGE: While these extreme scales are rare in production databases, - // they're valid SQL Server types and must be handled gracefully: + // Tests high scale (29-38) decimal values. + // These are valid SQL Server types and must be handled correctly. // TEST_F(CSharpExtensionApiTests, DecimalHighScaleTest) { @@ -666,39 +600,32 @@ namespace ExtensionApiTest 0, // inputSchemaColumnsNumber 6); // parametersNumber - // Test NUMERIC(38, 29) - boundary case at scale = 29 - // Value: 0.00000000000000000000000000001 (1 at 29th decimal place) + // NUMERIC(38,29): Boundary at scale = 29 // SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, 38, 29, false); InitParam(0, p0); - // Test NUMERIC(38, 30) - scale = 30 - // Value: 0.000000000000000000000000000123 (123 scaled by 10^-30) + // NUMERIC(38,30): 123 at scale 30 // SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, 38, 30, false); InitParam(1, p1); - // Test NUMERIC(38, 35) - very high scale - // Value: 0.00000000000000000000000000000000123 (3 significant digits) + // NUMERIC(38,35): Very high scale (3 significant digits) // SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(123, 38, 35, false); InitParam(2, p2); - // Test NUMERIC(38, 38) - maximum scale - // Value: 0.00000000000000000000000000000000000001 (1 at 38th decimal place) - // This is the smallest non-zero value representable in NUMERIC(38,38) + // NUMERIC(38,38): Maximum scale (smallest non-zero value) // SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1, 38, 38, false); InitParam(3, p3); - // Test negative value with high scale - // Value: -0.0000000000000000000000000000001 (negative, scale 31) + // NUMERIC(38,31): Negative with high scale // SQL_NUMERIC_STRUCT 
p4 = CreateNumericStruct(1, 38, 31, true); InitParam(4, p4); - // Test zero with high scale (should remain zero regardless of scale) - // Value: 0.00000000000000000000000000000000 (zero, scale 32) + // NUMERIC(38,32): Zero with high scale (remains zero) // SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, 38, 32, false); InitParam(5, p5); From 0dd9ac0f10186252b81ac22cd18fc88d47aa1f48 Mon Sep 17 00:00:00 2001 From: Mohammad Hossein Namaki Date: Sat, 21 Mar 2026 11:11:49 -0700 Subject: [PATCH 13/13] self review --- .../src/managed/utils/SqlNumericHelper.cs | 108 ++++++------------ .../test/src/native/CSharpDecimalTests.cpp | 26 +++-- 2 files changed, 52 insertions(+), 82 deletions(-) diff --git a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs index 57932f4..56bf007 100644 --- a/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs +++ b/language-extensions/dotnet-core-CSharp/src/managed/utils/SqlNumericHelper.cs @@ -20,7 +20,7 @@ namespace Microsoft.SqlServer.CSharpExtension /// Helper class for converting between SQL Server NUMERIC/DECIMAL types and SqlDecimal. /// Provides ODBC-compatible SQL_NUMERIC_STRUCT definition and conversion methods. /// - /// IMPORTANT: This implementation uses SqlDecimal from Microsoft.Data.SqlClient which supports + /// IMPORTANT: We use SqlDecimal from Microsoft.Data.SqlClient which supports /// full SQL Server precision (38 digits). /// C# native decimal is NOT used as it has 28-digit limitations. 
/// @@ -172,6 +172,7 @@ private static void ValidatePrecisionAndScale(byte precision, sbyte scale, strin public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) { // Validate precision and scale before creating SqlDecimal + // ValidatePrecisionAndScale(numeric.precision, numeric.scale, nameof(numeric)); // SqlDecimal constructor requires int[] array (not byte[]) @@ -181,10 +182,12 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) int[] data = new int[INT32_ARRAY_SIZE]; // Fixed buffers are already fixed - access directly via pointer + // byte* valPtr = numeric.val; for (int i = 0; i < INT32_ARRAY_SIZE; i++) { // Convert each group of 4 bytes to an int32 (little-endian) + // int offset = i * 4; data[i] = valPtr[offset] | (valPtr[offset + 1] << 8) | @@ -194,113 +197,79 @@ public static unsafe SqlDecimal ToSqlDecimal(SqlNumericStruct numeric) // SqlDecimal constructor: // SqlDecimal(byte precision, byte scale, bool positive, int[] data) + // bool isPositive = numeric.sign == 1; // Note: SqlDecimal scale parameter is byte (unsigned), but SqlNumericStruct.scale is sbyte (signed) // SQL Server scale is always non-negative (0-38), so this cast is safe after validation + // byte scale = (byte)numeric.scale; return new SqlDecimal(numeric.precision, scale, isPositive, data); } /// - /// Converts SqlDecimal to SQL_NUMERIC_STRUCT for transfer to SQL Server. + /// Converts SqlDecimal to SQL_NUMERIC_STRUCT for ODBC transfer. /// /// The SqlDecimal value to convert. - /// - /// Total number of digits (1-38) in T-SQL DECIMAL(precision, scale) terms. - /// If null, uses SqlDecimal's intrinsic precision. - /// - /// - /// Number of digits after decimal point (0-precision) in T-SQL DECIMAL(precision, scale) terms. - /// If null, uses SqlDecimal's intrinsic scale. - /// - /// The equivalent SQL numeric structure for ODBC transfer. 
- /// - /// Thrown when precision or scale are out of valid T-SQL range: - /// - Precision must be 1-38 - /// - Scale must be between 0 and precision - /// - /// - /// Thrown when: - /// - Scale adjustment causes data loss (e.g., reducing scale removes non-zero decimal places). - /// - Value requires more precision than target after scale adjustment (e.g., 12345678.99 → DECIMAL(10,4) requires 12 digits). - /// + /// Target precision (1-38). If null, uses value's intrinsic precision. + /// Target scale (0-precision). If null, uses value's intrinsic scale. + /// ODBC-compatible SQL_NUMERIC_STRUCT. + /// Thrown when precision or scale constraints violated. + /// Thrown when scale adjustment loses data or value exceeds target precision. /// - /// When converting SqlDecimal.Null, returns a zero-initialized struct. - /// Caller must set the null indicator separately (e.g., strLenOrNullMap = SQL_NULL_DATA). - /// - /// Scale Adjustment: - /// - If targetScale > value.Scale: Adds trailing decimal zeros (no data loss). - /// - If targetScale < value.Scale: Truncates decimal places (may lose data, throws OverflowException). - /// - Use AdjustScale(value, scaleShift, round=false) for exact truncation behavior. - /// - /// Precision Validation: - /// - After scale adjustment, validates that the value fits within target precision. - /// - Example: Value 12345678.99 adjusted to scale=4 becomes 12345678.9900 (requires 12 digits), - /// which exceeds DECIMAL(10,4) precision limit (max 999999.9999). + /// NULL values return zero-initialized struct; caller must set null indicator (e.g., strLenOrNullMap = SQL_NULL_DATA). + /// Scale adjustment uses SqlDecimal.AdjustScale with fRound=false to prevent silent data loss. /// public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? precision = null, byte? scale = null) { // Use SqlDecimal's intrinsic precision/scale if not specified + // byte targetPrecision = precision ?? value.Precision; byte targetScale = scale ?? 
value.Scale; - // Validate target precision and scale + // Validate target precision and scale constraints + // ValidatePrecisionAndScale(targetPrecision, (sbyte)targetScale, nameof(value)); - // Handle SqlDecimal.Null + // NULL values return zero-initialized struct; caller sets null indicator separately + // if (value.IsNull) { - // Return a zero-initialized struct - caller should set null indicator separately - // C# structs are zero-initialized by default (val array is already zeroed) return new SqlNumericStruct { precision = targetPrecision, scale = (sbyte)targetScale, - sign = 1 // Positive sign convention for NULL placeholders + sign = 1 }; } - // Adjust scale if needed (SqlDecimal has AdjustScale method) + // Adjust scale if needed to match target + // SqlDecimal adjustedValue = value; - int actualPrecisionNeeded = value.Precision; - if (targetScale != value.Scale) { - // AdjustScale returns a new SqlDecimal with the specified scale - // positive scaleShift adds decimal places, negative removes them int scaleShift = targetScale - value.Scale; try { + // fRound=false ensures no silent data loss when reducing scale + // adjustedValue = SqlDecimal.AdjustScale(value, scaleShift, fRound: false); } catch (OverflowException ex) { throw new OverflowException( - $"Cannot adjust SqlDecimal scale from {value.Scale} to {targetScale} without data loss. " + - $"Original value: {value}", ex); + $"Cannot adjust scale from {value.Scale} to {targetScale} without data loss. 
Value: {value}", ex); } - - // CRITICAL: SqlDecimal.AdjustScale() does NOT update the Precision property - // We must calculate the actual precision needed after scale adjustment - // When increasing scale, we add trailing zeros which increases precision requirement - // Example: value=12345678.99 (precision=10, scale=2) - // → AdjustScale(+2) → 12345678.9900 (needs precision=12, but Precision property still=10) - // Formula: actualPrecisionNeeded = originalPrecision + scaleShift - actualPrecisionNeeded = value.Precision + scaleShift; } - // CRITICAL: Validate that adjusted value fits within target precision - // SQL Server DECIMAL(p,s): p=total digits, s=fractional digits - // After scale adjustment, value may require more precision than declared - // Example: 12345678.99 (10 digits) → DECIMAL(10,4) → 12345678.9900 (12 digits) = OVERFLOW - if (actualPrecisionNeeded > targetPrecision) + // Validate adjusted value fits within target precision + // + if (adjustedValue.Precision > targetPrecision) { throw new OverflowException( - $"Value {value} requires precision {actualPrecisionNeeded} after adjusting scale from {value.Scale} to {targetScale}, " + - $"but target DECIMAL({targetPrecision},{targetScale}) allows only {targetPrecision} digits."); + $"Value requires {adjustedValue.Precision} digits but target DECIMAL({targetPrecision},{targetScale}) allows only {targetPrecision}."); } SqlNumericStruct result = new SqlNumericStruct @@ -310,22 +279,19 @@ public static unsafe SqlNumericStruct FromSqlDecimal(SqlDecimal value, byte? pre sign = (byte)(adjustedValue.IsPositive ? 
1 : 0) }; - // SqlDecimal stores data as int[4] array (128 bits total) - // We need to convert to byte[16] for SqlNumericStruct + // Convert SqlDecimal's int[4] data to byte[16] for ODBC struct (little-endian) + // int[] data = adjustedValue.Data; - - // Fixed buffer is already fixed - access directly via pointer byte* valPtr = result.val; - // SqlDecimal.Data always returns exactly 4 int32s, so data.Length check is redundant + for (int i = 0; i < INT32_ARRAY_SIZE; i++) { - // Convert each int32 to 4 bytes (little-endian) - int offset = i * 4; int value32 = data[i]; - valPtr[offset] = (byte)(value32 & 0xFF); - valPtr[offset + 1] = (byte)((value32 >> 8) & 0xFF); - valPtr[offset + 2] = (byte)((value32 >> 16) & 0xFF); - valPtr[offset + 3] = (byte)((value32 >> 24) & 0xFF); + int offset = i * 4; + valPtr[offset] = (byte)value32; + valPtr[offset + 1] = (byte)(value32 >> 8); + valPtr[offset + 2] = (byte)(value32 >> 16); + valPtr[offset + 3] = (byte)(value32 >> 24); } return result; diff --git a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp index f762bf6..0559479 100644 --- a/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp +++ b/language-extensions/dotnet-core-CSharp/test/src/native/CSharpDecimalTests.cpp @@ -18,6 +18,10 @@ namespace ExtensionApiTest // const SQLINTEGER SQL_NUMERIC_STRUCT_SIZE = 19; + // SQL Server maximum NUMERIC/DECIMAL precision + // + const SQLINTEGER SQL_NUMERIC_MAX_PRECISION = 38; + //---------------------------------------------------------------------------------------------- // Name: InitNumericParamTest // @@ -165,7 +169,7 @@ namespace ExtensionApiTest // Validate precision/scale/sign integrity // EXPECT_GE(numericValue->precision, 1); - EXPECT_LE(numericValue->precision, 38); + EXPECT_LE(numericValue->precision, SQL_NUMERIC_MAX_PRECISION); EXPECT_GE(numericValue->scale, 0); EXPECT_LE(numericValue->scale, 
numericValue->precision); EXPECT_TRUE(numericValue->sign == 0 || numericValue->sign == 1); @@ -199,12 +203,12 @@ namespace ExtensionApiTest // NUMERIC(38,0): 12345678901234567 (max precision, no scale) // - SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, 38, 0, false); + SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(12345678901234567LL, SQL_NUMERIC_MAX_PRECISION, 0, false); InitParam(2, p2); // NUMERIC(38,38): 0.xxx (max precision, max scale) // - SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(123456789012345678LL, 38, 38, false); + SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(123456789012345678LL, SQL_NUMERIC_MAX_PRECISION, SQL_NUMERIC_MAX_PRECISION, false); InitParam(3, p3); // NUMERIC(19,4): SQL Server MONEY compatible @@ -269,12 +273,12 @@ namespace ExtensionApiTest // Large positive: 999999999999999999 (near NUMERIC(38) max) // - SQL_NUMERIC_STRUCT largePos = CreateNumericStruct(999999999999999999LL, 38, 0, false); + SQL_NUMERIC_STRUCT largePos = CreateNumericStruct(999999999999999999LL, SQL_NUMERIC_MAX_PRECISION, 0, false); InitParam(3, largePos); // Large negative: -999999999999999999 // - SQL_NUMERIC_STRUCT largeNeg = CreateNumericStruct(999999999999999999LL, 38, 0, true); + SQL_NUMERIC_STRUCT largeNeg = CreateNumericStruct(999999999999999999LL, SQL_NUMERIC_MAX_PRECISION, 0, true); InitParam(4, largeNeg); // Maximum scale: 0.000000000000000001 (10^-18) @@ -602,32 +606,32 @@ namespace ExtensionApiTest // NUMERIC(38,29): Boundary at scale = 29 // - SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, 38, 29, false); + SQL_NUMERIC_STRUCT p0 = CreateNumericStruct(1, SQL_NUMERIC_MAX_PRECISION, 29, false); InitParam(0, p0); // NUMERIC(38,30): 123 at scale 30 // - SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, 38, 30, false); + SQL_NUMERIC_STRUCT p1 = CreateNumericStruct(123, SQL_NUMERIC_MAX_PRECISION, 30, false); InitParam(1, p1); // NUMERIC(38,35): Very high scale (3 significant digits) // - SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(123, 38, 35, false); 
+ SQL_NUMERIC_STRUCT p2 = CreateNumericStruct(123, SQL_NUMERIC_MAX_PRECISION, 35, false); InitParam(2, p2); // NUMERIC(38,38): Maximum scale (smallest non-zero value) // - SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1, 38, 38, false); + SQL_NUMERIC_STRUCT p3 = CreateNumericStruct(1, SQL_NUMERIC_MAX_PRECISION, SQL_NUMERIC_MAX_PRECISION, false); InitParam(3, p3); // NUMERIC(38,31): Negative with high scale // - SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(1, 38, 31, true); + SQL_NUMERIC_STRUCT p4 = CreateNumericStruct(1, SQL_NUMERIC_MAX_PRECISION, 31, true); InitParam(4, p4); // NUMERIC(38,32): Zero with high scale (remains zero) // - SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, 38, 32, false); + SQL_NUMERIC_STRUCT p5 = CreateNumericStruct(0, SQL_NUMERIC_MAX_PRECISION, 32, false); InitParam(5, p5); } }