Added support for cl_ext_float_atomics in CBasicTestFetchMinSpecialFloats with atomic_float (#2391)

Related to #2142: according to the work plan, this extends the fetch_min special-values testing with a CBasicTestFetchMinSpecialFloats class supporting atomic_float.
This commit is contained in:
Marcin Hajder
2026-03-10 16:41:40 +01:00
committed by GitHub
parent a56e8ee92b
commit 6506421614
3 changed files with 310 additions and 28 deletions

View File

@@ -22,6 +22,7 @@
#include "host_atomics.h" #include "host_atomics.h"
#include <algorithm>
#include <iomanip> #include <iomanip>
#include <limits> #include <limits>
#include <sstream> #include <sstream>
@@ -96,6 +97,37 @@ extern cl_int getSupportedMemoryOrdersAndScopes(
cl_device_id device, std::vector<TExplicitMemoryOrderType> &memoryOrders, cl_device_id device, std::vector<TExplicitMemoryOrderType> &memoryOrders,
std::vector<TExplicitMemoryScopeType> &memoryScopes); std::vector<TExplicitMemoryScopeType> &memoryScopes);
// Helper for inspecting the raw IEEE-754 bit pattern of a 32-bit float.
// NOTE(review): writing one union member and reading the other is type
// punning, which is undefined behavior in standard C++ (although commonly
// supported as a compiler extension); std::memcpy into a uint32_t is the
// portable alternative — confirm the project's stance before relying on it.
union FloatIntUnion {
    float f; // value written by callers
    uint32_t i; // raw bits read back for exponent/mantissa tests
};
// Returns true iff `value` is a quiet NaN.
//
// For 32-bit floats the classification is done on the raw bit pattern:
// exponent all ones (0x7F800000) and the quiet bit (mantissa bit 22,
// 0x00400000) set. The bits are extracted with std::memcpy instead of a
// union read to avoid type-punning undefined behavior in C++.
// For any other host type only std::isnan is available, so quiet and
// signaling NaNs cannot be distinguished and every NaN is reported.
template <typename HostDataType> bool is_qnan(const HostDataType &value)
{
    if constexpr (std::is_same_v<HostDataType, float>)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        if ((bits & 0x7F800000u) != 0x7F800000u)
            return false; // finite or subnormal: not NaN/inf
        return (bits & 0x00400000u) != 0; // quiet bit set => qNaN
    }
    else
        return std::isnan(value);
}
// Returns true iff `value` is a signaling NaN.
//
// For 32-bit floats: exponent all ones (0x7F800000), quiet bit (mantissa
// bit 22) clear, AND a non-zero mantissa. The non-zero-mantissa check is
// essential: +/-infinity also has an all-ones exponent with the quiet bit
// clear, so without it infinities would be misclassified as signaling NaNs
// (a real bug in the union-based version). Bits are extracted with
// std::memcpy to avoid type-punning undefined behavior.
// For any other host type only std::isnan is available, so quiet and
// signaling NaNs cannot be distinguished and every NaN is reported.
template <typename HostDataType> bool is_snan(const HostDataType &value)
{
    if constexpr (std::is_same_v<HostDataType, float>)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        if ((bits & 0x7F800000u) != 0x7F800000u)
            return false; // finite or subnormal: not NaN/inf
        if ((bits & 0x007FFFFFu) == 0)
            return false; // zero mantissa => infinity, not a NaN
        return (bits & 0x00400000u) == 0; // quiet bit clear => sNaN
    }
    else
        return std::isnan(value);
}
class AtomicTypeInfo { class AtomicTypeInfo {
public: public:
TExplicitAtomicType _type; TExplicitAtomicType _type;
@@ -187,6 +219,7 @@ public:
virtual bool virtual bool
IsTestNotAsExpected(const HostDataType &expected, IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) cl_uint whichDestValue)
{ {
return expected return expected
@@ -928,7 +961,7 @@ CBasicTest<HostAtomicType, HostDataType>::ProgramHeader(cl_uint maxNumDestItems)
if constexpr ( if constexpr (
std::is_same_v< std::is_same_v<
HostDataType, HostDataType,
HOST_ATOMIC_DOUBLE> || std::is_same_v<HostDataType, HOST_ATOMIC_FLOAT>) HOST_DOUBLE> || std::is_same_v<HostDataType, HOST_FLOAT>)
{ {
if (std::isinf(_startValue)) if (std::isinf(_startValue))
ss << (_startValue < 0 ? "-" : "") << "INFINITY"; ss << (_startValue < 0 ? "-" : "") << "INFINITY";
@@ -1505,7 +1538,7 @@ int CBasicTest<HostAtomicType, HostDataType>::ExecuteSingleTest(
startRefValues.size() ? &startRefValues[0] : 0, i)) startRefValues.size() ? &startRefValues[0] : 0, i))
break; // no expected value function provided break; // no expected value function provided
if (IsTestNotAsExpected(expected, destItems, i)) if (IsTestNotAsExpected(expected, destItems, startRefValues, i))
{ {
std::stringstream logLine; std::stringstream logLine;
logLine << "ERROR: Result " << i logLine << "ERROR: Result " << i

View File

@@ -18,6 +18,7 @@
#include "harness/testHarness.h" #include "harness/testHarness.h"
#include <mutex> #include <mutex>
#include "CL/cl_half.h" #include "CL/cl_half.h"
#ifdef WIN32 #ifdef WIN32

View File

@@ -1329,6 +1329,7 @@ public:
} }
bool IsTestNotAsExpected(const HostDataType &expected, bool IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) override cl_uint whichDestValue) override
{ {
if constexpr (is_host_fp_v<HostDataType>) if constexpr (is_host_fp_v<HostDataType>)
@@ -1343,6 +1344,7 @@ public:
return CBasicTestMemOrderScope< return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::IsTestNotAsExpected(expected, HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
testValues, testValues,
startRefValues,
whichDestValue); whichDestValue);
} }
bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues, bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues,
@@ -1653,6 +1655,7 @@ public:
} }
bool IsTestNotAsExpected(const HostDataType &expected, bool IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) override cl_uint whichDestValue) override
{ {
if constexpr (std::is_same_v<HostDataType, HOST_HALF>) if constexpr (std::is_same_v<HostDataType, HOST_HALF>)
@@ -1670,6 +1673,7 @@ public:
return CBasicTestMemOrderScope< return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::IsTestNotAsExpected(expected, HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
testValues, testValues,
startRefValues,
whichDestValue); whichDestValue);
} }
int ExecuteSingleTest(cl_device_id deviceID, cl_context context, int ExecuteSingleTest(cl_device_id deviceID, cl_context context,
@@ -1776,38 +1780,23 @@ static int test_atomic_fetch_add_generic(cl_device_id deviceID,
if (gFloatAtomicsSupported) if (gFloatAtomicsSupported)
{ {
auto spec_vals_fp64 =
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_DOUBLE,
HOST_DOUBLE>::GetSpecialValues();
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_DOUBLE, HOST_DOUBLE> CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_DOUBLE, HOST_DOUBLE>
test_spec_double(TYPE_ATOMIC_DOUBLE, useSVM); test_spec_double(TYPE_ATOMIC_DOUBLE, useSVM);
EXECUTE_TEST(error, EXECUTE_TEST(
test_spec_double.Execute(deviceID, context, queue, error,
spec_vals_fp64.size() test_spec_double.Execute(deviceID, context, queue, num_elements));
* spec_vals_fp64.size()));
auto spec_vals_fp32 =
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_FLOAT,
HOST_FLOAT>::GetSpecialValues();
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_FLOAT, HOST_FLOAT> CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_FLOAT, HOST_FLOAT>
test_spec_float(TYPE_ATOMIC_FLOAT, useSVM); test_spec_float(TYPE_ATOMIC_FLOAT, useSVM);
EXECUTE_TEST(error, EXECUTE_TEST(
test_spec_float.Execute(deviceID, context, queue, error,
spec_vals_fp32.size() test_spec_float.Execute(deviceID, context, queue, num_elements));
* spec_vals_fp32.size()));
auto spec_vals_halfs =
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_HALF,
HOST_HALF>::GetSpecialValues();
CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_HALF, HOST_HALF> CBasicTestFetchAddSpecialFloats<HOST_ATOMIC_HALF, HOST_HALF>
test_spec_half(TYPE_ATOMIC_HALF, useSVM); test_spec_half(TYPE_ATOMIC_HALF, useSVM);
EXECUTE_TEST(error, EXECUTE_TEST(
test_spec_half.Execute(deviceID, context, queue, error,
spec_vals_halfs.size() test_spec_half.Execute(deviceID, context, queue, num_elements));
* spec_vals_halfs.size()));
CBasicTestFetchAdd<HOST_ATOMIC_HALF, HOST_HALF> test_half( CBasicTestFetchAdd<HOST_ATOMIC_HALF, HOST_HALF> test_half(
TYPE_ATOMIC_HALF, useSVM); TYPE_ATOMIC_HALF, useSVM);
@@ -2070,6 +2059,7 @@ public:
} }
bool IsTestNotAsExpected(const HostDataType &expected, bool IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) override cl_uint whichDestValue) override
{ {
if constexpr (is_host_fp_v<HostDataType>) if constexpr (is_host_fp_v<HostDataType>)
@@ -2084,6 +2074,7 @@ public:
return CBasicTestMemOrderScope< return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::IsTestNotAsExpected(expected, HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
testValues, testValues,
startRefValues,
whichDestValue); whichDestValue);
} }
bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues, bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues,
@@ -3172,18 +3163,21 @@ public:
} }
bool IsTestNotAsExpected(const HostDataType &expected, bool IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) override cl_uint whichDestValue) override
{ {
if constexpr (is_host_fp_v<HostDataType>) if constexpr (is_host_fp_v<HostDataType>)
{ {
if (whichDestValue == 0) if (whichDestValue == 0)
return CBasicTestMemOrderScope<HostAtomicType, HostDataType>:: return CBasicTestMemOrderScope<HostAtomicType, HostDataType>::
IsTestNotAsExpected(expected, testValues, whichDestValue); IsTestNotAsExpected(expected, testValues, startRefValues,
whichDestValue);
return false; // ignore all but 0 which stores final result return false; // ignore all but 0 which stores final result
} }
return CBasicTestMemOrderScope< return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::IsTestNotAsExpected(expected, HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
testValues, testValues,
startRefValues,
whichDestValue); whichDestValue);
} }
bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues, bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues,
@@ -3265,6 +3259,251 @@ public:
} }
}; };
// Tests atomic_fetch_min against IEEE-754 special values (signed zeros,
// infinities, quiet/signaling NaNs, lowest/min/max and — when the device
// reports CL_FP_DENORM — denorm_min), as enabled by cl_ext_float_atomics.
// Only the HOST_FLOAT instantiation is specialized; for other data types
// the base-class behavior is used unchanged.
template <typename HostAtomicType, typename HostDataType>
class CBasicTestFetchMinSpecialFloats
    : public CBasicTestMemOrderScope<HostAtomicType, HostDataType> {
    // Cached per-thread reference values; rebuilt only when the requested
    // thread count exceeds the cache size (see GenerateRefs).
    std::vector<HostDataType> ref_vals;

public:
    using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::StartValue;
    using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::DataType;
    using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::MemoryOrder;
    using CBasicTestMemOrderScope<HostAtomicType,
                                  HostDataType>::MemoryOrderScopeStr;
    using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::LocalMemory;

    CBasicTestFetchMinSpecialFloats(TExplicitAtomicType dataType, bool useSVM)
        : CBasicTestMemOrderScope<HostAtomicType, HostDataType>(dataType,
                                                                useSVM)
    {
        // StartValue is used as an index divisor in the following test
        // logic. It is set to the number of special values, which allows
        // threads to be mapped deterministically onto the input data array.
        // This enables repeated min operations arranged so that every
        // special value is compared against every other one ("all-to-all").
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            auto spec_vals = GetSpecialValues();
            StartValue(spec_vals.size());
            // Old-value verification is meaningless here because destMemory
            // is overwritten with oldValues[tid] before the fetch_min.
            CBasicTestMemOrderScope<HostAtomicType,
                                    HostDataType>::OldValueCheck(false);
        }
    }

    // Returns the lazily initialized, shared list of special values under
    // test. denorm_min is appended only when the device FP config reports
    // denormal support. For non-HOST_FLOAT instantiations the list stays
    // empty.
    static std::vector<HostDataType> &GetSpecialValues()
    {
        static std::vector<HostDataType> special_values;
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            const HostDataType test_value_zero =
                static_cast<HostDataType>(0.0f);
            const HostDataType test_value_minus_zero =
                static_cast<HostDataType>(-0.0f);
            const HostDataType test_value_without_fraction =
                static_cast<HostDataType>(2.0f);
            const HostDataType test_value_with_fraction =
                static_cast<HostDataType>(2.2f);
            if (special_values.empty())
            {
                special_values = {
                    static_cast<HostDataType>(test_value_minus_zero),
                    static_cast<HostDataType>(test_value_zero),
                    static_cast<HostDataType>(test_value_without_fraction),
                    static_cast<HostDataType>(test_value_with_fraction),
                    std::numeric_limits<HostDataType>::infinity(),
                    std::numeric_limits<HostDataType>::quiet_NaN(),
                    std::numeric_limits<HostDataType>::signaling_NaN(),
                    -std::numeric_limits<HostDataType>::infinity(),
                    -std::numeric_limits<HostDataType>::quiet_NaN(),
                    -std::numeric_limits<HostDataType>::signaling_NaN(),
                    std::numeric_limits<HostDataType>::lowest(),
                    std::numeric_limits<HostDataType>::min(),
                    std::numeric_limits<HostDataType>::max(),
                };
                if (0 != (CL_FP_DENORM & gFloatFPConfig))
                {
                    special_values.push_back(
                        std::numeric_limits<HostDataType>::denorm_min());
                }
            }
        }
        return special_values;
    }

    // Fills startRefValues with the special values repeated cyclically, so
    // thread tid receives spec_vals[tid % spec_vals.size()]. The random
    // generator `d` is unused: inputs must be deterministic for the
    // host/device comparison. Returns false for non-float instantiations
    // (base-class reference generation applies).
    bool GenerateRefs(cl_uint threadCount, HostDataType *startRefValues,
                      MTdata d) override
    {
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            if (threadCount > ref_vals.size())
            {
                ref_vals.assign(threadCount, 0);
                auto spec_vals = GetSpecialValues();
                cl_uint total_cnt = 0;
                while (total_cnt < threadCount)
                {
                    // Copy one (possibly truncated) block of special values.
                    cl_uint block_cnt =
                        std::min((cl_int)(threadCount - total_cnt),
                                 (cl_int)spec_vals.size());
                    memcpy(&ref_vals.at(total_cnt), spec_vals.data(),
                           sizeof(HostDataType) * block_cnt);
                    total_cnt += block_cnt;
                }
            }
            memcpy(startRefValues, ref_vals.data(),
                   sizeof(HostDataType) * threadCount);
            return true;
        }
        return false;
    }

    std::string ProgramCore() override
    {
        // The start_value variable (set by StartValue) is used
        // as a divisor of the thread index when selecting the operand for
        // atomic_fetch_min. This groups threads into blocks corresponding
        // to the number of special values and implements an "all-to-all"
        // comparison pattern. As a result, each destination element is
        // updated using different combinations of input values, enabling
        // consistent comparison between host and device execution.
        std::string memoryOrderScope = MemoryOrderScopeStr();
        std::string postfix(memoryOrderScope.empty() ? "" : "_explicit");
        return std::string(DataType().AddSubOperandTypeName())
            + " start_value = atomic_load_explicit(destMemory+tid, "
              "memory_order_relaxed, memory_scope_work_group);\n"
              " atomic_store_explicit(destMemory+tid, oldValues[tid], "
              "memory_order_relaxed, memory_scope_work_group);\n"
              " atomic_fetch_min"
            + postfix + "(&destMemory[tid], ("
            + DataType().AddSubOperandTypeName()
            + ")oldValues[tid/(int)start_value]" + memoryOrderScope + ");\n";
    }

    // Host-side mirror of ProgramCore: store the thread's own value, then
    // fetch_min it against the value selected by the block index
    // (tid / spec_vals.size()).
    void HostFunction(cl_uint tid, cl_uint threadCount,
                      volatile HostAtomicType *destMemory,
                      HostDataType *oldValues) override
    {
        auto spec_vals = GetSpecialValues();
        host_atomic_store(&destMemory[tid], (HostDataType)oldValues[tid],
                          MEMORY_ORDER_SEQ_CST);
        host_atomic_fetch_min(&destMemory[tid],
                              (HostDataType)oldValues[tid / spec_vals.size()],
                              MemoryOrder());
    }

    // Expected result per destination: min of the thread's own start value
    // and the block-selected operand. NOTE(review): std::min with a NaN
    // operand is order-dependent; the NaN cases are re-examined in
    // IsTestNotAsExpected rather than here.
    bool ExpectedValue(HostDataType &expected, cl_uint threadCount,
                       HostDataType *startRefValues,
                       cl_uint whichDestValue) override
    {
        expected = StartValue();
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            auto spec_vals = GetSpecialValues();
            expected =
                std::min(startRefValues[whichDestValue],
                         startRefValues[whichDestValue / spec_vals.size()]);
        }
        return true;
    }

    // Accepts results that differ from `expected` but are still allowed by
    // the fetch_min special-value rules (signed zeros and NaN operands),
    // then defers to the base-class comparison.
    bool IsTestNotAsExpected(const HostDataType &expected,
                             const std::vector<HostAtomicType> &testValues,
                             const std::vector<HostDataType> &startRefValues,
                             cl_uint whichDestValue) override
    {
        if (testValues[whichDestValue] != expected)
        {
            auto spec_vals = GetSpecialValues();
            // special cases
            // min(-0, +0) = min(+0, -0) = +0 or -0,
            if (((startRefValues[whichDestValue] == -0.f)
                 && (startRefValues[whichDestValue / spec_vals.size()] == 0.f))
                || ((startRefValues[whichDestValue] == 0.f)
                    && (startRefValues[whichDestValue / spec_vals.size()]
                        == -0.f)))
                return false;
            else if (is_qnan(startRefValues[whichDestValue / spec_vals.size()])
                     || is_qnan(startRefValues[whichDestValue]))
            {
                // min(x, qNaN) = min(qNaN, x) = x,
                // min(qNaN, qNaN) = qNaN,
                if (is_qnan(startRefValues[whichDestValue / spec_vals.size()])
                    && is_qnan(startRefValues[whichDestValue]))
                    return !is_qnan(testValues[whichDestValue]);
                else if (is_qnan(
                             startRefValues[whichDestValue / spec_vals.size()]))
                    return !std::isnan(testValues[whichDestValue])
                        && testValues[whichDestValue]
                        != startRefValues[whichDestValue]; // NaN != NaN always
                                                           // true
                else
                    return !std::isnan(testValues[whichDestValue])
                        && testValues[whichDestValue]
                        != startRefValues[whichDestValue / spec_vals.size()];
            }
            else if (is_snan(startRefValues[whichDestValue / spec_vals.size()])
                     || is_snan(startRefValues[whichDestValue]))
            {
                // min(x, sNaN) = min(sNaN, x) = NaN or x, and
                // min(NaN, sNaN) = min(sNaN, NaN) = NaN
                if (std::isnan(testValues[whichDestValue])
                    || testValues[whichDestValue]
                        == startRefValues[whichDestValue]
                    || testValues[whichDestValue]
                        == startRefValues[whichDestValue / spec_vals.size()])
                    return false;
            }
        }
        return CBasicTestMemOrderScope<
            HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
                                                               testValues,
                                                               startRefValues,
                                                               whichDestValue);
    }

    // Skips the float specialization when the device lacks the relevant
    // cl_ext_float_atomics min/max capability for the targeted address
    // space, or when INF/NaN support is absent for program-scope globals.
    int ExecuteSingleTest(cl_device_id deviceID, cl_context context,
                          cl_command_queue queue) override
    {
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            if (LocalMemory()
                && (gFloatAtomicCaps & CL_DEVICE_LOCAL_FP_ATOMIC_MIN_MAX_EXT)
                    == 0)
                return 0; // skip test - not applicable
            if (!LocalMemory()
                && (gFloatAtomicCaps & CL_DEVICE_GLOBAL_FP_ATOMIC_MIN_MAX_EXT)
                    == 0)
                return 0; // skip test - not applicable
            if (!CBasicTestMemOrderScope<HostAtomicType,
                                         HostDataType>::LocalMemory()
                && CBasicTestMemOrderScope<HostAtomicType,
                                           HostDataType>::DeclaredInProgram())
            {
                // Program-scope globals with special values require INF/NaN
                // support in the device's float FP config.
                if ((gFloatFPConfig & CL_FP_INF_NAN) == 0) return 0;
            }
        }
        return CBasicTestMemOrderScope<
            HostAtomicType, HostDataType>::ExecuteSingleTest(deviceID, context,
                                                             queue);
    }

    // One result per thread for the float specialization; otherwise the
    // base-class result count applies.
    cl_uint NumResults(cl_uint threadCount, cl_device_id deviceID) override
    {
        if constexpr (std::is_same_v<HostDataType, HOST_FLOAT>)
        {
            return threadCount;
        }
        return CBasicTestMemOrderScope<HostAtomicType,
                                       HostDataType>::NumResults(threadCount,
                                                                 deviceID);
    }
};
static int test_atomic_fetch_min_generic(cl_device_id deviceID, static int test_atomic_fetch_min_generic(cl_device_id deviceID,
cl_context context, cl_context context,
cl_command_queue queue, cl_command_queue queue,
@@ -3290,6 +3529,12 @@ static int test_atomic_fetch_min_generic(cl_device_id deviceID,
if (gFloatAtomicsSupported) if (gFloatAtomicsSupported)
{ {
CBasicTestFetchMinSpecialFloats<HOST_ATOMIC_FLOAT, HOST_FLOAT>
test_spec_float(TYPE_ATOMIC_FLOAT, useSVM);
EXECUTE_TEST(
error,
test_spec_float.Execute(deviceID, context, queue, num_elements));
CBasicTestFetchMin<HOST_ATOMIC_DOUBLE, HOST_DOUBLE> test_double( CBasicTestFetchMin<HOST_ATOMIC_DOUBLE, HOST_DOUBLE> test_double(
TYPE_ATOMIC_DOUBLE, useSVM); TYPE_ATOMIC_DOUBLE, useSVM);
EXECUTE_TEST( EXECUTE_TEST(
@@ -3478,18 +3723,21 @@ public:
} }
bool IsTestNotAsExpected(const HostDataType &expected, bool IsTestNotAsExpected(const HostDataType &expected,
const std::vector<HostAtomicType> &testValues, const std::vector<HostAtomicType> &testValues,
const std::vector<HostDataType> &startRefValues,
cl_uint whichDestValue) override cl_uint whichDestValue) override
{ {
if constexpr (is_host_fp_v<HostDataType>) if constexpr (is_host_fp_v<HostDataType>)
{ {
if (whichDestValue == 0) if (whichDestValue == 0)
return CBasicTestMemOrderScope<HostAtomicType, HostDataType>:: return CBasicTestMemOrderScope<HostAtomicType, HostDataType>::
IsTestNotAsExpected(expected, testValues, whichDestValue); IsTestNotAsExpected(expected, testValues, startRefValues,
whichDestValue);
return false; // ignore all but 0 which stores final result return false; // ignore all but 0 which stores final result
} }
return CBasicTestMemOrderScope< return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::IsTestNotAsExpected(expected, HostAtomicType, HostDataType>::IsTestNotAsExpected(expected,
testValues, testValues,
startRefValues,
whichDestValue); whichDestValue);
} }
bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues, bool VerifyRefs(bool &correct, cl_uint threadCount, HostDataType *refValues,