Added support for cl_ext_float_atomics in c11_atomics load test along with atomic_half type (#2297)

Related to #2142: following the work plan, this extends CBasicTestLoad
with support for atomic_half.
This commit is contained in:
Marcin Hajder
2025-06-10 17:43:02 +02:00
committed by GitHub
parent 3233d2089f
commit b79a8a2f42
2 changed files with 54 additions and 6 deletions

View File

@@ -164,7 +164,10 @@ CorrespondingType host_atomic_load(volatile AtomicType *a,
TExplicitMemoryOrderType order)
{
#if defined( _MSC_VER ) || (defined( __INTEL_COMPILER ) && defined(WIN32))
return InterlockedExchangeAdd(a, 0);
    if (sizeof(CorrespondingType) == 2)
        return InterlockedOr16(reinterpret_cast<volatile SHORT *>(a), 0);
    else
        return InterlockedExchangeAdd(reinterpret_cast<volatile LONG *>(a), 0);
#elif defined(__GNUC__)
return __sync_add_and_fetch(a, 0);
#else

View File

@@ -328,6 +328,7 @@ public:
HostDataType>::MemoryOrderScopeStr;
using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::MemoryScopeStr;
using CBasicTest<HostAtomicType, HostDataType>::CheckCapabilities;
using CBasicTestMemOrderScope<HostAtomicType, HostDataType>::LocalMemory;
CBasicTestLoad(TExplicitAtomicType dataType, bool useSVM)
: CBasicTestMemOrderScope<HostAtomicType, HostDataType>(dataType,
useSVM)
@@ -349,6 +350,21 @@ public:
== TEST_SKIPPED_ITSELF)
return 0; // skip test - not applicable
if (CBasicTestMemOrderScope<HostAtomicType, HostDataType>::DataType()
._type
== TYPE_ATOMIC_HALF)
{
if (LocalMemory()
&& (gHalfAtomicCaps & CL_DEVICE_LOCAL_FP_ATOMIC_LOAD_STORE_EXT)
== 0)
return 0; // skip test - not applicable
if (!LocalMemory()
&& (gHalfAtomicCaps & CL_DEVICE_GLOBAL_FP_ATOMIC_LOAD_STORE_EXT)
== 0)
return 0;
}
return CBasicTestMemOrderScope<
HostAtomicType, HostDataType>::ExecuteSingleTest(deviceID, context,
queue);
@@ -382,7 +398,13 @@ public:
HostDataType *startRefValues,
cl_uint whichDestValue)
{
expected = (HostDataType)whichDestValue;
if (CBasicTestMemOrderScope<HostAtomicType, HostDataType>::DataType()
._type
!= TYPE_ATOMIC_HALF)
expected = (HostDataType)whichDestValue;
else
expected = cl_half_from_float(static_cast<float>(whichDestValue),
gHalfRoundingMode);
return true;
}
virtual bool VerifyRefs(bool &correct, cl_uint threadCount,
@@ -392,11 +414,25 @@ public:
correct = true;
for (cl_uint i = 0; i < threadCount; i++)
{
if (refValues[i] != (HostDataType)i)
if constexpr (std::is_same<HostDataType, cl_half>::value)
{
log_error("Invalid value for thread %u\n", (cl_uint)i);
correct = false;
return true;
HostDataType test = cl_half_from_float(static_cast<float>(i),
gHalfRoundingMode);
if (refValues[i] != test)
{
log_error("Invalid value for thread %u\n", (cl_uint)i);
correct = false;
return true;
}
}
else
{
if (refValues[i] != (HostDataType)i)
{
log_error("Invalid value for thread %u\n", (cl_uint)i);
correct = false;
return true;
}
}
}
return true;
@@ -431,6 +467,15 @@ static int test_atomic_load_generic(cl_device_id deviceID, cl_context context,
TYPE_ATOMIC_DOUBLE, useSVM);
EXECUTE_TEST(error,
test_double.Execute(deviceID, context, queue, num_elements));
if (gFloatAtomicsSupported)
{
CBasicTestLoad<HOST_ATOMIC_HALF, HOST_HALF> test_half(TYPE_ATOMIC_HALF,
useSVM);
EXECUTE_TEST(error,
test_half.Execute(deviceID, context, queue, num_elements));
}
if (AtomicTypeInfo(TYPE_ATOMIC_SIZE_T).Size(deviceID) == 4)
{
CBasicTestLoad<HOST_ATOMIC_INTPTR_T32, HOST_INTPTR_T32> test_intptr_t(