Testing Existing SVM APIs remaining APIs tests (#2441)

Tests for the following APIs:
* clEnqueueSVMMemcpy
* clEnqueueSVMMemFill
* clEnqueueSVMMap/clEnqueueSVMUnmap
* clEnqueueSVMMigrateMem
* clEnqueueSVMFree
* clSetKernelArgSVMPointer
* clSetKernelExecInfo

---------

Signed-off-by: John Kesapides <john.kesapides@arm.com>
This commit is contained in:
John Kesapides
2025-07-22 18:19:20 +01:00
committed by GitHub
parent 69dc9d4d8f
commit b646ba5cae
10 changed files with 1657 additions and 1 deletions

View File

@@ -21,6 +21,13 @@ set(${MODULE_NAME}_SOURCES
test_unified_svm_apis.cpp
test_unified_svm_api_query_defaults.cpp
test_unified_svm_api_suggested_type_index.cpp
test_unified_svm_mem_cpy.cpp
test_unified_svm_mem_fill.cpp
test_unified_svm_migrate.cpp
test_unified_svm_free.cpp
test_unified_svm_setarg.cpp
test_unified_svm_map_unmap.cpp
test_unified_svm_execinfo.cpp
)
set_gnulike_module_compile_flags("-Wno-sometimes-uninitialized -Wno-sign-compare")

View File

@@ -23,6 +23,7 @@
#include "harness/typeWrappers.h"
#include <vector>
#include <string>
#include <algorithm>
#if (defined(_WIN32) || defined(_WIN64)) && defined(_MSC_VER)
#include <windows.h>
@@ -85,5 +86,25 @@ extern cl_int create_cl_objects(cl_device_id device_from_harness, const char** p
extern const char *linked_list_create_and_verify_kernels[];
// Queries the command type recorded on the given event and compares it
// against the expected command type. Returns CL_SUCCESS on a match,
// CL_INVALID_VALUE on a mismatch, or the error from clGetEventInfo if the
// query itself fails.
static inline cl_int check_event_type(cl_event event,
                                      cl_command_type expectedCommandType)
{
    cl_command_type actualType;
    cl_int err = clGetEventInfo(event, CL_EVENT_COMMAND_TYPE,
                                sizeof(actualType), &actualType, nullptr);
    test_error(err, "clGetEventInfo failed");
    if (actualType != expectedCommandType)
    {
        return CL_INVALID_VALUE;
    }
    return CL_SUCCESS;
}
static inline void generate_random_inputs(std::vector<cl_uchar> &v, MTdata d)
{
auto random_generator = [&d]() {
return static_cast<cl_uchar>(genrand_int32(d));
};
std::generate(v.begin(), v.end(), random_generator);
}
#endif // #ifndef __COMMON_H__

View File

@@ -0,0 +1,310 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <vector>
// Tests for clSetKernelExecInfo with CL_KERNEL_EXEC_INFO_SVM_PTRS: a kernel
// accesses a USVM allocation indirectly, through a pointer stored inside a
// regular buffer rather than passed as a kernel argument, so the
// implementation only learns about the allocation via clSetKernelExecInfo.
struct UnifiedSVMExecInfo : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test reading from USM pointer indirectly using clSetKernelExecInfo.
    // The test will perform a memcpy on the device: the kernel reads through
    // the pointer stored in `indirect` and writes into the `direct` buffer.
    cl_int test_svm_exec_info_read(USVMWrapper<cl_uchar> *mem)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> src_data(alloc_count, 0);
        auto ptr = mem->get_ptr();
        // Buffer holding the raw USVM pointer the kernel dereferences.
        clMemWrapper indirect =
            clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                           sizeof(ptr), &ptr, &err);
        test_error(err, "could not create indirect buffer");
        clMemWrapper direct = clCreateBuffer(context, CL_MEM_READ_WRITE,
                                             src_data.size(), nullptr, &err);
        test_error(err, "could not create direct buffer");
        err = clSetKernelArg(kernel_IndirectAccessRead, 0, sizeof(indirect),
                             &indirect);
        test_error(err, "could not set kernel argument 0");
        err = clSetKernelArg(kernel_IndirectAccessRead, 1, sizeof(direct),
                             &direct);
        test_error(err, "could not set kernel argument 1");
        size_t test_offsets[] = { 0, alloc_count / 2 };
        for (auto offset : test_offsets)
        {
            // Fill src data with a random pattern
            generate_random_inputs(src_data, d);
            err = mem->write(src_data);
            test_error(err, "could not write to usvm memory");
            // Register the (possibly offset) USVM pointer for indirect use
            void *info_ptr = &mem->get_ptr()[offset];
            err = clSetKernelExecInfo(kernel_IndirectAccessRead,
                                      CL_KERNEL_EXEC_INFO_SVM_PTRS,
                                      sizeof(void *), &info_ptr);
            test_error(err, "could not enable indirect access");
            size_t gws{ alloc_count };
            err = clEnqueueNDRangeKernel(queue, kernel_IndirectAccessRead, 1,
                                         nullptr, &gws, nullptr, 0, nullptr,
                                         nullptr);
            test_error(err, "clEnqueueNDRangeKernel failed");
            err = clFinish(queue);
            test_error(err, "clFinish failed");
            std::vector<cl_uchar> result_data(alloc_count, 0);
            err = clEnqueueReadBuffer(queue, direct, CL_TRUE, 0,
                                      result_data.size(), result_data.data(),
                                      0, nullptr, nullptr);
            test_error(err, "clEnqueueReadBuffer failed");
            // Validate result: the kernel copies the whole allocation
            if (result_data != src_data)
            {
                for (size_t i = 0; i < alloc_count; i++)
                {
                    if (src_data[i] != result_data[i])
                    {
                        log_error(
                            "While attempting indirect read "
                            "clSetKernelExecInfo with "
                            "offset:%zu size:%zu \n"
                            "Data verification mismatch at %zu expected: %d "
                            "got: %d\n",
                            offset, alloc_count, i, src_data[i],
                            result_data[i]);
                        return TEST_FAIL;
                    }
                }
            }
        }
        return CL_SUCCESS;
    }

    // Test writing to USM pointer indirectly using clSetKernelExecInfo.
    // The test will perform a memcpy on the device: the kernel reads from the
    // `direct` buffer and writes through the pointer stored in `indirect`.
    cl_int test_svm_exec_info_write(USVMWrapper<cl_uchar> *mem)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> src_data(alloc_count, 0);
        size_t test_offsets[] = { 0, alloc_count / 2 };
        auto ptr = mem->get_ptr();
        // Buffer holding the raw USVM pointer the kernel dereferences.
        clMemWrapper indirect =
            clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                           sizeof(ptr), &ptr, &err);
        test_error(err, "could not create indirect buffer");
        clMemWrapper direct = clCreateBuffer(context, CL_MEM_READ_WRITE,
                                             alloc_count, nullptr, &err);
        test_error(err, "could not create direct buffer");
        err = clSetKernelArg(kernel_IndirectAccessWrite, 0, sizeof(indirect),
                             &indirect);
        test_error(err, "could not set kernel argument 0");
        err = clSetKernelArg(kernel_IndirectAccessWrite, 1, sizeof(direct),
                             &direct);
        test_error(err, "could not set kernel argument 1");
        for (auto offset : test_offsets)
        {
            // Fill src data with a random pattern
            generate_random_inputs(src_data, d);
            err = clEnqueueWriteBuffer(queue, direct, CL_NON_BLOCKING, 0,
                                       src_data.size(), src_data.data(), 0,
                                       nullptr, nullptr);
            test_error(err, "clEnqueueWriteBuffer failed");
            // Register the (possibly offset) USVM pointer for indirect use
            void *info_ptr = &mem->get_ptr()[offset];
            err = clSetKernelExecInfo(kernel_IndirectAccessWrite,
                                      CL_KERNEL_EXEC_INFO_SVM_PTRS,
                                      sizeof(void *), &info_ptr);
            test_error(err, "could not enable indirect access");
            size_t gws{ alloc_count };
            err = clEnqueueNDRangeKernel(queue, kernel_IndirectAccessWrite, 1,
                                         nullptr, &gws, nullptr, 0, nullptr,
                                         nullptr);
            test_error(err, "clEnqueueNDRangeKernel failed");
            err = clFinish(queue);
            test_error(err, "clFinish failed");
            std::vector<cl_uchar> result_data(alloc_count, 0);
            err = mem->read(result_data);
            test_error(err, "could not read from usvm memory");
            // Validate result: the kernel copies the whole allocation
            if (result_data != src_data)
            {
                for (size_t i = 0; i < alloc_count; i++)
                {
                    if (src_data[i] != result_data[i])
                    {
                        log_error(
                            "While attempting indirect write "
                            "clSetKernelExecInfo with "
                            "offset:%zu size:%zu \n"
                            "Data verification mismatch at %zu expected: %d "
                            "got: %d\n",
                            offset, alloc_count, i, src_data[i],
                            result_data[i]);
                        return TEST_FAIL;
                    }
                }
            }
        }
        return CL_SUCCESS;
    }

    // Base-class setup plus compilation of the indirect-access kernels.
    cl_int setup() override
    {
        cl_int err = UnifiedSVMBase::setup();
        if (CL_SUCCESS != err)
        {
            return err;
        }
        return createIndirectAccessKernel();
    }

    // Runs the read and write indirect-access tests for every supported
    // USVM type index.
    cl_int run() override
    {
        cl_int err;
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            auto mem = get_usvm_wrapper<cl_uchar>(ti);
            err = mem->allocate(alloc_count);
            test_error(err, "SVM allocation failed");
            log_info(" testing clSetKernelExecInfo() SVM type %u \n", ti);
            err = test_svm_exec_info_read(mem.get());
            if (CL_SUCCESS != err)
            {
                return err;
            }
            err = test_svm_exec_info_write(mem.get());
            if (CL_SUCCESS != err)
            {
                return err;
            }
            err = mem->free();
            test_error(err, "SVM free failed");
        }
        return CL_SUCCESS;
    }

    // Builds the two kernels that access USVM memory through a pointer
    // embedded in a buffer (one reads through it, one writes through it).
    cl_int createIndirectAccessKernel()
    {
        cl_int err;
        const char *programString = R"(
            struct s { const global unsigned char* ptr; };
            kernel void test_IndirectAccessRead(const global struct s* src, global unsigned char* dst)
            {
                dst[get_global_id(0)] = src->ptr[get_global_id(0)];
            }
            struct d { global unsigned char* ptr; };
            kernel void test_IndirectAccessWrite(global struct d* dst, const global unsigned char* src)
            {
                dst->ptr[get_global_id(0)] = src[get_global_id(0)];
            }
        )";
        clProgramWrapper program;
        err = create_single_kernel_helper(
            context, &program, &kernel_IndirectAccessRead, 1, &programString,
            "test_IndirectAccessRead");
        test_error(err, "could not create IndirectAccessRead kernel");
        kernel_IndirectAccessWrite =
            clCreateKernel(program, "test_IndirectAccessWrite", &err);
        test_error(err, "could not create IndirectAccessWrite kernel");
        return CL_SUCCESS;
    }

    clKernelWrapper kernel_IndirectAccessRead;
    clKernelWrapper kernel_IndirectAccessWrite;
    // Number of cl_uchar elements in every test allocation.
    static constexpr size_t alloc_count = 1024;
};
// Entry point: requires cl_khr_unified_svm, then runs the exec-info fixture.
REGISTER_TEST(unified_svm_exec_info)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    cl_int error;
    clContextWrapper contextWrapper;
    clCommandQueueWrapper queueWrapper;
    // For now: create a new context and queue.
    // If we switch to a new test executable and run the tests without
    // forceNoContextCreation then this can be removed, and we can just use
    // the context and the queue from the harness.
    if (nullptr == context)
    {
        contextWrapper =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &error);
        test_error(error, "clCreateContext failed");
        context = contextWrapper;
    }
    if (nullptr == queue)
    {
        queueWrapper = clCreateCommandQueue(context, device, 0, &error);
        test_error(error, "clCreateCommandQueue failed");
        queue = queueWrapper;
    }
    UnifiedSVMExecInfo fixture(context, device, queue, num_elements);
    error = fixture.setup();
    test_error(error, "test setup failed");
    error = fixture.run();
    test_error(error, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,260 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <atomic>
#include <chrono>
#include <vector>
#include <thread>
namespace {
// User data handed to the clEnqueueSVMFree callback: records the pointers
// the callback received so the host can validate them, and signals
// completion through an atomic flag.
struct CallbackData
{
    CallbackData(cl_context ctx, std::vector<cl_svm_capabilities_khr> &caps)
        : context{ ctx }, status{ 0 }, svm_pointers{}, svm_caps{ caps }
    {}
    cl_context context;
    // Set to 1 (release) once the callback has run; polled (acquire) on the
    // host thread.
    std::atomic<cl_uint> status;
    // Pointers reported by the callback, captured for validation.
    std::vector<void *> svm_pointers;
    // Capabilities of each allocation, in the same order as the pointers;
    // used to pick the matching free mechanism.
    std::vector<cl_svm_capabilities_khr> &svm_caps;
};
// callback which will be passed to clEnqueueSVMFree command. Records each
// reported pointer and frees it with the mechanism matching its SVM
// capabilities, then signals completion.
void CL_CALLBACK callback_svm_free(cl_command_queue queue,
                                   cl_uint num_svm_pointers,
                                   void *svm_pointers[], void *user_data)
{
    auto data = static_cast<CallbackData *>(user_data);
    data->svm_pointers.resize(num_svm_pointers, 0);
    for (size_t i = 0; i < num_svm_pointers; ++i)
    {
        data->svm_pointers[i] = svm_pointers[i];
        if (data->svm_caps[i] & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR)
        {
            // System-allocated pointers were obtained with align_malloc, so
            // free the reported pointer itself -- not the user data, which is
            // a stack object owned by the host.
            align_free(svm_pointers[i]);
        }
        else
        {
            clSVMFree(data->context, svm_pointers[i]);
        }
    }
    data->status.store(1, std::memory_order_release);
}
// Logs every pointer in the vector, one per line, for failure diagnostics.
void log_error_usvm_ptrs(const std::vector<void *> &v)
{
    for (size_t i = 0; i < v.size(); ++i)
    {
        log_error("\t%zu: %p\n", i, v[i]);
    }
}
}
// Tests for clEnqueueSVMFree, both with a completion callback (which frees
// the allocations itself) and without one (implementation-managed free).
struct UnifiedSVMFree : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test the clEnqueueSVMFree function for a vector of USM pointers
    // and validate the callback: the event type must be CL_COMMAND_SVM_FREE
    // and the callback must receive exactly the enqueued pointers.
    cl_int
    test_SVMFreeCallback(std::vector<void *> &buffers,
                         std::vector<cl_svm_capabilities_khr> &bufferCaps)
    {
        cl_int err = CL_SUCCESS;
        clEventWrapper event;
        CallbackData data{ context, bufferCaps };
        err = clEnqueueSVMFree(queue, buffers.size(), buffers.data(),
                               callback_svm_free, &data, 0, nullptr, &event);
        test_error(err, "clEnqueueSVMFree failed");
        err = clFinish(queue);
        test_error(err, "clFinish failed");
        err = check_event_type(event, CL_COMMAND_SVM_FREE);
        test_error(err, "Invalid command type returned for clEnqueueSVMFree");
        // wait for the callback; it may run on an implementation thread after
        // the command itself completes
        while (data.status.load(std::memory_order_acquire) == 0)
        {
            std::this_thread::sleep_for(std::chrono::microseconds(1));
        }
        // check if pointers returned in callback are correct
        if (data.svm_pointers != buffers)
        {
            log_error("Invalid SVM pointer returned in the callback \n");
            log_error("Expected:\n");
            log_error_usvm_ptrs(buffers);
            log_error("Got:\n");
            log_error_usvm_ptrs(data.svm_pointers);
            return TEST_FAIL;
        }
        return CL_SUCCESS;
    }

    // Test clEnqueueSVMFree without a callback: the implementation frees the
    // allocations itself; only the event type can be validated.
    cl_int test_SVMFree(std::vector<void *> &buffers)
    {
        cl_int err = CL_SUCCESS;
        clEventWrapper event;
        err = clEnqueueSVMFree(queue, buffers.size(), buffers.data(), nullptr,
                               nullptr, 0, nullptr, &event);
        test_error(err, "clEnqueueSVMFree failed");
        err = clFinish(queue);
        test_error(err, "clFinish failed");
        err = check_event_type(event, CL_COMMAND_SVM_FREE);
        test_error(err, "Invalid command type returned for clEnqueueSVMFree");
        return CL_SUCCESS;
    }

    cl_int run() override
    {
        cl_int err;
        // Test clEnqueueSVMFree function with a callback
        for (size_t it = 0; it < test_iterations; it++)
        {
            std::vector<void *> buffers;
            std::vector<cl_svm_capabilities_khr> bufferCaps;
            size_t numSVMBuffers = get_random_size_t(1, 20, d);
            for (size_t i = 0; i < numSVMBuffers; i++)
            {
                size_t typeIndex =
                    get_random_size_t(0, deviceUSVMCaps.size() - 1, d);
                auto mem = get_usvm_wrapper<cl_uchar>(typeIndex);
                err = mem->allocate(alloc_count);
                test_error(err, "SVM allocation failed");
                buffers.push_back(mem->get_ptr());
                bufferCaps.push_back(deviceUSVMCaps[typeIndex]);
                // Ownership is transferred to clEnqueueSVMFree/the callback,
                // so detach the wrapper to avoid a double free.
                mem->reset();
            }
            err = test_SVMFreeCallback(buffers, bufferCaps);
            test_error(err, "test_SVMFreeCallback");
        }
        // We need to filter out the SVM types that support system allocation
        // as we cannot test clEnqueueSVMFree without a callback for them
        std::vector<size_t> test_indexes;
        for (size_t i = 0; i < deviceUSVMCaps.size(); i++)
        {
            auto caps = deviceUSVMCaps[i];
            if (0 == (caps & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR))
            {
                test_indexes.push_back(i);
            }
        }
        if (!test_indexes.empty())
        {
            // Test clEnqueueSVMFree function with no callback
            for (size_t it = 0; it < test_iterations; it++)
            {
                std::vector<void *> buffers;
                size_t numSVMBuffers = get_random_size_t(1, 20, d);
                while (buffers.size() != numSVMBuffers)
                {
                    size_t test_index =
                        get_random_size_t(0, test_indexes.size() - 1, d);
                    size_t typeIndex = test_indexes[test_index];
                    auto mem = get_usvm_wrapper<cl_uchar>(typeIndex);
                    err = mem->allocate(alloc_count);
                    test_error(err, "SVM allocation failed");
                    buffers.push_back(mem->get_ptr());
                    mem->reset();
                }
                err = test_SVMFree(buffers);
                test_error(err, "test_SVMFree");
            }
        }
        return CL_SUCCESS;
    }

    // Number of cl_uchar elements in every test allocation.
    static constexpr size_t alloc_count = 1024;
    // Number of randomized iterations per sub-test.
    static constexpr size_t test_iterations = 100;
};
// Entry point: requires cl_khr_unified_svm, then runs the SVM-free fixture.
REGISTER_TEST(unified_svm_free)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    cl_int error;
    clContextWrapper contextWrapper;
    clCommandQueueWrapper queueWrapper;
    // For now: create a new context and queue.
    // If we switch to a new test executable and run the tests without
    // forceNoContextCreation then this can be removed, and we can just use
    // the context and the queue from the harness.
    if (nullptr == context)
    {
        contextWrapper =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &error);
        test_error(error, "clCreateContext failed");
        context = contextWrapper;
    }
    if (nullptr == queue)
    {
        queueWrapper = clCreateCommandQueue(context, device, 0, &error);
        test_error(error, "clCreateCommandQueue failed");
        queue = queueWrapper;
    }
    UnifiedSVMFree fixture(context, device, queue, num_elements);
    error = fixture.setup();
    test_error(error, "test setup failed");
    error = fixture.run();
    test_error(error, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,139 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
// Tests for clEnqueueSVMMap / clEnqueueSVMUnmap over random sub-ranges of a
// USVM allocation, for every map-flag combination and every SVM type that
// advertises host-map capability.
struct UnifiedSVMMapUnmap : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test the clEnqueueSVMMap and clEnqueueSVMUnmap functions for random
    // ranges of a USM allocation and validate the event types.
    cl_int test_SVMMapUnmap(USVMWrapper<cl_uchar> *mem, cl_map_flags flags)
    {
        cl_int err = CL_SUCCESS;
        for (size_t it = 0; it < test_iterations; it++)
        {
            // Select a random non-empty range within the allocation
            size_t offset = get_random_size_t(0, alloc_count - 1, d);
            size_t length = get_random_size_t(1, alloc_count - offset, d);
            void *ptr = &mem->get_ptr()[offset];
            clEventWrapper map_event;
            err = clEnqueueSVMMap(queue, CL_FALSE, flags, ptr, length, 0,
                                  nullptr, &map_event);
            test_error(err, "clEnqueueSVMMap failed");
            clEventWrapper unmap_event;
            err = clEnqueueSVMUnmap(queue, ptr, 0, nullptr, &unmap_event);
            test_error(err, "clEnqueueSVMUnmap failed");
            err = clFinish(queue);
            test_error(err, "clFinish failed");
            err = check_event_type(map_event, CL_COMMAND_SVM_MAP);
            test_error(err,
                       "Invalid command type returned for clEnqueueSVMMap");
            err = check_event_type(unmap_event, CL_COMMAND_SVM_UNMAP);
            test_error(err,
                       "Invalid command type returned for clEnqueueSVMUnmap");
        }
        return err;
    }

    cl_int run() override
    {
        cl_int err;
        cl_map_flags test_flags[] = { CL_MAP_READ, CL_MAP_WRITE,
                                      CL_MAP_WRITE_INVALIDATE_REGION,
                                      CL_MAP_READ | CL_MAP_WRITE };
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            // Mapping is only defined for SVM types with host-map capability
            if (deviceUSVMCaps[ti] & CL_SVM_CAPABILITY_HOST_MAP_KHR)
            {
                for (auto flags : test_flags)
                {
                    auto mem = get_usvm_wrapper<cl_uchar>(ti);
                    err = mem->allocate(alloc_count);
                    test_error(err, "SVM allocation failed");
                    err = test_SVMMapUnmap(mem.get(), flags);
                    test_error(err, "test_SVMMapUnmap");
                    err = mem->free();
                    test_error(err, "SVM free failed");
                }
            }
        }
        return CL_SUCCESS;
    }

    // Number of cl_uchar elements in every test allocation.
    static constexpr size_t alloc_count = 1024;
    // Number of randomized map/unmap iterations per flag combination.
    static constexpr size_t test_iterations = 100;
};
// Entry point: requires cl_khr_unified_svm, then runs the map/unmap fixture.
REGISTER_TEST(unified_svm_map_unmap)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    cl_int error;
    clContextWrapper contextWrapper;
    clCommandQueueWrapper queueWrapper;
    // For now: create a new context and queue.
    // If we switch to a new test executable and run the tests without
    // forceNoContextCreation then this can be removed, and we can just use
    // the context and the queue from the harness.
    if (nullptr == context)
    {
        contextWrapper =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &error);
        test_error(error, "clCreateContext failed");
        context = contextWrapper;
    }
    if (nullptr == queue)
    {
        queueWrapper = clCreateCommandQueue(context, device, 0, &error);
        test_error(error, "clCreateCommandQueue failed");
        queue = queueWrapper;
    }
    UnifiedSVMMapUnmap fixture(context, device, queue, num_elements);
    error = fixture.setup();
    test_error(error, "test setup failed");
    error = fixture.run();
    test_error(error, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,288 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <vector>
// Tests for clEnqueueSVMMemcpy between every compatible pair of supported
// USVM types, and between USVM allocations and plain host memory.
struct UnifiedSVMOPs : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;
    // Test the clEnqueueSVMMemcpy function for random ranges
    // of a USM allocation and validate the results.
    cl_int test_SVMMemcpy(USVMWrapper<cl_uchar> *src,
                          USVMWrapper<cl_uchar> *dst)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> src_data(alloc_count, 0);
        std::vector<cl_uchar> dst_data(alloc_count, 0);
        for (size_t it = 0; it < test_iterations; it++)
        {
            // Fill src data with a random pattern
            generate_random_inputs(src_data, d);
            err = src->write(src_data);
            test_error(err, "could not write to usvm memory");
            // Fill dst data with zeros
            err = dst->write(dst_data);
            test_error(err, "could not write to usvm memory");
            // Select a random range
            size_t offset = get_random_size_t(0, src_data.size() - 1, d);
            size_t length = get_random_size_t(1, src_data.size() - offset, d);
            // Same offset is applied to both source and destination, so the
            // copied bytes keep their index.
            void *src_ptr = &src->get_ptr()[offset];
            void *dst_ptr = &dst->get_ptr()[offset];
            clEventWrapper event;
            // Blocking copy: no clFinish is needed before validation.
            err = clEnqueueSVMMemcpy(queue, CL_BLOCKING, dst_ptr, src_ptr,
                                     length, 0, nullptr, &event);
            test_error(err, "clEnqueueSVMMemcpy failed");
            err = check_event_type(event, CL_COMMAND_SVM_MEMCPY);
            test_error(err,
                       "Invalid command type returned for clEnqueueSVMMemcpy");
            // Validate result
            std::vector<cl_uchar> result_data(alloc_count, 0);
            err = dst->read(result_data);
            test_error(err, "could not read from usvm memory");
            for (size_t i = 0; i < result_data.size(); i++)
            {
                cl_uchar expected_value;
                // Bytes inside the copied range must match the source; the
                // rest of the destination must still hold the zero fill.
                if (i >= offset && i < length + offset)
                {
                    expected_value = src_data[i];
                }
                else
                {
                    expected_value = 0;
                }
                if (expected_value != result_data[i])
                {
                    log_error("While attempting clEnqueueSVMMemcpy with "
                              "offset:%zu size:%zu \n"
                              "Data verification mismatch at %zu expected: %d "
                              "got: %d\n",
                              offset, length, i, expected_value,
                              result_data[i]);
                    return TEST_FAIL;
                }
            }
        }
        return CL_SUCCESS;
    }
    // Allocates a source and a destination of the given USVM type indices,
    // runs the memcpy test between them, and frees both.
    cl_int test_svm_memcpy(cl_uint srcTypeIndex, cl_uint dstTypeIndex)
    {
        cl_int err;
        auto srcMem = get_usvm_wrapper<cl_uchar>(srcTypeIndex);
        auto dstMem = get_usvm_wrapper<cl_uchar>(dstTypeIndex);
        err = srcMem->allocate(alloc_count);
        test_error(err, "SVM allocation failed");
        err = dstMem->allocate(alloc_count);
        test_error(err, "SVM allocation failed");
        err = test_SVMMemcpy(srcMem.get(), dstMem.get());
        test_error(err, "test_SVMMemcpy");
        err = srcMem->free();
        test_error(err, "SVM free failed");
        err = dstMem->free();
        test_error(err, "SVM free failed");
        return CL_SUCCESS;
    }
    // Runs the memcpy test between an allocation of the given USVM type and
    // plain host memory, in whichever directions the type's capabilities
    // allow.
    cl_int test_svm_memcpy(cl_uint TypeIndex)
    {
        cl_int err;
        const auto caps = deviceUSVMCaps[TypeIndex];
        auto mem = get_usvm_wrapper<cl_uchar>(TypeIndex);
        auto hostMem = get_hostptr_usvm_wrapper<cl_uchar>();
        err = mem->allocate(alloc_count);
        test_error(err, "SVM allocation failed");
        err = hostMem->allocate(alloc_count);
        test_error(err, "SVM allocation failed");
        // We check if the memory can be read by the host.
        if (caps & CL_SVM_CAPABILITY_HOST_READ_KHR
            || caps & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR)
        {
            err = test_SVMMemcpy(mem.get(), hostMem.get());
            test_error(err, "test_SVMMemcpy");
        }
        // We check if the memory can be written by the host.
        if (caps & CL_SVM_CAPABILITY_HOST_WRITE_KHR
            || caps & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR)
        {
            err = test_SVMMemcpy(hostMem.get(), mem.get());
            test_error(err, "test_SVMMemcpy");
        }
        err = mem->free();
        test_error(err, "SVM free failed");
        err = hostMem->free();
        test_error(err, "SVM free failed");
        return CL_SUCCESS;
    }
    cl_int run() override
    {
        cl_int err;
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        // Test all possible combinations between supported types
        for (cl_uint src_ti = 0; src_ti < max_ti; src_ti++)
        {
            for (cl_uint dst_ti = 0; dst_ti < max_ti; dst_ti++)
            {
                if (check_for_common_memory_type(src_ti, dst_ti))
                {
                    log_info(
                        " testing clEnqueueSVMMemcpy() SVM type %u -> SVM "
                        "type %u\n",
                        src_ti, dst_ti);
                    err = test_svm_memcpy(src_ti, dst_ti);
                    if (CL_SUCCESS != err)
                    {
                        return err;
                    }
                }
            }
        }
        // For each supported svm type test copy from a host ptr and to a host
        // ptr
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            log_info(
                " testing clEnqueueSVMMemcpy() SVM type %u <-> host ptr \n",
                ti);
            err = test_svm_memcpy(ti);
            if (CL_SUCCESS != err)
            {
                return err;
            }
        }
        return CL_SUCCESS;
    }
    // Builds a wrapper around plain (system-allocated) host memory so the
    // host side can take part in the same memcpy tests as USVM allocations.
    template <typename T>
    std::unique_ptr<USVMWrapper<T>> get_hostptr_usvm_wrapper()
    {
        return std::unique_ptr<USVMWrapper<T>>(
            new USVMWrapper<T>(nullptr, nullptr, nullptr, CL_UINT_MAX,
                               CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR
                                   | CL_SVM_CAPABILITY_HOST_READ_KHR
                                   | CL_SVM_CAPABILITY_HOST_WRITE_KHR,
                               0, nullptr, nullptr, nullptr, nullptr));
    }
    // Returns true when a memcpy between the two USVM types can be performed
    // by some agent (system allocation, host access, or device access).
    bool check_for_common_memory_type(cl_uint srcTypeIndex,
                                      cl_uint dstTypeIndex)
    {
        const auto srcCaps = deviceUSVMCaps[srcTypeIndex];
        const auto dstCaps = deviceUSVMCaps[dstTypeIndex];
        // Is either allocation a system allocation
        if ((srcCaps & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR)
            || (dstCaps & CL_SVM_CAPABILITY_SYSTEM_ALLOCATED_KHR))
        {
            return true;
        }
        // Is it possible to use the host
        if ((srcCaps & CL_SVM_CAPABILITY_HOST_READ_KHR)
            && (dstCaps & CL_SVM_CAPABILITY_HOST_WRITE_KHR))
        {
            return true;
        }
        // Is it possible to use the device
        if ((srcCaps & CL_SVM_CAPABILITY_DEVICE_READ_KHR)
            && (dstCaps & CL_SVM_CAPABILITY_DEVICE_WRITE_KHR))
        {
            return true;
        }
        return false;
    }
    // Number of cl_uchar elements in every test allocation.
    static constexpr size_t alloc_count = 1024;
    // Number of randomized iterations per type pairing.
    static constexpr size_t test_iterations = 100;
};
// Entry point: requires cl_khr_unified_svm, then runs the memcpy fixture.
REGISTER_TEST(unified_svm_memcpy)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    cl_int error;
    clContextWrapper contextWrapper;
    clCommandQueueWrapper queueWrapper;
    // For now: create a new context and queue.
    // If we switch to a new test executable and run the tests without
    // forceNoContextCreation then this can be removed, and we can just use
    // the context and the queue from the harness.
    if (nullptr == context)
    {
        contextWrapper =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &error);
        test_error(error, "clCreateContext failed");
        context = contextWrapper;
    }
    if (nullptr == queue)
    {
        queueWrapper = clCreateCommandQueue(context, device, 0, &error);
        test_error(error, "clCreateCommandQueue failed");
        queue = queueWrapper;
    }
    UnifiedSVMOPs fixture(context, device, queue, num_elements);
    error = fixture.setup();
    test_error(error, "test setup failed");
    error = fixture.run();
    test_error(error, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,229 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <vector>
// Tests for clEnqueueSVMMemFill with every power-of-two pattern size from 1
// to 128 bytes, over random pattern-aligned sub-ranges of the allocation.
struct UnifiedSVMMemFill : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test the clEnqueueSVMMemFill function for random patterns
    // over a random range of a USM allocation.
    cl_int test_SVMMemfill(USVMWrapper<cl_uchar> *mem)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> mem_data(alloc_count, 0);
        for (size_t pattern_size = 1; pattern_size <= 128; pattern_size *= 2)
        {
            std::vector<cl_uchar> fill_data(pattern_size, 0);
            // Fill the pattern with random bytes
            generate_random_inputs(fill_data, d);
            // Reset the allocation to zeros so untouched bytes can be checked
            err = mem->write(mem_data);
            test_error(err, "could not write to usvm memory");
            // Select a random range
            size_t offset = get_random_size_t(0, mem_data.size() - 1, d);
            // Align offset to pattern size (pattern_size is a power of two)
            offset &= ~(pattern_size - 1);
            // Select a random size.
            size_t fill_size =
                get_random_size_t(pattern_size, mem_data.size() - offset, d);
            // Align length to pattern size
            fill_size &= ~(pattern_size - 1);
            void *ptr = &mem->get_ptr()[offset];
            clEventWrapper event;
            err = clEnqueueSVMMemFill(queue, ptr, fill_data.data(),
                                      fill_data.size(), fill_size, 0, nullptr,
                                      &event);
            test_error(err, "clEnqueueSVMMemFill failed");
            err = clFinish(queue);
            test_error(err, "clFinish failed");
            err = check_event_type(event, CL_COMMAND_SVM_MEMFILL);
            test_error(err,
                       "Invalid command type returned for clEnqueueSVMMemFill");
            // Validate result
            std::vector<cl_uchar> result_data(alloc_count, 0);
            err = mem->read(result_data);
            test_error(err, "could not read from usvm memory");
            for (size_t i = 0; i < result_data.size(); i++)
            {
                cl_uchar expected_value;
                // Bytes inside the filled range repeat the pattern; bytes
                // outside must keep the original zero fill.
                if (i >= offset && i < fill_size + offset)
                {
                    expected_value = fill_data[i % pattern_size];
                }
                else
                {
                    expected_value = mem_data[i];
                }
                if (expected_value != result_data[i])
                {
                    log_error("While attempting clEnqueueSVMMemFill with "
                              "offset:%zu size:%zu \n"
                              "Data verification mismatch at %zu expected: %d "
                              "got: %d\n",
                              offset, fill_size, i, expected_value,
                              result_data[i]);
                    return TEST_FAIL;
                }
            }
        }
        return CL_SUCCESS;
    }

    // Allocates a buffer of the given USVM type, runs the fill test over all
    // pattern sizes, and frees it.
    cl_int test_svm_memfill(cl_uint srcTypeIndex)
    {
        cl_int err;
        auto mem = get_usvm_wrapper<cl_uchar>(srcTypeIndex);
        err = mem->allocate(alloc_count);
        test_error(err, "SVM allocation failed");
        err = test_SVMMemfill(mem.get());
        test_error(err, "test_SVMMemfill");
        err = mem->free();
        test_error(err, "SVM free failed");
        return CL_SUCCESS;
    }

    cl_int run() override
    {
        cl_int err;
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        // For each supported svm type test clEnqueueSVMMemFill for all
        // possible pattern sizes
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            log_info(" testing clEnqueueSVMMemFill() SVM type %u \n", ti);
            err = test_svm_memfill(ti);
            if (CL_SUCCESS != err)
            {
                return err;
            }
        }
        return CL_SUCCESS;
    }

    // Number of cl_uchar elements in every test allocation.
    static constexpr size_t alloc_count = 1024;
};
// Entry point: requires cl_khr_unified_svm, then runs the memfill fixture.
REGISTER_TEST(unified_svm_memfill)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    cl_int error;
    clContextWrapper contextWrapper;
    clCommandQueueWrapper queueWrapper;
    // For now: create a new context and queue.
    // If we switch to a new test executable and run the tests without
    // forceNoContextCreation then this can be removed, and we can just use
    // the context and the queue from the harness.
    if (nullptr == context)
    {
        contextWrapper =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &error);
        test_error(error, "clCreateContext failed");
        context = contextWrapper;
    }
    if (nullptr == queue)
    {
        queueWrapper = clCreateCommandQueue(context, device, 0, &error);
        test_error(error, "clCreateCommandQueue failed");
        queue = queueWrapper;
    }
    UnifiedSVMMemFill fixture(context, device, queue, num_elements);
    error = fixture.setup();
    test_error(error, "test setup failed");
    error = fixture.run();
    test_error(error, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,204 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <cinttypes>
#include <memory>
struct UnifiedSVMMigrate : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test the clEnqueueSVMMigrateMem function for ranges of a USVM
    // allocation. Each iteration rewrites the allocation with random data
    // and migrates one range:
    //   random_offset == false -> range starts at the base pointer
    //   random_length == false -> range runs to the end of the allocation
    // After each migration the command's event type is validated.
    cl_int test_SVMMigrate(USVMWrapper<cl_uchar> *mem,
                           cl_mem_migration_flags flags, bool random_offset,
                           bool random_length)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> mem_data(alloc_count, 0);

        for (size_t it = 0; it < test_iterations; it++)
        {
            // Fill src data with a random pattern
            generate_random_inputs(mem_data, d);
            err = mem->write(mem_data);
            test_error(err, "could not write to usvm memory");

            // Select a random range
            size_t offset = random_offset
                ? get_random_size_t(0, mem_data.size() - 1, d)
                : 0;
            size_t length = random_length
                ? get_random_size_t(1, mem_data.size() - offset, d)
                : mem_data.size() - offset;

            const void *ptr = &mem->get_ptr()[offset];

            clEventWrapper event;
            err = clEnqueueSVMMigrateMem(queue, 1, &ptr, &length, flags, 0,
                                         nullptr, &event);
            test_error(err, "clEnqueueSVMMigrateMem failed");

            err = clFinish(queue);
            test_error(err, "clFinish failed");

            err = check_event_type(event, CL_COMMAND_SVM_MIGRATE_MEM);
            test_error(
                err,
                "Invalid command type returned for clEnqueueSVMMigrateMem");
        }
        return CL_SUCCESS;
    }

    // Runs test_SVMMigrate against a fresh allocation of the given SVM
    // type for every migration flag combination and every range shape.
    cl_int test_svm_migrate(cl_uint typeIndex)
    {
        cl_int err;
        const cl_mem_migration_flags flags[] = {
            0, CL_MIGRATE_MEM_OBJECT_HOST,
            CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED,
            CL_MIGRATE_MEM_OBJECT_HOST | CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED
        };
        // Each pair selects a different migration range shape: the whole
        // allocation, a tail starting at a random offset, a head of random
        // length, and a fully random sub-range.
        const struct
        {
            bool random_offset;
            bool random_length;
        } range_modes[] = { { false, false },
                            { true, false },
                            { false, true },
                            { true, true } };

        auto mem = get_usvm_wrapper<cl_uchar>(typeIndex);

        for (const auto &mode : range_modes)
        {
            for (auto test_flags : flags)
            {
                err = mem->allocate(alloc_count);
                test_error(err, "SVM allocation failed");
                err = test_SVMMigrate(mem.get(), test_flags,
                                      mode.random_offset, mode.random_length);
                test_error(err, "test_SVMMigrate");
                err = mem->free();
                test_error(err, "SVM free failed");
            }
        }
        return CL_SUCCESS;
    }

    cl_int run() override
    {
        cl_int err;
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        // For each supported svm type test clEnqueueSVMMigrateMem for all
        // migration flag combinations and range shapes
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            log_info(" testing clEnqueueSVMMigrateMem() SVM type %u \n", ti);
            err = test_svm_migrate(ti);
            test_error(err, "clEnqueueSVMMigrateMem() testing failed");
        }
        return CL_SUCCESS;
    }

    static constexpr size_t alloc_count = 1024;
    static constexpr size_t test_iterations = 10;
};
// Entry point: runs the clEnqueueSVMMigrateMem tests when the device
// supports cl_khr_unified_svm.
REGISTER_TEST(unified_svm_migrate)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }

    cl_int err = CL_SUCCESS;
    clContextWrapper localContext;
    clCommandQueueWrapper localQueue;

    // The harness may run with forceNoContextCreation and hand us no
    // context/queue; create temporary ones that the wrappers release on
    // scope exit. This can go away if the tests move to an executable
    // that always supplies them.
    if (nullptr == context)
    {
        localContext =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &err);
        test_error(err, "clCreateContext failed");
        context = localContext;
    }
    if (nullptr == queue)
    {
        localQueue = clCreateCommandQueue(context, device, 0, &err);
        test_error(err, "clCreateCommandQueue failed");
        queue = localQueue;
    }

    UnifiedSVMMigrate migrateTest(context, device, queue, num_elements);
    err = migrateTest.setup();
    test_error(err, "test setup failed");
    err = migrateTest.run();
    test_error(err, "test failed");
    return TEST_PASS;
}

View File

@@ -0,0 +1,190 @@
//
// Copyright (c) 2025 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "unified_svm_fixture.h"
#include "harness/conversions.h"
#include "harness/testHarness.h"
#include "harness/typeWrappers.h"
#include <vector>
struct UnifiedSVMSetArg : UnifiedSVMBase
{
    using UnifiedSVMBase::UnifiedSVMBase;

    // Test the clSetKernelArgSVMPointer function for random ranges of a
    // USVM allocation: write a random pattern to the USVM memory, point
    // the kernel at a random offset within it, and validate that the
    // kernel copies the expected bytes into a destination buffer.
    cl_int test_svm_set_arg(USVMWrapper<cl_uchar> *src)
    {
        cl_int err = CL_SUCCESS;
        std::vector<cl_uchar> src_data(alloc_count, 0);
        for (size_t it = 0; it < test_iterations; it++)
        {
            // Fill src data with a random pattern
            generate_random_inputs(src_data, d);
            err = src->write(src_data);
            test_error(err, "could not write to usvm memory");

            // Select a random range
            size_t offset = get_random_size_t(0, src_data.size() - 1, d);
            size_t length = get_random_size_t(1, src_data.size() - offset, d);

            void *src_ptr = &src->get_ptr()[offset];
            err = clSetKernelArgSVMPointer(test_kernel, 0, src_ptr);
            test_error(err, "clSetKernelArgSVMPointer failed");

            std::vector<cl_uchar> result_data(length, 0);
            clMemWrapper dst_mem = clCreateBuffer(
                context, CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR,
                result_data.size(), result_data.data(), &err);
            // Bug fix: this status check used to run before any buffer was
            // created, so clCreateBuffer's result was never inspected.
            test_error(err, "clCreateBuffer failed.");

            err = clSetKernelArg(test_kernel, 1, sizeof(dst_mem), &dst_mem);
            test_error(err, "clSetKernelArg failed.");

            size_t gws{ length };
            err = clEnqueueNDRangeKernel(queue, test_kernel, 1, nullptr, &gws,
                                         nullptr, 0, nullptr, nullptr);
            test_error(err, "clEnqueueNDRangeKernel failed");

            // Blocking read also guarantees the kernel has completed.
            err = clEnqueueReadBuffer(queue, dst_mem, CL_TRUE, 0,
                                      result_data.size(), result_data.data(), 0,
                                      nullptr, nullptr);
            test_error(err, "clEnqueueReadBuffer failed");

            // Validate result
            const cl_uchar *expected_data = src_data.data() + offset;
            for (size_t i = 0; i < length; i++)
            {
                if (expected_data[i] != result_data[i])
                {
                    log_error("While attempting clSetKernelArgSVMPointer with "
                              "offset:%zu size:%zu \n"
                              "Data verification mismatch at %zu expected: %d "
                              "got: %d\n",
                              offset, length, i, expected_data[i],
                              result_data[i]);
                    return TEST_FAIL;
                }
            }
        }
        return CL_SUCCESS;
    }

    // Builds the copy kernel that reads through the SVM pointer argument.
    cl_int setup() override
    {
        cl_int err = UnifiedSVMBase::setup();
        if (CL_SUCCESS != err)
        {
            return err;
        }

        const char *programString = R"(
            kernel void test_kernel( const global char* src, global char* dst)
            {
                dst[get_global_id(0)] = src[get_global_id(0)];
            }
        )";

        clProgramWrapper program;
        err = create_single_kernel_helper(context, &program, &test_kernel, 1,
                                          &programString, "test_kernel");
        test_error(err, "could not create test_kernel kernel");
        return err;
    }

    cl_int run() override
    {
        cl_int err;
        cl_uint max_ti = static_cast<cl_uint>(deviceUSVMCaps.size());
        for (cl_uint ti = 0; ti < max_ti; ti++)
        {
            auto mem = get_usvm_wrapper<cl_uchar>(ti);
            err = mem->allocate(alloc_count);
            test_error(err, "SVM allocation failed");

            log_info(" testing clSetKernelArgSVMPointer() SVM type %u \n",
                     ti);
            err = test_svm_set_arg(mem.get());
            if (CL_SUCCESS != err)
            {
                return err;
            }
            err = mem->free();
            test_error(err, "SVM free failed");
        }
        return CL_SUCCESS;
    }

    clKernelWrapper test_kernel;
    static constexpr size_t alloc_count = 1024;
    static constexpr size_t test_iterations = 100;
};
// Entry point: runs the clSetKernelArgSVMPointer tests when the device
// supports cl_khr_unified_svm.
REGISTER_TEST(unified_svm_set_arg)
{
    if (!is_extension_available(device, "cl_khr_unified_svm"))
    {
        log_info("cl_khr_unified_svm is not supported, skipping test.\n");
        return TEST_SKIPPED_ITSELF;
    }

    cl_int err = CL_SUCCESS;
    clContextWrapper localContext;
    clCommandQueueWrapper localQueue;

    // The harness may run with forceNoContextCreation and hand us no
    // context/queue; create temporary ones that the wrappers release on
    // scope exit. This can go away if the tests move to an executable
    // that always supplies them.
    if (nullptr == context)
    {
        localContext =
            clCreateContext(nullptr, 1, &device, nullptr, nullptr, &err);
        test_error(err, "clCreateContext failed");
        context = localContext;
    }
    if (nullptr == queue)
    {
        localQueue = clCreateCommandQueue(context, device, 0, &err);
        test_error(err, "clCreateCommandQueue failed");
        queue = localQueue;
    }

    UnifiedSVMSetArg setArgTest(context, device, queue, num_elements);
    err = setArgTest.setup();
    test_error(err, "test setup failed");
    err = setArgTest.run();
    test_error(err, "test failed");
    return TEST_PASS;
}

View File

@@ -121,6 +121,8 @@ public:
return CL_SUCCESS;
}
void reset() { data = nullptr; }
cl_int free()
{
if (data)
@@ -136,7 +138,7 @@ public:
test_error(err, "clSVMFreeWithPropertiesKHR failed");
}
data = nullptr;
reset();
}
return CL_SUCCESS;
@@ -165,6 +167,9 @@ public:
err = clEnqueueSVMUnmap(queue, data, 0, nullptr, nullptr);
test_error(err, "clEnqueueSVMUnmap failed");
err = clFinish(queue);
test_error(err, "clFinish failed");
}
else if (caps & CL_SVM_CAPABILITY_DEVICE_WRITE_KHR)
{
@@ -215,6 +220,9 @@ public:
err = clEnqueueSVMUnmap(queue, data, 0, nullptr, nullptr);
test_error(err, "clEnqueueSVMUnmap failed");
err = clFinish(queue);
test_error(err, "clFinish failed");
}
else if (caps & CL_SVM_CAPABILITY_DEVICE_READ_KHR)
{