Mirror of https://github.com/KhronosGroup/OpenCL-CTS.git, synced 2026-03-19 06:09:01 +00:00
Reduce scope of variables (#1228)
Make variables local to loops, with appropriate types.

These variables are not read after the loop without being reset first, so this patch doesn't change behaviour. These variables should now be used for one purpose only, making it easier to reason about the code. This will make future refactoring easier.

Signed-off-by: Marco Antognini <marco.antognini@arm.com>
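Every hunk below applies the same pattern: a counter that used to be declared at function scope (size_t i, j; / cl_uint j, k;) is instead declared in the loop header, with a type that matches what it iterates over. As a minimal, stand-alone sketch of the before/after shape of the change (the function and variable names here are invented for illustration and are not taken from the CTS sources):

    #include <cstddef>
    #include <vector>

    // Before: one counter declared at function scope and shared by every loop.
    static double sum_before(const std::vector<double> &values)
    {
        std::size_t i;
        double total = 0.0;
        for (i = 0; i < values.size(); i++) total += values[i];
        // 'i' is still in scope here and could silently be reused by a later loop.
        return total;
    }

    // After: the counter is declared in the loop header with an appropriate type,
    // so it exists only where it is used and serves exactly one purpose.
    static double sum_after(const std::vector<double> &values)
    {
        double total = 0.0;
        for (std::size_t i = 0; i < values.size(); i++) total += values[i];
        return total;
    }

In the diff itself the new loop counters are either given an explicit type (cl_uint, size_t) or declared with auto, which deduces the type from the counter's initializer (for example gMinVectorSizeIndex), so each counter matches its bound without repeating the type.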
@@ -286,7 +286,6 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
 {
     TestInfo test_info;
     cl_int error;
-    size_t i, j;
     float maxError = 0.0f;
     double maxErrorVal = 0.0;
     double maxErrorVal2 = 0.0;
@@ -321,7 +320,7 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
 
     // cl_kernels aren't thread safe, so we make one for each vector size for
     // every thread
-    for (i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
+    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
     {
         size_t array_size = test_info.threadCount * sizeof(cl_kernel);
         test_info.k[i] = (cl_kernel *)malloc(array_size);
@@ -344,7 +343,7 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
     }
     memset(test_info.tinfo, 0,
            test_info.threadCount * sizeof(*test_info.tinfo));
-    for (i = 0; i < test_info.threadCount; i++)
+    for (cl_uint i = 0; i < test_info.threadCount; i++)
     {
         cl_buffer_region region = {
             i * test_info.subBufferSize * sizeof(cl_double),
@@ -371,7 +370,7 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
             goto exit;
         }
 
-        for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
             test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                 gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
@@ -413,7 +412,7 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
     error = ThreadPool_Do(Test, test_info.jobCount, &test_info);
 
     // Accumulate the arithmetic errors
-    for (i = 0; i < test_info.threadCount; i++)
+    for (cl_uint i = 0; i < test_info.threadCount; i++)
     {
         if (test_info.tinfo[i].maxError > maxError)
         {
@@ -437,12 +436,12 @@ int TestFunc_Double_Double_Double(const Func *f, MTdata d, bool relaxedMode)
 
 exit:
     // Release
-    for (i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
+    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
     {
         clReleaseProgram(test_info.programs[i]);
         if (test_info.k[i])
         {
-            for (j = 0; j < test_info.threadCount; j++)
+            for (cl_uint j = 0; j < test_info.threadCount; j++)
                 clReleaseKernel(test_info.k[i][j]);
 
             free(test_info.k[i]);
@@ -450,12 +449,12 @@ exit:
     }
     if (test_info.tinfo)
     {
-        for (i = 0; i < test_info.threadCount; i++)
+        for (cl_uint i = 0; i < test_info.threadCount; i++)
         {
             free_mtdata(test_info.tinfo[i].d);
             clReleaseMemObject(test_info.tinfo[i].inBuf);
             clReleaseMemObject(test_info.tinfo[i].inBuf2);
-            for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+            for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
                 clReleaseMemObject(test_info.tinfo[i].outBuf[j]);
             clReleaseCommandQueue(test_info.tinfo[i].tQueue);
         }
@@ -477,7 +476,6 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
     dptr func = job->f->dfunc;
     int ftz = job->ftz;
     MTdata d = tinfo->d;
-    cl_uint j, k;
     cl_int error;
     const char *name = job->f->name;
 
@@ -492,7 +490,7 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
     // start the map of the output arrays
     cl_event e[VECTOR_SIZE_COUNT];
     cl_ulong *out[VECTOR_SIZE_COUNT];
-    for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
     {
         out[j] = (cl_ulong *)clEnqueueMapBuffer(
             tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
@@ -511,11 +509,11 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
     // Init input array
     cl_ulong *p = (cl_ulong *)gIn + thread_id * buffer_elements;
     cl_ulong *p2 = (cl_ulong *)gIn2 + thread_id * buffer_elements;
-    j = 0;
+    cl_uint idx = 0;
     int totalSpecialValueCount = specialValuesCount * specialValuesCount;
-    int indx = (totalSpecialValueCount - 1) / buffer_elements;
+    int lastSpecialJobIndex = (totalSpecialValueCount - 1) / buffer_elements;
 
-    if (job_id <= (cl_uint)indx)
+    if (job_id <= (cl_uint)lastSpecialJobIndex)
     { // test edge cases
         cl_double *fp = (cl_double *)p;
         cl_double *fp2 = (cl_double *)p2;
@@ -524,10 +522,10 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
         x = (job_id * buffer_elements) % specialValuesCount;
         y = (job_id * buffer_elements) / specialValuesCount;
 
-        for (; j < buffer_elements; j++)
+        for (; idx < buffer_elements; idx++)
         {
-            fp[j] = specialValues[x];
-            fp2[j] = specialValues[y];
+            fp[idx] = specialValues[x];
+            fp2[idx] = specialValues[y];
             if (++x >= specialValuesCount)
             {
                 x = 0;
@@ -538,10 +536,10 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
     }
 
     // Init any remaining values.
-    for (; j < buffer_elements; j++)
+    for (; idx < buffer_elements; idx++)
    {
-        p[j] = genrand_int64(d);
-        p2[j] = genrand_int64(d);
+        p[idx] = genrand_int64(d);
+        p2[idx] = genrand_int64(d);
     }
 
     if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
@@ -558,7 +556,7 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
         goto exit;
     }
 
-    for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
     {
         // Wait for the map to finish
         if ((error = clWaitForEvents(1, e + j)))
@@ -626,12 +624,12 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
     r = (cl_double *)gOut_Ref + thread_id * buffer_elements;
     s = (cl_double *)gIn + thread_id * buffer_elements;
     s2 = (cl_double *)gIn2 + thread_id * buffer_elements;
-    for (j = 0; j < buffer_elements; j++)
+    for (size_t j = 0; j < buffer_elements; j++)
         r[j] = (cl_double)func.f_ff(s[j], s2[j]);
 
     // Read the data back -- no need to wait for the first N-1 buffers but wait
     // for the last buffer. This is an in order queue.
-    for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
     {
         cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
         out[j] = (cl_ulong *)clEnqueueMapBuffer(
@@ -647,9 +645,9 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
 
     // Verify data
     t = (cl_ulong *)r;
-    for (j = 0; j < buffer_elements; j++)
+    for (size_t j = 0; j < buffer_elements; j++)
     {
-        for (k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
+        for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
         {
             cl_ulong *q = out[k];
 
@@ -794,7 +792,7 @@ static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
         }
     }
 
-    for (j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
+    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
     {
         if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                              out[j], 0, NULL, NULL)))