Question

Is there a way to read the values in a cudaArray from the device without wrapping it in a texture reference/object? All of the examples I've looked at use cudaArrays exclusively for creating textures. Is that the only way they can be used, or could I do something like:

__global__ kernel(cudaArray *arr, ...) {
    float x = tex1D<float>(arr, ...);
    ...
}

cudaArray *arr;
cudaMallocArray(&arr, ...);
cudaMemcpyToArray(arr, ...);
kernel<<<...>>>(arr, ...);

So basically, what should go in place of tex1D there? Also, if this is possible I'd be curious if anyone thinks there would be any performance benefit to doing this, but I'll also be running my own tests to see.

Thanks!

Was it helpful?

Solution

cudaArray is defined for texturing or surface memory purposes. As indicated here:

CUDA arrays are opaque memory layouts optimized for texture fetching. They are one-dimensional, two-dimensional, or three-dimensional and composed of elements, each of which has 1, 2 or 4 components that may be signed or unsigned 8-, 16- or 32-bit integers, 16-bit floats, or 32-bit floats. CUDA arrays are only accessible by kernels through texture fetching as described in Texture Memory or surface reading and writing as described in Surface Memory.

So in effect you have to use either texture functions or surface functions in kernels to access data in a cudaArray.

There are several performance benefit possibilities associated with using texturing. Texturing can imply interpolation (i.e. reading from a texture using floating point coordinates). Any application that needs this kind of data interpolation may benefit from the HW interpolation engines inside the texture units on the GPU.

Another benefit, perhaps the most important for using texturing in arbitrary GPU codes, is the texture cache that backs up the textures stored in global memory. Texturing is a read-only operation, but if you have an array of read-only data, the texture cache may improve or otherwise extend your ability to access data rapidly. This generally implies that there must be data-locality/ data-reuse in your functions that are accessing data stored in the texturing mechanism. Texture data retrieved will not disrupt anything in the L1 cache, so generally this kind of data segmentation/optimization would be part of a larger strategy around data caching. If there were no other demands on L1 cache, the texture mechanism/cache does not provide faster access to data than if it were in the L1 already.

OTHER TIPS

Robert Crovella has already answered your question. I believe it could be useful for future readers to have a worked example of the two solutions: textures and surfaces.

#include <stdio.h>
// NOTE: use forward slashes in #include paths — a backslash here is
// implementation-defined and fails to compile on non-Windows toolchains.
#include <thrust/device_vector.h>

// --- 2D float texture reference (legacy texture-reference API; deprecated
//     in newer CUDA releases — prefer cudaTextureObject_t in new code)
texture<float, cudaTextureType2D, cudaReadModeElementType> texRef;

// --- 2D surface reference; must be bound to a cudaArray allocated with
//     the cudaArraySurfaceLoadStore flag
surface<void, 2> surf2D;

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA runtime call fails and, by
// default, abort the process with the error code as exit status.
// file/line are injected by the gpuErrchk macro via __FILE__/__LINE__.
// NOTE: `file` must be const char* — __FILE__ expands to a string literal,
// which cannot bind to a non-const char* in standard C++.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/*************************************/
/* cudaArray PRINTOUT TEXTURE KERNEL */
/*************************************/
// Prints every element of the cudaArray bound to texRef.
// Expects a launch covering the full width x height domain (here 3x3 in
// a single block). texRef.normalized == true, so coordinates lie in
// [0,1) and the center of texel (x, y) is at ((x+0.5)/width, (y+0.5)/height).
// The original expression `x / width + 0.5f` shifted the lookup by half
// the texture — invisible here only because every element holds the same
// value and wrap addressing is used.
__global__ void cudaArrayPrintoutTexture(int width, int height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

    printf("Thread index: (%i, %i); cudaArray = %f\n", x, y,
           tex2D(texRef, (x + 0.5f) / (float)width, (y + 0.5f) / (float)height));
}

/*************************************/
/* cudaArray PRINTOUT SURFACE KERNEL */
/*************************************/
// Prints every element of the cudaArray bound to surf2D.
// surf2Dread addresses the x dimension in BYTES, hence the sizeof(float)
// scaling on the column index.
__global__ void cudaArrayPrintoutSurface(int width, int height)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;

    float value;
    surf2Dread(&value, surf2D, col * sizeof(float), row);

    printf("Thread index: (%i, %i); cudaArray = %f\n", col, row, value);
}

/********/
/* MAIN */
/********/
// Demonstrates the two ways a kernel can read a cudaArray: via a bound
// texture reference and via a bound surface reference.
// Returns 0 on success; gpuErrchk aborts with the CUDA error code on failure.
int main()
{
    const int width = 3, height = 3;

    // --- Host data: width x height elements, all 3.0f
    thrust::host_vector<float> h_data(width * height, 3.f);

    // --- Channel descriptor: one 32-bit float component per element
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

    cudaArray* cuArray;

    /*******************/
    /* TEXTURE BINDING */
    /*******************/
    gpuErrchk(cudaMallocArray(&cuArray, &channelDesc, width, height));

    // --- Copy host data to the cudaArray
    //     NOTE: cudaMemcpyToArray is deprecated; prefer cudaMemcpy2DToArray
    //     in new code.
    gpuErrchk(cudaMemcpyToArray(cuArray, 0, 0, thrust::raw_pointer_cast(h_data.data()), width * height * sizeof(float), cudaMemcpyHostToDevice));

    // --- Set texture parameters: wrap addressing, linear filtering,
    //     normalized [0,1) coordinates
    texRef.addressMode[0] = cudaAddressModeWrap;
    texRef.addressMode[1] = cudaAddressModeWrap;
    texRef.filterMode = cudaFilterModeLinear;
    texRef.normalized = true;

    // --- Bind the array to the texture reference
    gpuErrchk(cudaBindTextureToArray(texRef, cuArray, channelDesc));

    // --- Invoke printout kernel: one 3x3 block covers the whole array
    dim3 dimBlock(3, 3);
    dim3 dimGrid(1, 1);
    cudaArrayPrintoutTexture<<<dimGrid, dimBlock>>>(width, height);
    gpuErrchk(cudaPeekAtLastError());
    // --- Kernel launches are asynchronous: synchronize BEFORE unbinding the
    //     texture and freeing the array the kernel may still be reading.
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaUnbindTexture(texRef));

    gpuErrchk(cudaFreeArray(cuArray));

    /******************/
    /* SURFACE MEMORY */
    /******************/
    // --- cudaArraySurfaceLoadStore is required for surface binding
    gpuErrchk(cudaMallocArray(&cuArray, &channelDesc, width, height, cudaArraySurfaceLoadStore));

    // --- Copy host data to the cudaArray (see deprecation note above)
    gpuErrchk(cudaMemcpyToArray(cuArray, 0, 0, thrust::raw_pointer_cast(h_data.data()), width * height * sizeof(float), cudaMemcpyHostToDevice));

    gpuErrchk(cudaBindSurfaceToArray(surf2D, cuArray));

    cudaArrayPrintoutSurface<<<dimGrid, dimBlock>>>(width, height);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaFreeArray(cuArray));

    return 0;
}
Licensed under: CC-BY-SA with attribution
Not affiliated with StackOverflow
scroll top