I believe the following code carries out a typical
- copy to device
- call kernel
- copy back to host
workflow.
What I found very strange is that when I used the Trace Application option in the NSight profiler, with 'stack trace' turned on, the report showed the most expensive operation to be the line in bold, and just that line, while each of the other cudaMemcpy operations cost only about 10% or less of that one.
Is this because it is the first line after the kernel call, so the profiler somehow folds the cost of some synchronization into the cost of this particular cudaMemcpy operation?
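If it helps, here is a minimal, self-contained sketch of how I intend to check that hypothesis with CUDA events, using a made-up busyKernel in place of my real one: if the launch is asynchronous, most of the time the profiler currently charges to the first device-to-host copy should show up as kernel time here.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Toy kernel that just burns time, standing in for the real one.
__global__ void busyKernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        for (int k = 0; k < 10000; ++k)
            x[i] = x[i] * 1.000001f + 0.5f;
}

int main()
{
    const int n = 1 << 20;
    float *h_x = (float*)malloc(n * sizeof(float));
    float *d_x = 0;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Time the kernel by itself.
    cudaEventRecord(start);
    busyKernel<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float kernel_ms = 0.0f;
    cudaEventElapsedTime(&kernel_ms, start, stop);

    // Time the copy-back by itself; the kernel has already finished.
    cudaEventRecord(start);
    cudaMemcpy(h_x, d_x, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float copy_ms = 0.0f;
    cudaEventElapsedTime(&copy_ms, start, stop);

    printf("kernel: %.3f ms, copy: %.3f ms\n", kernel_ms, copy_ms);
    cudaFree(d_x);
    free(h_x);
    return 0;
}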
For a problem like the one I'm working on, which requires very frequent synchronization and 'returning' results to the host, can anyone offer general advice on best practice? I was thinking in particular about two options (rough sketches of both follow the list), though I am not sure whether either will actually help:
- use 'zero-copy' memory (CUDA by Example, section 11.2)
- create my own synchronization using atomic operations
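To make option 1 concrete, here is a minimal, self-contained sketch of the zero-copy (mapped, pinned memory) pattern from CUDA by Example 11.2; the scale kernel and its data are made up for illustration, standing in for my real structures. The kernel writes directly into mapped host memory, so the explicit device-to-host copies disappear.

#include <cstdio>
#include <cuda_runtime.h>

// Toy kernel: scales an array in place.
__global__ void scale(float *data, int n, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;
}

int main()
{
    const int n = 1024;
    // Must be set before any CUDA context is created.
    cudaSetDeviceFlags(cudaDeviceMapHost);

    // Pinned host memory that is mapped into the device address space.
    float *h_data = 0;
    cudaHostAlloc((void**)&h_data, n * sizeof(float), cudaHostAllocMapped);
    for (int i = 0; i < n; ++i)
        h_data[i] = 1.0f;

    // Device-side alias of the same buffer; no cudaMemcpy needed.
    float *d_data = 0;
    cudaHostGetDevicePointer((void**)&d_data, h_data, 0);

    scale<<<(n + 255) / 256, 256>>>(d_data, n, 2.0f);
    cudaDeviceSynchronize();  // after this, h_data already holds the results

    printf("h_data[0] = %f\n", h_data[0]);  // prints 2.000000
    cudaFreeHost(h_data);
    return 0;
}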
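For option 2, the rough idea is a single device counter updated with atomicAdd, so that each iteration the host reads back one int instead of the whole converged array. Again only a sketch; countConverged, d_err, and tol are illustrative names, not my real code.

#include <cstdio>
#include <cuda_runtime.h>

// Each thread that considers its particle converged bumps a shared counter.
__global__ void countConverged(const float *err, int n, float tol, int *numConverged)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && err[i] < tol)
        atomicAdd(numConverged, 1);
}

int main()
{
    const int n = 1024;
    float *d_err = 0;
    int *d_num = 0;
    cudaMalloc((void**)&d_err, n * sizeof(float));
    cudaMalloc((void**)&d_num, sizeof(int));
    cudaMemset(d_err, 0, n * sizeof(float));  // all errors zero, so everything "converges"
    cudaMemset(d_num, 0, sizeof(int));

    countConverged<<<(n + 255) / 256, 256>>>(d_err, n, 1e-3f, d_num);

    // Only a single int crosses the bus per iteration.
    int h_num = 0;
    cudaMemcpy(&h_num, d_num, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d of %d converged\n", h_num, n);  // prints 1024 of 1024

    cudaFree(d_err);
    cudaFree(d_num);
    return 0;
}

For reference, my actual code follows.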
{
int numP = p_psPtr->P.size();
int numL = p_psPtr->L.size();
// Our partition is in units of the number of particles
int block_dim = BLOCK_DIM_X;
int grid_dim = numP / block_dim + (numP % block_dim == 0 ? 0 : 1);
vector<Particle> pVec(p_psPtr->P.begin(), p_psPtr->P.end());
Particle *d_part_arr = 0;
Particle *part_arr = pVec.data();
HANDLE_ERROR(cudaMalloc((void**)&d_part_arr, numP * sizeof(Particle)));
HANDLE_ERROR(cudaMemcpy(d_part_arr, part_arr, numP * sizeof(Particle), cudaMemcpyHostToDevice));
vector<SpringLink> lVec(p_psPtr->L.begin(), p_psPtr->L.end());
SpringLink *d_link_arr = 0;
SpringLink *link_arr = lVec.data();
HANDLE_ERROR(cudaMalloc((void**)&d_link_arr, numL * sizeof(SpringLink)));
HANDLE_ERROR(cudaMemcpy(d_link_arr, link_arr, numL * sizeof(SpringLink), cudaMemcpyHostToDevice));
Point3D *d_oriPos_arr = 0;
Point3D *oriPos_arr = p_originalPos.data();
HANDLE_ERROR(cudaMalloc((void**)&d_oriPos_arr, numP * sizeof(Point3D)));
HANDLE_ERROR(cudaMemcpy(d_oriPos_arr, oriPos_arr, numP * sizeof(Point3D), cudaMemcpyHostToDevice));
Vector3D *d_oriVel_arr = 0;
Vector3D *oriVel_arr = p_originalVel.data();
HANDLE_ERROR(cudaMalloc((void**)&d_oriVel_arr, numP * sizeof(Vector3D)));
HANDLE_ERROR(cudaMemcpy(d_oriVel_arr, oriVel_arr, numP * sizeof(Vector3D), cudaMemcpyHostToDevice));
Point3D *d_updPos_arr = 0;
Point3D *updPos_arr = p_updatedPos.data();
HANDLE_ERROR(cudaMalloc((void**)&d_updPos_arr, numP * sizeof(Point3D)));
HANDLE_ERROR(cudaMemcpy(d_updPos_arr, updPos_arr, numP * sizeof(Point3D), cudaMemcpyHostToDevice));
Vector3D *d_updVel_arr = 0;
Vector3D *updVel_arr = p_updatedVel.data();
HANDLE_ERROR(cudaMalloc((void**)&d_updVel_arr, numP * sizeof(Vector3D)));
HANDLE_ERROR(cudaMemcpy(d_updVel_arr, updVel_arr, numP * sizeof(Vector3D), cudaMemcpyHostToDevice));
int *d_converged_arr = 0;
int *converged_arr = &p_converged[0];
HANDLE_ERROR(cudaMalloc((void**)&d_converged_arr, numP * sizeof(int)));
HANDLE_ERROR(cudaMemcpy(d_converged_arr, converged_arr, numP * sizeof(int), cudaMemcpyHostToDevice));
// Run the kernel on the device
handleParticleKernel<<<grid_dim, block_dim>>>(d_part_arr, d_link_arr, numP,
d_oriPos_arr, d_oriVel_arr, d_updPos_arr, d_updVel_arr,
d_converged_arr, p_innerLoopIdx, p_dt);
**HANDLE_ERROR(cudaMemcpy(oriPos_arr, d_oriPos_arr, numP * sizeof(Point3D), cudaMemcpyDeviceToHost));**
HANDLE_ERROR(cudaMemcpy(oriVel_arr, d_oriVel_arr, numP * sizeof(Vector3D), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(updPos_arr, d_updPos_arr, numP * sizeof(Point3D), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(updVel_arr, d_updVel_arr, numP * sizeof(Vector3D), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(converged_arr, d_converged_arr, numP * sizeof(int), cudaMemcpyDeviceToHost));
// Free the device allocations
HANDLE_ERROR(cudaFree(d_part_arr));
HANDLE_ERROR(cudaFree(d_link_arr));
HANDLE_ERROR(cudaFree(d_oriPos_arr));
HANDLE_ERROR(cudaFree(d_oriVel_arr));
HANDLE_ERROR(cudaFree(d_updPos_arr));
HANDLE_ERROR(cudaFree(d_updVel_arr));
HANDLE_ERROR(cudaFree(d_converged_arr));
}