Why so many fences? In fact I think you do not even need them at all. You only need a fence when a thread write to local will be readen by other thread. Not when that thread read and write to his local memory.
BTW fences are much better than barriers. In a barrier you force the threads to be in sync. This kills the performance in some cases.
I think you can rewrite your code to gain quite a lot in speed by changing the memory access model.
You can try if this works better (I made many obvious optimizations, without knowing what your code even is doing):
__kernel void squareGpuMatrixMul(__global float * v1, __global float * v2, __global float * v3)
{
int localRow = get_local_id(0);
int localCol = get_local_id(1);
int selectRowFromA = get_group_id(0)*32;
int selectColFromB = get_group_id(1)*32;
int lid= localCol*16+localRow;
__local float Lcache1[ 16][ 16];
__local float Lcache2[ 16][ 16];
__local float Lcache3[ 16][ 16];
__local float Lcache1a[ 16][ 16];
__local float Lcache2a[ 16][ 16];
__local float Lcache3a[ 16][ 16];
__local float Lcache1b[ 16][ 16];
__local float Lcache2b[ 16][ 16];
__local float Lcache3b[ 16][ 16];
__local float Lcache1c[ 16][ 16];
__local float Lcache2c[ 16][ 16];
__local float Lcache3c[ 16][ 16];
float tmp0=0.0f;
float tmp1=0.0f;
float tmp2=0.0f;
float tmp3=0.0f;
float tmp4=0.0f;
float tmp5=0.0f;
float tmp6=0.0f;
float tmp7=0.0f;
float sumPatch=0.0f;
float sumPatcha=0.0f;
float sumPatchb=0.0f;
float sumPatchc=0.0f;
float sumPatch2=0.0f;
float sumPatcha2=0.0f;
float sumPatchb2=0.0f;
float sumPatchc2=0.0f;
Lcache3[localRow][localCol]=0.0f;
Lcache3a[localRow][localCol]=0.0f;
Lcache3b[localRow][localCol]=0.0f;
Lcache3c[localRow][localCol]=0.0f;
for(int i=0;i<1024;i+=32) // this is A's row and B's column parsed by sub-matrices
{
Lcache1[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024];
Lcache2[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024];
Lcache1a[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16];
Lcache2a[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16];
Lcache1b[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+16384];
Lcache2b[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+16384];
Lcache1c[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16+16384];
Lcache2c[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16+16384];
mem_fence(CLK_LOCAL_MEM_FENCE);
sumPatch=0.0f;
sumPatcha=0.0f;
sumPatchb=0.0f;
sumPatchc=0.0f;
sumPatch2=0.0f;
sumPatcha2=0.0f;
sumPatchb2=0.0f;
sumPatchc2=0.0f;
for(int kk=0;kk< 16;kk++) //this is sub-matrix multiplication
{
tmp0=Lcache1[kk][localRow]; // row-major
tmp1=Lcache1a[kk][localRow]; // accesses
tmp2=Lcache1b[kk][localRow]; //to local memory
tmp3=Lcache1c[kk][localRow];
tmp4=Lcache2[kk][localCol];
tmp5=Lcache2a[kk][localCol];
tmp6=Lcache2b[kk][localCol];
tmp7=Lcache2c[kk][localCol];
sumPatch+=tmp0*tmp4;
sumPatcha+=tmp0*tmp6;
sumPatchb+=tmp2*tmp4;
sumPatchc+=tmp2*tmp6;
sumPatch2+=tmp1*tmp5;
sumPatcha2+=tmp1*tmp7;
sumPatchb2+=tmp3*tmp5;
sumPatchc2+=tmp3*tmp7;
}
Lcache3[localRow][localCol]+=sumPatch+sumPatch2;
Lcache3a[localRow][localCol]+=sumPatcha+sumPatcha2;
Lcache3b[localRow][localCol]+=sumPatchb+sumPatchb2;
Lcache3c[localRow][localCol]+=sumPatchc+sumPatchc2;
}
mem_fence(CLK_LOCAL_MEM_FENCE);
v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024]=Lcache3[localRow][localCol];
v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16]=Lcache3a[localRow][localCol];
v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+16384]=Lcache3b[localRow][localCol];
v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16+16384]=Lcache3c[localRow][localCol];
}