Question

The parallel computation of the LCS follows the wavefront pattern. Here is the parallel function, which turns out to be slower than the serial implementation. (I suspect the number of diagonals in the parallel version versus the number of rows in the serial one has something to do with it.)

void parallelLCS(char * sequence_a, char * sequence_b, size_t size_a, size_t size_b) {
    double start, end;

    int ** dp_table = new int*[size_a + 1];

    for (int i = 0; i <= size_a; i++)
        dp_table[i] = new int[size_b + 1];

    for (int i = 1; i <= size_a; i++)
        dp_table[i][0] = 0;
    for (int j = 0; j <= size_b; j++)
        dp_table[0][j] = 0;

    int p_threads = 2;
    int diagonals = size_a + size_b;

    start = omp_get_wtime();
    #pragma omp parallel num_threads(p_threads) default(none) firstprivate(p_threads,size_a,size_b,sequence_a,sequence_b) shared(dp_table,diagonals)
    {
        for (int curr_diagonal = 1; curr_diagonal <= (diagonals - 1);) {
            int j = omp_get_thread_num() + 1;   //column index
            int i = curr_diagonal - j + 1;      //row index
            for (; j <= curr_diagonal; j += p_threads, i = i - p_threads) {
                if (i <= size_a && j <= size_b) {
                    if (sequence_a[i] == sequence_b[j]) {
                        dp_table[i][j] = dp_table[i - 1][j - 1] + 1;
                    } else if (dp_table[i - 1][j] >= dp_table[i][j - 1]) {
                        dp_table[i][j] = dp_table[i - 1][j];
                    } else {
                        dp_table[i][j] = dp_table[i][j - 1];
                    }
                }
            }
            curr_diagonal++;
            #pragma omp barrier
        }
    }
    end = omp_get_wtime();

    printf("\nParallel - Final answer: %d\n", dp_table[size_a][size_b]);
    printf("Time: %f\n", end - start);

    //Delete dp_table
    for (int i = 0; i <= size_a; i++)
        delete [] dp_table[i];
    delete [] dp_table;
}

and here is the serial function

void serialLCS(char * sequence_a, char * sequence_b, size_t size_a, size_t size_b) {
    double start, end;
    int ** dp_table = new int*[size_a + 1];
    for (int i = 0; i <= size_a; i++)
        dp_table[i] = new int[size_b + 1];

    for (int i = 1; i <= size_a; i++)
        dp_table[i][0] = 0;
    for (int j = 0; j <= size_b; j++)
        dp_table[0][j] = 0;

    start = omp_get_wtime();
    for (int i = 1; i <= size_a; i++) {
        for (int j = 1; j <= size_b; j++) {
            if (sequence_a[i] == sequence_b[j]) {
                dp_table[i][j] = dp_table[i - 1][j - 1] + 1;
            } else if (dp_table[i - 1][j] >= dp_table[i][j - 1]) {
                dp_table[i][j] = dp_table[i - 1][j];
            } else {
                dp_table[i][j] = dp_table[i][j - 1];
            }
        }
    }
    end = omp_get_wtime();
    printf("\nSerial - Final answer: %d\n", dp_table[size_a][size_b]);
    printf("Time: %f\n", end - start);

    //Delete dp_table
    for (int i = 0; i <= size_a; i++)
        delete [] dp_table[i];
    delete [] dp_table;
}

...thought I'd add the test function

#include <cstdlib>
#include <stdio.h>

#include <omp.h>

void serialLCS(char * sequence_a, char * sequence_b, size_t size_a, size_t size_b);
void parallelLCS(char * sequence_a, char * sequence_b, size_t size_a, size_t size_b);

int main() {

    size_t size_a;
    size_t size_b;

    printf("Enter size of sequence A: ");
    scanf("%zu", &size_a);
    printf("Enter size of sequence B: ");
    scanf("%zu", &size_b);

    //keep larger sequence in sequence_a
    if (size_b > size_a) size_a ^= size_b ^= size_a ^= size_b;

    char * sequence_a = new char[size_a + 1];
    char * sequence_b = new char[size_b + 1];
    sequence_a[0] = sequence_b[0] = '0';

    const size_t alphabet_size = 12;
    char A[alphabet_size] = {'A', 'T', 'G', 'C', 'Q', 'W', 'E', 'R', 'Y', 'U', 'I', 'O'};
    char AA[alphabet_size] = {'T', 'C', 'A', 'G', 'R', 'E', 'W', 'Q', 'O', 'I', 'U', 'Y'};

    //fill positions 1..size (index 0 is a placeholder; the LCS routines are 1-based)
    for (size_t i = 1; i <= size_a; i++) {
        sequence_a[i] = A[rand() % alphabet_size];
    }
    for (size_t i = 1; i <= size_b; i++) {
        sequence_b[i] = AA[rand() % alphabet_size];
    }

    serialLCS(sequence_a, sequence_b, size_a, size_b);
    parallelLCS(sequence_a, sequence_b, size_a, size_b);

    delete [] sequence_a;
    delete [] sequence_b;

    return 0;
}

Solution

The problem is not with OpenMP but rather with the way you access the data in the parallel implementation. Even if you run the parallel version with only one thread, it is still about twice as slow.

Well, welcome to the world of non-cache-friendly data structures. Because of the diagonal dependence, you walk the matrix by diagonals, but you still store it the usual row-major way. The resulting data access pattern is strongly non-linear and hence very cache-unfriendly. Observe the number of L1 and L2 cache misses when running your code in single-threaded mode on an old 16-core Xeon X7350 system:

[Figure: process timeline showing L1 and L2 cache misses]

The green portion of the process timeline represents the serial part of your code. The orange part is the OpenMP parallel region (inactive here because of the single-threaded execution). You can clearly see that the serial code is very cache-friendly: not only is the number of L2 cache misses relatively low, but so is the number of L1 cache misses. In the parallel section of the code, however, the very large stride taken when walking the matrix diagonally means that the cache is constantly being thrashed and the miss count is sky-high.
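
To put a number on that stride (a back-of-the-envelope illustration of my own, not part of the original answer): even if the table were stored as a single row-major block with size_b + 1 ints per row, stepping along an anti-diagonal from (i, j) to (i - 1, j + 1) moves back almost a whole row, and the question's separately new-ed rows scatter the data even further. A throwaway sketch with a hypothetical size_b of 10000:

#include <cstdio>

int main() {
    const long size_b = 10000;               //hypothetical sequence length
    const long row_step  = 1;                //serial scan: (i, j) -> (i, j + 1)
    const long diag_step = 1 - (size_b + 1); //wavefront:   (i, j) -> (i - 1, j + 1)

    printf("serial step:    %ld ints apart (%ld bytes)\n", row_step,  row_step  * (long)sizeof(int));
    printf("wavefront step: %ld ints apart (%ld bytes)\n", diag_step, diag_step * (long)sizeof(int));
    //Assuming 64-byte cache lines and 4-byte ints (16 ints per line), the
    //row-wise scan reuses every fetched line 16 times, while the diagonal
    //scan lands on a different line for every single element it touches.
    return 0;
}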

With two threads things get even worse. Elements from two adjacent diagonals that belong to the same matrix row are likely to fall into the same cache line, but one of those diagonals is processed by one thread and the other by another thread. Hence your code runs into a huge amount of false sharing, not to mention NUMA issues on modern multi-socket AMD64 or (post-)Nehalem systems.
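
The ownership pattern behind that false sharing follows directly from the question's column-cyclic work split (a small illustration of my own, assuming p_threads = 2 as in the posted code): the thread that writes column j is always (j - 1) % p_threads, no matter which diagonal is being processed, so horizontally adjacent cells of one row, which usually sit on the same 64-byte cache line, alternate between the two threads from one diagonal to the next.

#include <cstdio>

int main() {
    const int p_threads = 2;
    const int i = 5;                          //an arbitrary matrix row
    for (int j = 1; j <= 6; j++) {
        printf("dp_table[%d][%d] on diagonal %d is written by thread %d\n",
               i, j, i + j - 1, (j - 1) % p_threads);
    }
    return 0;
}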

The solution is not simply to walk the matrix by its diagonals but also to store it in a skewed format, so that each diagonal occupies a contiguous section of memory.
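
To make that concrete, here is a minimal sketch of one possible skewed layout (my own illustration of the idea, not code from the answer, and skewedLCS is a hypothetical name): one contiguous array per anti-diagonal d = i + j, with cell (i, j) stored at offset i - max(0, d - n) inside its diagonal. It keeps the question's 1-based string convention (index 0 of each sequence is a placeholder) and, for brevity, opens a fresh parallel region per diagonal; hoisting the region out of the d loop, as the question does, would cut the fork/join overhead further.

#include <algorithm>

//LCS on a diagonally ("skewed") stored DP table: diag[d] holds all cells
//(i, j) with i + j == d, so every wavefront is contiguous in memory.
int skewedLCS(const char *a, const char *b, int m, int n) {
    const int diagonals = m + n + 1;
    int **diag = new int*[diagonals];
    for (int d = 0; d < diagonals; d++)
        diag[d] = new int[std::min(d, m) - std::max(0, d - n) + 1];

    for (int d = 0; d < diagonals; d++) {
        const int lo  = std::max(0, d - n);     //first row index on diagonal d
        const int hi  = std::min(d, m);         //last row index on diagonal d
        const int lo1 = std::max(0, d - 1 - n); //same for diagonal d - 1
        const int lo2 = std::max(0, d - 2 - n); //same for diagonal d - 2

        #pragma omp parallel for schedule(static)
        for (int i = lo; i <= hi; i++) {
            const int j = d - i;
            if (i == 0 || j == 0)
                diag[d][i - lo] = 0;                             //first row/column
            else if (a[i] == b[j])
                diag[d][i - lo] = diag[d - 2][i - 1 - lo2] + 1;  //dp[i-1][j-1] + 1
            else
                diag[d][i - lo] = std::max(diag[d - 1][i - 1 - lo1],  //dp[i-1][j]
                                           diag[d - 1][i - lo1]);     //dp[i][j-1]
        }
    }

    const int result = diag[m + n][0];          //dp[m][n]: the last diagonal has one cell
    for (int d = 0; d < diagonals; d++)
        delete [] diag[d];
    delete [] diag;
    return result;
}

Called as skewedLCS(sequence_a, sequence_b, size_a, size_b) (sizes passed as int for brevity), it should return the same value as dp_table[size_a][size_b] above. And since the recurrence only ever looks at the previous two diagonals, one could go further and keep just three rolling buffers instead of the whole skewed table, at the cost of losing the data needed to reconstruct the subsequence itself.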

Licensed under: CC-BY-SA with attribution
Not affiliated with StackOverflow