Question

Problem: Solve Laplace Equation div2(u)=0 by iteration using finite differences.

Boundary conditions: u(x,0) = 1 and u(x,1) = u(0,y) = u(1,y) = 0.

The algorithm is:

u[i,j]= 0.25 * (u[i,j-1] + u[i,j+1] + u[i-1,j] + u[i+1,j])

Environment: Arch Linux + Gcc 4.8.2 & Intel Parallel Studio 2013 SP4.

The code of solving Laplace equation is as follows:

#include <iostream>
#include <cstdlib>
#include <cmath>
#include <time.h> 
#include <sys/time.h>
using namespace std;
// Return the wall-clock time elapsed between two timestamps, in seconds.
double getTimeElapsed(struct timeval end, struct timeval start)
{
    double secs  = end.tv_sec - start.tv_sec;
    double usecs = end.tv_usec - start.tv_usec;
    return secs + usecs / 1000000.00;
}
// Return the larger of two doubles (file-local helper used by the solver).
double max(double a,double b)
{
    return (b < a) ? a : b;
}
int main(int argc, char *argv[])
{
    struct timeval t0, t1; 
    double htime;
    double **laplace,prev,qa=0,accu=10e-4;
    int i,j,rowcol,step=0; 
    if (argc != 3) cout << "Usage: hw4 [points at row] [accuracy]" << endl;
    else
    {
        rowcol=atoi(argv[1]);
        accu=atof(argv[2]);
        laplace = new double *[rowcol+1];
        for (i=0;i<=rowcol;i++) 
            laplace[i]=new double [rowcol+1];
        #pragma omp parallel for //MP init
        for (i=0;i<=rowcol;i++)
            for (j=0;j<=rowcol;j++)
            {
                if (j==0) laplace[i][j] = 1.0;
                else laplace[i][j] = 0.0;
            }
        gettimeofday(&t0, NULL);
        while(1)
        {
            #pragma omp parallel for shared(rowcol,laplace,prev) reduction (max:qa) //mp calculation
            for(i=1;i<=rowcol-1;i++)
                for(j=1;j<=rowcol-1;j++)
                {
                    prev=laplace[i][j];
                    laplace[i][j]=0.25*(laplace[i+1][j]+laplace[i-1][j]+laplace[i][j+1]+laplace[i][j-1]);
                    qa=max(qa,abs(laplace[i][j]-prev));
                }
            if (qa < accu)
            {
                gettimeofday(&t1, NULL);
                htime=getTimeElapsed(t1,t0);
                cout << "Done!" << endl << "Time used: " << htime << endl;
                for (i=0;i<=rowcol;i++) delete [] laplace[i];
                delete [] laplace;
                exit(0);
            }
            else 
            {
                step++;
                if (step%80==0) cout << "Iteration = " << step << " ,  Error= " << qa << endl;
                qa=0;
            }
        }
    }
    return 0;
}

I tested the program with 100x100 grid and 1e-06 accuracy.

Intel C++ finishes the program perfectly with 6000 iterations. It produces the same result as the sequential code.

But for GCC, it failed to converge.

I cannot figure out why.

PS: The program compiled by g++ runs, but it fails to converge, unlike the Intel-compiled version.

Winston

Was it helpful?

Solution

You have race conditions in j and prev. Make them private.

#pragma omp parallel for private(j) //MP init
for (i=0;i<=rowcol;i++)
    for (j=0;j<=rowcol;j++)

and

#pragma omp parallel for private(j,prev) reduction (max:qa) //mp calculation
for(i=1;i<=rowcol-1;i++)
    for(j=1;j<=rowcol-1;j++) {
        prev=laplace[i][j];
Licensed under: CC-BY-SA with attribution
Not affiliated with StackOverflow
scroll top