質問

I am having some trouble converting a variable from void* to MPI_Aint. Here is part of the code:

C:
void myfunc_(MPI_Aint *out_ptr, ...)
...
void *ptr = mmap(...)
...
*out_ptr = (MPI_Aint) ptr;

Fortran :
#ifdef DOUBLE_PREC
  integer, parameter, public :: mytype = KIND(0.0D0)
  integer, parameter, public :: real_type = MPI_DOUBLE_PRECISION
#endif
INTEGER BSIZ, CORE_COMM, status
real(mytype), pointer :: SND
...
call myfunc(SND, BSIZ, real_type, CORE_COMM, status)

mmap is working but there is an error (no error when I comment out the last line)

...
mmap succeeded 0x7fab7b490000
...
*** Process received signal ***
Signal: Segmentation fault (11)
Signal code: Address not mapped (1)
Failing at address: (nil)

Any idea to help? Below is the complete C function code :

/*
 * Fortran-callable routine (trailing underscore matches common Fortran
 * name mangling).  Creates a named POSIX shared-memory object, sizes it,
 * mmaps it on every rank, then unmaps/unlinks it again, and finally hands
 * the (already unmapped) address back to the caller through out_ptr.
 *
 * Parameters (Fortran style, all passed by reference):
 *   out_ptr - receives the mmap address cast to MPI_Aint
 *   nelem   - requested element count
 *   type    - Fortran datatype handle (only used in the shm object name)
 *   comm    - Fortran communicator handle (converted via MPI_Comm_f2c)
 *   ret     - unused in the code shown
 *
 * NOTE(review): the Fortran caller shown above passes a real(mytype)
 * POINTER as the first actual argument instead of an
 * INTEGER(KIND=MPI_ADDRESS_KIND) variable; writing through out_ptr then
 * stores into whatever that argument happens to reference -- a likely
 * cause of the reported segfault.  Confirm against the caller.
 */
void myfunc_(MPI_Aint *out_ptr, MPI_Fint *nelem, MPI_Fint *type,
            MPI_Fint *comm, MPI_Fint *ret)
{
MPI_Comm world;
int mype;

/* Convert the Fortran communicator handle to a C handle. */
world = MPI_Comm_f2c(*comm);
MPI_Comm_rank(world, &mype);

char filename[20];  /* NOTE(review): small; see the sprintf below. */

/* Hard-coded selection of the POSIX shm branch in the #if ladder below. */
#define POSIX_SHM

int i,j;

int world_rank = -1, world_size = -1;
int mpi_result = MPI_SUCCESS;

int color = -1;
int ranks_per_node = -1;
MPI_Comm IntraNodeComm;

/* Only rank 0 computes this; the MPI_Bcast below distributes it. */
int node_shmem_bytes; 

mpi_result = MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
assert(mpi_result==MPI_SUCCESS);
mpi_result = MPI_Comm_size(MPI_COMM_WORLD, &world_size);
assert(mpi_result==MPI_SUCCESS);

/* Rank 0 parses NODE_SHARED_MEMORY ("<digits>[K|M|G]") or falls back to
 * one page. */
if (world_rank==0)
{
    char * env_char;
    int units = 1;
    int num_count = 0;
    env_char = getenv("NODE_SHARED_MEMORY");
    if (env_char!=NULL)
    {
        /* Decimal (SI) multipliers, not binary 1024-based ones. */
        if      ( NULL != strstr(env_char,"G") ) units = 1000000000;
        else if ( NULL != strstr(env_char,"M") ) units = 1000000;
        else if ( NULL != strstr(env_char,"K") ) units = 1000;
        else                                     units = 1;

        /* Blank out the unit suffix so atoi() parses only the digits.
         * NOTE(review): this writes into the environment string itself. */
        num_count = strspn(env_char, "0123456789");
        memset( &env_char[num_count], ' ', strlen(env_char)-num_count);

        node_shmem_bytes = units * atoi(env_char);
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
    else
    {
        node_shmem_bytes = getpagesize();
        printf("%7d: NODE_SHARED_MEMORY = %d bytes \n", world_rank, node_shmem_bytes );
    }
}
mpi_result = MPI_Bcast( &node_shmem_bytes, 1, MPI_INT, 0, MPI_COMM_WORLD );
assert(mpi_result==MPI_SUCCESS);

/* NOTE(review): the env-derived size is immediately discarded -- the
 * count is overwritten with *nelem and the byte size recomputed. */
int node_shmem_count = node_shmem_bytes/sizeof(double);
node_shmem_count = (int) *nelem;
node_shmem_bytes = node_shmem_count * sizeof(double) * 2;

fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);

/* The caller's communicator is used directly as the intra-node comm. */
IntraNodeComm = world;

int subcomm_rank = -1;
mpi_result = MPI_Comm_rank(IntraNodeComm, &subcomm_rank);
assert(mpi_result==MPI_SUCCESS);

/* Build a per-(nelem,type,comm) shm object name.
 * NOTE(review): filename is only 20 bytes and sprintf is unbounded --
 * large handle values overflow it; %d also assumes MPI_Fint is int. */
sprintf(filename,"/foo_%d_%d_%d",*nelem,*type,*comm);

#if defined(POSIX_SHM)
int fd;
/* Rank 0 creates the object; the others open it after the barrier. */
if (subcomm_rank==0)
    fd = shm_open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );

mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

if (subcomm_rank!=0)
    fd = shm_open(filename, O_RDWR, S_IRUSR | S_IWUSR );

if (fd<0) printf("%7d: shm_open failed: %d \n", world_rank, fd);
else      printf("%7d: shm_open succeeded: %d \n", world_rank, fd);
#elif defined(DEV_SHM)
int fd = open("/dev/shm/foo", O_RDWR | O_CREAT, S_IRUSR | S_IWUSR );
if (fd<0) printf("%7d: open failed: %d \n", world_rank, fd);
else      printf("%7d: open succeeded: %d \n", world_rank, fd);
#else
int fd = -1;
printf("%7d: no file backing \n", world_rank);
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

/* Only the creator sizes the object. */
if (fd>=0 && subcomm_rank==0)
{
    int rc = ftruncate(fd, node_shmem_bytes);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#ifdef __bgp__
/* Blue Gene/P: choose the mapping type based on the process mode. */
double * ptr = NULL;
_BGP_Personality_t pers;
Kernel_GetPersonality(&pers, sizeof(pers));

if( BGP_Personality_processConfig(&pers) == _BGP_PERS_PROCESSCONFIG_SMP )
{
    printf("SMP mode => MAP_PRIVATE | MAP_ANONYMOUS \n");
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0 );
}
else
{
    /* NOTE(review): this compares a byte count against SharedMemMB
     * (megabytes) and then assigns the MB value to the byte count --
     * the units look mixed; verify against the BG/P personality docs. */
    if (node_shmem_bytes>pers.Kernel_Config.SharedMemMB)
    {
        printf("node_shmem_bytes (%d) greater than pers.Kernel_Config.SharedMemMB (%d) - allocating the latter \n", 
               node_shmem_bytes, pers.Kernel_Config.SharedMemMB );
        node_shmem_bytes = pers.Kernel_Config.SharedMemMB;
    }
    ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
}
#else
void *ptr = mmap( NULL, node_shmem_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
#endif
/* NOTE(review): mmap reports failure with MAP_FAILED ((void *) -1), not
 * NULL, so this check never detects a failed mapping. */
if (ptr==NULL) printf("%7d: mmap failed \n", world_rank);
else           printf("%7d: mmap succeeded %p\n", world_rank,ptr);
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

mpi_result = MPI_Comm_size(IntraNodeComm, &ranks_per_node );
assert(mpi_result==MPI_SUCCESS);
if (0==subcomm_rank) printf("%7d: ranks_per_node = %d \n", world_rank, ranks_per_node);
fflush(stdout);

/* Each rank takes a turn (serialized by barriers) touching and flushing
 * the shared buffer; the actual writes are commented out. */
for (i=0; i<ranks_per_node; i++)
{
    if (i==subcomm_rank)
   {
        printf("%7d: subcomm_rank %d setting the buffer \n", world_rank, subcomm_rank );
        //for (j=0; j<node_shmem_count; j++ ) ptr[j] = (double)i;
        printf("%7d: memset succeeded \n", world_rank);

        int rc = msync(ptr, node_shmem_bytes, MS_INVALIDATE | MS_SYNC);
        if (rc==0) printf("%7d: msync succeeded, %p \n", world_rank, ptr);
        else       printf("%7d: msync failed \n", world_rank);
    }

    fflush(stdout);
    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);

    //printf("%7d: ptr = %lf ... %lf \n", world_rank, ptr[0], ptr[node_shmem_count-1]);
    fflush(stdout);

    mpi_result = MPI_Barrier(MPI_COMM_WORLD);
    assert(mpi_result==MPI_SUCCESS);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

/* NOTE(review): the mapping is destroyed here, yet its (now dangling)
 * address is still returned to the caller at the end of the function. */
if (ptr!=NULL)
{
    int rc = munmap(ptr, node_shmem_bytes);
    /* NOTE(review): %d is the wrong conversion for MPI_Aint on LP64
     * systems (undefined behavior); use %ld or a matching specifier. */
    if (rc==0) printf("%7d: munmap succeeded %p, %d\n", world_rank,ptr, (MPI_Aint) ptr);
    else       printf("%7d: munmap failed \n", world_rank);
}
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

#if defined(POSIX_SHM)
//if (fd>=0)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = shm_unlink(filename);
    if (rc==0) printf("%7d: shm_unlink succeeded %p\n", world_rank,ptr);
    else       printf("%7d: shm_unlink failed \n", world_rank);
}
#elif defined(DEV_SHM)
if (fd>=0 && subcomm_rank==0)
{
    int rc = -1;

    rc = ftruncate(fd, 0);
    if (rc==0) printf("%7d: ftruncate succeeded \n", world_rank);
    else       printf("%7d: ftruncate failed \n", world_rank);

    rc = close(fd);
    if (rc==0) printf("%7d: close succeeded \n", world_rank);
    else       printf("%7d: close failed \n", world_rank);
}
#endif
fflush(stdout);
mpi_result = MPI_Barrier(MPI_COMM_WORLD);
assert(mpi_result==MPI_SUCCESS);

/* Hand the (already unmapped) address back to Fortran as an integer.
 * NOTE(review): this is the line whose removal hides the crash -- if the
 * actual argument is not a writable MPI_ADDRESS_KIND integer, this store
 * faults.  See the answer below: pass void ** (or TYPE(C_PTR)) instead. */
*out_ptr = (MPI_Aint) ptr;

}
役に立ちましたか?

解決

I meant to write you a short comment but it grew somehow a little bit over the limit...

The MPI standard body and implementors have struggled for ages with this C-to-Fortran memory-passing problem. Why not reuse their efforts instead of rediscovering the fact that a round wheel works better than a square one?

Just take a look at the MPI standard function MPI_ALLOC_MEM which is supposed to allocate special memory in MPI and return it to the user code. The MPI-2.2 standard defines its Fortran interface as:

MPI_ALLOC_MEM(SIZE, INFO, BASEPTR, IERROR)
    INTEGER INFO, IERROR
    INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR

The modern Fortran 2008 interface in MPI-3.0 uses ISO_C_BINDING and comes as:

MPI_Alloc_mem(size, info, baseptr, ierror)
    USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR
    INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size
    TYPE(MPI_Info), INTENT(IN) :: info
    TYPE(C_PTR), INTENT(OUT) :: baseptr
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror

The standard gives the following example on how to use the call:

USE mpi_f08
USE, INTRINSIC :: ISO_C_BINDING
TYPE(C_PTR) :: p
REAL, DIMENSION(:,:), POINTER :: a
INTEGER, DIMENSION(2) :: shape
INTEGER(KIND=MPI_ADDRESS_KIND) :: size
shape = (/100,100/)
size = 4 * shape(1) * shape(2)
CALL MPI_Alloc_mem(size,MPI_INFO_NULL,p,ierr)
CALL C_F_POINTER(p, a, shape)
...
a(3,5) = 2.71
...
CALL MPI_Free_mem(a, ierr)

Basically, the C_F_POINTER routine from ISO_C_BINDING binds the C pointer to the Fortran pointer, and then the memory pointed to by the former becomes available through the latter.

This is how Open MPI implements the F08 MPI_Alloc_mem:

! Open MPI's F08 binding of MPI_Alloc_mem: a thin wrapper that forwards
! to the C shim ompi_alloc_mem_f and copies the result into the caller's
! optional ierror argument.
subroutine MPI_Alloc_mem_f08(size,info,baseptr,ierror)
   use, intrinsic :: ISO_C_BINDING, only : C_PTR
   use :: mpi_f08_types, only : MPI_Info, MPI_ADDRESS_KIND
   use :: mpi_f08, only : ompi_alloc_mem_f
   implicit none
   INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: size
   TYPE(MPI_Info), INTENT(IN) :: info
   ! Passed by reference, so the C side receives the address of the
   ! caller's pointer variable (effectively a void **).
   TYPE(C_PTR), INTENT(OUT) :: baseptr
   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
   integer :: c_ierror

   ! info%MPI_VAL is the integer Fortran handle the C shim converts back
   ! with MPI_Info_f2c.
   call ompi_alloc_mem_f(size,info%MPI_VAL,baseptr,c_ierror)
   if (present(ierror)) ierror = c_ierror

end subroutine MPI_Alloc_mem_f08

ompi_alloc_mem_f is a C function that interfaces the internal C implementation to Fortran:

/*
 * C shim behind Open MPI's Fortran MPI_Alloc_mem bindings.
 *
 * baseptr is declared char * but actually receives the address of the
 * caller's pointer/integer variable; MPI_Alloc_mem's last parameter is
 * declared void * while being semantically void ** -- it stores the
 * allocated address through it.
 */
void ompi_alloc_mem_f(MPI_Aint *size, MPI_Fint *info, char *baseptr, MPI_Fint *ierr)
{
    int ierr_c;
    MPI_Info c_info = MPI_Info_f2c(*info);

    ierr_c = MPI_Alloc_mem(*size, c_info, baseptr);
    /* Convert the C error code to a Fortran integer for the caller. */
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
}

So you can see that the TYPE(C_PTR) baseptr argument from Fortran simply comes in as a pointer, passed (as usual) by reference. This is not quite evident here, since the MPI standard defines the last argument to MPI_Alloc_mem, where a pointer to the allocated memory is returned, as void * while it is in fact a void pointer passed by reference (i.e. void **). Also the dummy baseptr argument is actually void ** but is declared simply as char * because of reasons :) The same function is used to implement the old Fortran interface, so the char *baseptr maps to an INTEGER(KIND=MPI_ADDRESS_KIND) actual argument.

The lesson is that while MPI_ADDRESS_KIND integers in Fortran are meant to store both pointer and pointer difference values, you should not use MPI_Aint as pointer argument type in C but rather regular double pointers like void **.

他のヒント

I am unsure whether the line you can comment out to avoid the problem is the following or not:

*out_ptr = (MPI_Aint) ptr;

Your dereferencing is not consistent.

ptr is a double * and not directly convertible to an MPI_Aint.

Perhaps you want

*out_ptr = *(MPI_Aint *)ptr;

if the caller is passing in a pointer (as out_ptr) to a location in which you want to store the single MPI_Aint found at *ptr. However, that does not make sense in light of your allocating node_shmem_bytes, so perhaps:

out_ptr = (MPI_Aint *)ptr

which would set (the local to myfunc copy) out_ptr to a block of MPI_Aint objects, but the caller would not see that. I don't know the Fortran -> C calling conventions being used, but perhaps you want to pass a pointer to an MPI_Aint * that the C program can place ptr in?

ライセンス: CC-BY-SA帰属
所属していません StackOverflow
scroll top