/*
 * This code was used to reproduce the Intel MPI bug initializing the XIOS
 * server on MN3. It can be compiled with `mpicc bench.cpp -o bench` and
 * then submitted to the scheduler.
 */
/* skip C++ Binding for mpich , intel MPI */
#define MPICH_SKIP_MPICXX
/* skip C++ Binding for SGI MPI library */
#define MPI_NO_CPPBIND
/* skip C++ Binding for OpenMPI */
#define OMPI_SKIP_MPICXX
#include <mpi.h>
#include <stdio.h>
/*
 * Reproducer: rank 0 busy-polls with MPI_Iprobe/MPI_Irecv/MPI_Test until it
 * has received one tag-1 message from every other rank; every other rank
 * sends a single int (tag 1) to rank 0. Requires at least 2 processes.
 * The tight polling loops are intentional -- they are the pattern that
 * triggers the Intel MPI bug this program reproduces.
 */
int main(int argc, char *argv[]){
int rank;
int world_size;
int recv_msgs = 0;     /* messages collected so far on rank 0 */
int bufferout[1];
int *bufferin;         /* int*, not void*: delete[] through void* is UB */
int flag;
int receiving;
int msg_size;
int rank_src;
MPI_Status status;
MPI_Request request;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (world_size < 2)
{
printf("Please run with two processes.\n");fflush(stdout);
MPI_Finalize();
return 0;
}
if (rank == 0)
{
/* Collect exactly one message from each of the other ranks. */
while (recv_msgs < world_size-1){
MPI_Iprobe(MPI_ANY_SOURCE,1,MPI_COMM_WORLD, &flag, &status);
/* MPI only guarantees flag is zero/nonzero, so test truthiness,
 * not equality with `true`. */
if (flag){
rank_src = status.MPI_SOURCE;
MPI_Get_count(&status, MPI_INT, &msg_size);
bufferin = new int[msg_size];
MPI_Irecv(bufferin,msg_size,MPI_INT,rank_src,1,MPI_COMM_WORLD,&request);
receiving = true;
/* Busy-wait on the request completing (deliberate polling). */
while (receiving){
MPI_Test(&request,&flag,&status);
if (flag)
{
rank_src = status.MPI_SOURCE;
MPI_Get_count(&status,MPI_CHAR,&msg_size);
delete [] bufferin;
receiving = false;
recv_msgs++;
printf("%d/%d Recv OK\n", recv_msgs, world_size-1);
}
}
}
}
/* MPI_Request is an opaque handle (a pointer in some MPIs); printing
 * it with %d was undefined behavior, so it is no longer printed. */
printf("Complete!\n");
}
else{
bufferout[0] = (world_size * 10000) + rank;
/* MPI counts are in elements, not bytes: sizeof(bufferout) made MPI
 * read 4 ints from a 1-int array. Send exactly one element. */
MPI_Send(bufferout,1,MPI_INT,0,1,MPI_COMM_WORLD) ;
printf("SentOK:%d\n", rank);
}
printf("MPI_Finalize:Before:%d\n", rank);
MPI_Finalize();
printf("MPI_Finalize:After:%d\n", rank);
return 0;
}