
LAM/MPI General User's Mailing List Archives


From: John Korah (j_korah_at_[hidden])
Date: 2005-03-21 10:30:51


I have set the env variable to
LAM_MPI_THREAD_LEVEL=2
(MPI_THREAD_SERIALIZED). Won't that take care of it?
I have attached the code.
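For reference, a more portable way to request the same level is
MPI_Init_thread() rather than the environment variable; a minimal
sketch, assuming your LAM build provides the MPI-2 call:

#include "mpi.h"

int main(int argc, char *argv[])
{
    int provided;

    /* Request MPI_THREAD_SERIALIZED explicitly; 'provided' reports
       the level the library actually grants. */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
    if (provided < MPI_THREAD_SERIALIZED) {
        /* the library cannot serialize threaded MPI calls */
    }

    MPI_Finalize();
    return 0;
}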

Thanks
John

#include <stdio.h>
#include <iostream>
#include "mpi.h"
#include <stdlib.h>
#include <math.h>

#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include <sys/time.h>
#define DATA_TYPE int
#define DATA_TYPE_MPI MPI_INT

using namespace std;

double start_time,end_time,startTimeL2,endTimeL2;
int ret=100;
void *retval;
void *send(void *);
void *receive(void *);
int delay(int);

struct sendStruct
{
        int latency;    // simulated network latency (seconds)
        int dest;       // destination rank to send the packet to
        int tag;
        DATA_TYPE *sendBuf;
        int sendSize;
        MPI_Request *request;
        MPI_Status *status;
};

struct recvStruct
{
        int src;
        int tag;
        int recvSize;
        void *recvBuf;
        MPI_Request *request;
        MPI_Status *status;
};
int main(int argc, char *argv[])
{
    
    int numprocs, myrank;
    
        //MPI initialization statements
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    int noN=numprocs;
    int threadLevel;
    MPI_Query_thread(&threadLevel);
    cout<<"Proc: "<< myrank << "The thread level
"<<threadLevel <<": " << MPI_THREAD_SERIALIZED<<endl;
    //MPI_Request requestA[noN-1];
    MPI_Request requestRecv[noN-1];
    MPI_Request requestSend[noN-1];
    MPI_Status statusSend[noN-1];
    MPI_Status statusRecv[noN-1];
        
        
        /*Generating SN info*/
    int i=myrank;
        sendStruct sendParam[noN-1];
        recvStruct recvParam[noN-1];
        
                
        //Buffers;
        DATA_TYPE *sendBuf[noN-1];
        DATA_TYPE *recvBuf[noN-1];
        //create buffers
        for(int t1=0;t1<noN-1;++t1)
        {
                sendBuf[t1]=new DATA_TYPE[100];
                recvBuf[t1]=new DATA_TYPE[100];
        }
        
        //Buffer size
        int recvSize;
        int sendSize;
        recvSize=100;
        sendSize=100;
        
        //latency
        int lat=1;
        //send and receive processors
        int sendProc,recvProc;
        
        sendProc=i+1;
        recvProc=i-1;
        if(sendProc > noN-1)
                sendProc = 0;
        if(recvProc < 0)
                recvProc = noN-1;
                        
        //init threads
        pthread_t waitThreadSend[noN-1];
        pthread_t waitThreadRecv[noN-1];
        
        int retv;
        
        start_time=MPI_Wtime();
                
        for(int t1=0;t1<noN-1;++t1)
        {
                sendParam[t1].latency = lat;
                sendParam[t1].dest = sendProc++;
                sendParam[t1].tag = i;
                sendParam[t1].sendBuf = sendBuf[t1];
                sendParam[t1].sendSize = sendSize;
                sendParam[t1].request = &requestSend[t1];
                sendParam[t1].status = &statusSend[t1];
                        
        
                retv = pthread_create(&waitThreadSend[t1], NULL, &send,
                                      (void *)&sendParam[t1]);
                if(sendProc > noN-1)
                        sendProc = 0;
                recvParam[t1].src = recvProc;
                recvParam[t1].tag = recvProc--;
                recvParam[t1].recvSize = recvSize;
                recvParam[t1].recvBuf = recvBuf[t1];
                recvParam[t1].request = &requestRecv[t1];
                recvParam[t1].status = &statusRecv[t1];
        
                retv = pthread_create(&waitThreadRecv[t1], NULL, &receive,
                                      (void *)&recvParam[t1]);
                if(recvProc < 0)
                        recvProc = noN-1;
        }
        
        cout<<"Processor: " << myrank <<" finished "<<endl;
        for(int t1=0; t1<noN-1; ++t1)
        {
                // join both the send and the receive thread for each peer
                pthread_join(waitThreadSend[t1], NULL);
                pthread_join(waitThreadRecv[t1], NULL);
                cout << "Processor: " << myrank << " joined thread " << t1 << endl;
        }
        end_time = MPI_Wtime();
        cout << "Processor " << myrank << " duration of simulation: "
             << end_time - start_time << endl;

        // calling pthread_exit() here would leave MPI_Finalize() unreachable
        for(int t1=0; t1<noN-1; ++t1)
        {
                delete[] sendBuf[t1];
                delete[] recvBuf[t1];
        }
        MPI_Finalize();
        return 0;
}

void *send(void *ptr)
{
        sendStruct a = *((sendStruct *)ptr);
        cout << "latency " << a.latency << endl;

        // blocking send of the whole buffer to the destination rank
        MPI_Send(a.sendBuf, a.sendSize, DATA_TYPE_MPI, a.dest, a.tag,
                 MPI_COMM_WORLD);

        cout << "Finished sending: processor " << a.tag
             << " to destination " << a.dest << endl;
        pthread_exit(NULL);
}

void *receive(void *ptr)
{
        recvStruct a = *((recvStruct *)ptr);

        // post the nonblocking receive, then wait for it to complete
        // before reporting completion
        MPI_Irecv(a.recvBuf, a.recvSize, DATA_TYPE_MPI, a.src, a.tag,
                  MPI_COMM_WORLD, a.request);
        MPI_Wait(a.request, a.status);
        cout << "Finished receiving: processor " << a.src
             << " tag " << a.tag << endl;
        pthread_exit(NULL);
}

int delay(int amount)
{
        sleep(amount);
        return 0;
}
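
Dave's question below asks about locks around the send and Irecv. A
minimal sketch of serializing the MPI calls with a pthread mutex, which
MPI_THREAD_SERIALIZED requires (mpi_lock and serialized_send are
illustrative names, not part of the attached code):

#include <pthread.h>
#include "mpi.h"

// One process-wide lock so only one thread is inside the MPI library
// at a time, as MPI_THREAD_SERIALIZED requires.
static pthread_mutex_t mpi_lock = PTHREAD_MUTEX_INITIALIZER;

// Nonblocking send completed by polling, so the lock is never held
// across a blocking MPI call (holding it through MPI_Send or MPI_Wait
// would stall every other thread in this process).
void serialized_send(void *buf, int count, MPI_Datatype type,
                     int dest, int tag)
{
        MPI_Request req;
        int done = 0;

        pthread_mutex_lock(&mpi_lock);
        MPI_Isend(buf, count, type, dest, tag, MPI_COMM_WORLD, &req);
        pthread_mutex_unlock(&mpi_lock);

        while (!done) {
                pthread_mutex_lock(&mpi_lock);
                MPI_Test(&req, &done, MPI_STATUS_IGNORE);
                pthread_mutex_unlock(&mpi_lock);
        }
}

Releasing the mutex between MPI_Test probes lets the receive threads
make progress on their own MPI calls instead of deadlocking.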
--- David Cronk <cronk_at_[hidden]> wrote:
> Not enough information. Do you have locks around
> the send and Irecv?
> If you have two threads enter an MPI call
> concurrently you may get
> deadlock. You must assure this never happens.
>
> Dave.
>
> John Korah wrote:
> > Hi,
> >
> > I have a multithreaded application. I use separate
> > threads to do MPI_Send and MPI_Irecv. Before each
> > MPI_Send() I use a sleep() function to suspend the
> > thread (to simulate network latency). The problem is
> > that the threads are not moving beyond sleep(). When I
> > set sleep(0) or sleep(t) where 0 < t < 0.999, it works
> > fine...
> >
> >
> > Thanks
> > John
> >
> >
> >
> >
> > _______________________________________________
> > This list is archived at http://www.lam-mpi.org/MailArchives/lam/
> >
>
> --
> Dr. David Cronk, Ph.D.        phone: (865) 974-3735
> Research Leader               fax:   (865) 974-8296
> Innovative Computing Lab      http://www.cs.utk.edu/~cronk
> University of Tennessee, Knoxville
> _______________________________________________
> This list is archived at http://www.lam-mpi.org/MailArchives/lam/
>

                