Hello support,
I have an MPI code that I compiled with LAM 6.3.
When I try to run this benchmark program,
I get the following errors:
MPI_Comm_rank: invalid communicator (rank 4, MPI_COMM_WORLD)
MPI_Comm_rank: invalid communicator (rank 0, MPI_COMM_WORLD)
MPI_Comm_rank: invalid communicator (rank 1, MPI_COMM_WORLD)
MPI_Comm_rank: invalid communicator (rank 2, MPI_COMM_WORLD)
MPI_Comm_rank: invalid communicator (rank 3, MPI_COMM_WORLD)
Rank (1, MPI_COMM_WORLD): Call stack within LAM:
Rank (1, MPI_COMM_WORLD): - MPI_Comm_rank()
Rank (1, MPI_COMM_WORLD): - main()
Rank (0, MPI_COMM_WORLD): Call stack within LAM:
Rank (0, MPI_COMM_WORLD): - MPI_Comm_rank()
Rank (0, MPI_COMM_WORLD): - main()
Rank (2, MPI_COMM_WORLD): Call stack within LAM:
Rank (2, MPI_COMM_WORLD): - MPI_Comm_rank()
Rank (2, MPI_COMM_WORLD): - main()
Rank (4, MPI_COMM_WORLD): Call stack within LAM:
Rank (4, MPI_COMM_WORLD): - MPI_Comm_rank()
Rank (4, MPI_COMM_WORLD): - main()
Rank (3, MPI_COMM_WORLD): Call stack within LAM:
Rank (3, MPI_COMM_WORLD): - MPI_Comm_rank()
Rank (3, MPI_COMM_WORLD): - main()
-----------------------------------------------------------------------------
One of the processes started by mpirun has exited with a nonzero exit
code. This typically indicates that the process finished in error.
If your process did not finish in error, be sure to include a "return
0" or "exit(0)" in your C code before exiting the application.
PID 20675 failed on node n1 with exit status 1.
-----------------------------------------------------------------------------
I'm running on an Intel Linux cluster, using the Lahey/Fujitsu
compiler (v6.5.1).
I've pasted the output above and the code below. I would
greatly appreciate any advice on what's happening
with the code.
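In case it helps to narrow this down, here is a minimal startup test
that exercises only the first three MPI calls the benchmark makes. It
is just a sketch: the plain include 'mpif.h' assumes the compiler is
pointed at LAM's own include directory, and the program name is
arbitrary.

      program hello
      include 'mpif.h'
      integer rank, nproc, ierr
c     same startup sequence as the benchmark below
      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
      print *, 'Process ', rank, ' of ', nproc, ' is alive'
      call MPI_FINALIZE(ierr)
      stop
      end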
Sincerely,
NDONG NNA Guitry-E.
      include '/usr/local/mpich/mpich-1.2.2/include/mpif.h'
C -- Variables --
      integer nproc, rank, rc, nmax, i, j, k, n, ierr, max
      real y, r, t0, t1, tt, y2, xi, rtotal, ytotal
C     integer status(MPI_STATUS_SIZE)
c -- nproc is the number of processes --
c Normal MPI startup
      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
      print *, "Process ", rank, " of ", nproc, " is alive"
      max = 10000
      nmax = 1
      tt = 0.00000008
c broadcast max from rank 0 to all processes
      call MPI_BCAST(max, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
c check for quit signal
      if (max .le. 0) goto 1000
      do n = 1, nmax
         if (rank .eq. 0) then
            write(*,'(a)') ' Fortran double precision performance test'
         endif
c--- start the timer
         call cpu_time(t0)
         do k = 1, 100
            do j = 1, 40
               r = 10.50000008
               y = 0.50000008*r
c each process handles i = 1+rank, 1+rank+nproc, ...
               do i = 1+rank, max, nproc
                  xi = dble(i)
                  y = 2.00000008*xi*xi + ((xi/(y*y)+r)*y*r)
                  y2 = 1.00000008/y
                  y = r/y*(y+r-y*r)*y2*y2
                  if (y.gt.r .or. y.le.0.00000008) y = 0.50000008*r
                  r = r + 0.01000008
               enddo
               print *, "Process ", rank, " , partial r : ", r
               print *, "Process ", rank, " , partial y : ", y
c collect all the partial sums on rank 0
               call MPI_REDUCE(r, rtotal, 1, MPI_REAL, MPI_SUM, 0,
     $              MPI_COMM_WORLD, ierr)
c collect all the partial products on rank 0
               call MPI_REDUCE(y, ytotal, 1, MPI_REAL, MPI_PROD, 0,
     $              MPI_COMM_WORLD, ierr)
            enddo
         enddo
         call cpu_time(t1)
         if (rank .eq. 0) then
            write(*,28) ytotal, rtotal
 28         format(' ytotal is :',f20.9,' rtotal is :',f20.9)
            write(*,27) t1 - t0
 27         format(' benchmark ran in',f10.3,' seconds')
         endif
         tt = tt + t1 - t0
      enddo
      if (rank .eq. 0) then
         write(*,'(a,f10.3)') ' mean CPU time : ', tt/real(nmax)
      endif
 1000 call MPI_FINALIZE(rc)
      stop
      end
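
P.S. Once the startup problem is sorted out, I may also time the loops
with MPI's wall-clock timer rather than cpu_time, since cpu_time
measures per-process CPU time. Below is a minimal sketch of that
pattern using the standard MPI_WTIME function; the dummy loop and the
program name are just placeholders.

      program wclock
      include 'mpif.h'
      integer rank, ierr, i
      double precision t0, t1, s
      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
c     MPI_WTIME returns elapsed wall-clock time in seconds
      t0 = MPI_WTIME()
      s = 0.0d0
      do i = 1, 1000000
         s = s + dble(i)*dble(i)
      enddo
      t1 = MPI_WTIME()
      if (rank .eq. 0) then
         write(*,'(a,f12.6,a)') ' loop took ', t1 - t0, ' seconds'
c        print s so the compiler cannot discard the loop
         print *, ' s = ', s
      endif
      call MPI_FINALIZE(ierr)
      stop
      end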