source: LMDZ4/branches/LMDZ4-dev/libf/dyn3dpar/parallel.F90 @ 1264

Last change on this file since 1264 was 1222, checked in by Ehouarn Millour, 15 years ago

Changes and cleanups to enable compiling without physics
and without ioipsl.

IOIPSL related cleanups:

  • bibio/writehist.F: encapsulate the routine (which needs IOIPSL to function) with an #ifdef IOIPSL flag.

  • dyn3d/abort_gcm.F, dyn3dpar/abort_gcm.F and dyn3dpar/getparam.F90: use the ioipsl_getincom module when not compiling with the IOIPSL library, so that the getin() routine is always available (see the sketch after this list).
  • removed unused "use IOIPSL" in dyn3dpar/guide_p_mod.F90
  • calendar-related issue: initialize day_ref and annee_ref in iniacademic.F (i.e. when they are not read from the start.nc file).
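
A minimal sketch of this getin() fallback, assuming a CPP_IOIPSL cpp flag and a hypothetical calling routine (neither name is taken from the actual LMDZ sources):

      SUBROUTINE read_my_param(value)
      ! Hypothetical caller: getin() stays available whether or not the
      ! model is built against the IOIPSL library.
#ifdef CPP_IOIPSL
      USE ioipsl, ONLY: getin           ! real IOIPSL library
#else
      USE ioipsl_getincom, ONLY: getin  ! lightweight stand-in module
#endif
      IMPLICIT NONE
      REAL, INTENT(OUT) :: value
      value = 1.0                    ! default value
      CALL getin('my_param', value)  ! overridden from the *.def files if present
      END SUBROUTINE read_my_param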

Earth-specific programs/routines/modules:
create_etat0.F, fluxstokenc.F, limit_netcdf.F, startvar.F
(versions in dyn3d and dyn3dpar)
These routines and modules, which by design and purpose are made to work with the
Earth physics, are encapsulated with the #ifdef CPP_EARTH cpp flag (see the sketch below).
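
A minimal sketch of this encapsulation pattern; the routine name and body are placeholders, not the actual create_etat0/limit_netcdf code:

      SUBROUTINE earth_specific_setup
      ! Placeholder routine: when CPP_EARTH is not defined the body is
      ! preprocessed away and only an empty stub remains, so the file
      ! still compiles in builds without the Earth physics.
      IMPLICIT NONE
#ifdef CPP_EARTH
      PRINT *, 'running Earth-specific initialisation'
      ! ... Earth-physics initialisation would go here ...
#endif
      END SUBROUTINE earth_specific_setup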

Earth-specific instructions:

  • calls to qminimum (specific treatment of the first 2 tracers, i.e. water) in dyn3d/caladvtrac.F, dyn3d/integrd.F, dyn3dpar/caladvtrac_p.F and dyn3dpar/integrd_p.F are made only if (planet_type == 'earth'), as in the sketch below.
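
A condensed sketch of that guard; the argument list of qminimum is illustrative only and does not reproduce the real call sites:

      ! Water-specific cleanup only makes sense for the Earth setup,
      ! so it is skipped for any other planet_type.
      if (planet_type == 'earth') then
         ! the first two tracers are water vapour and liquid water
         call qminimum(q, nqtot, deltap)   ! arguments are placeholders
      endif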

Interaction with parallel physics:

  • routine dyn3dpar/parallel.F90 uses the "surface_data" module (which is in the physics ...) to obtain the value of "type_ocean". This is encapsulated with #ifdef CPP_EARTH, and a default type_ocean="dummy" is set otherwise.
  • So far, only the Earth physics is parallelized, so all the interaction between the parallel dynamics and the parallel physics is encapsulated with #ifdef CPP_EARTH (this way we can run in parallel without any physics). The (dyn3dpar) routines which contain such interactions are: bands.F90, gr_dyn_fi_p.F, gr_fi_dyn_p.F and mod_interface_dyn_phys.F90. This should later (when the dyn/phys interface is improved) be encapsulated with a more general and appropriate #ifdef CPP_PHYS cpp flag.
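
Condensed sketch of the type_ocean construct (the full version appears in Finalize_parallel in the listing below):

#ifdef CPP_EARTH
      ! type_ocean is provided by the (Earth) physics package
      use surface_data, only : type_ocean
      implicit none
#else
      implicit none
      ! no physics available: fall back on a harmless default
      character(len=6),parameter :: type_ocean="dummy"
#endif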

I checked that these changes do not alter results (on the simple
32x24x11 bench) on Ciclad (seq & mpi), Brodie (seq, mpi & omp) and
Vargas (seq, mpi & omp).

EM

!
! $Id: parallel.F90 1222 2009-08-07 11:48:33Z fairhead $
!
  module parallel
  USE mod_const_mpi

    LOGICAL,SAVE :: using_mpi
    LOGICAL,SAVE :: using_omp

    integer, save :: mpi_size
    integer, save :: mpi_rank
    integer, save :: jj_begin
    integer, save :: jj_end
    integer, save :: jj_nb
    integer, save :: ij_begin
    integer, save :: ij_end
    logical, save :: pole_nord
    logical, save :: pole_sud

    integer, allocatable, save, dimension(:) :: jj_begin_para
    integer, allocatable, save, dimension(:) :: jj_end_para
    integer, allocatable, save, dimension(:) :: jj_nb_para
    integer, save :: OMP_CHUNK
    integer, save :: omp_rank
    integer, save :: omp_size
!$OMP THREADPRIVATE(omp_rank)

 contains

    subroutine init_parallel
    USE vampir
    implicit none
#ifdef CPP_MPI
      include 'mpif.h'
#endif
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"

      integer :: ierr
      integer :: i,j
      integer :: type_size
      integer, dimension(3) :: blocklen,type
      integer :: comp_id

#ifdef CPP_OMP
      INTEGER :: OMP_GET_NUM_THREADS
      EXTERNAL OMP_GET_NUM_THREADS
      INTEGER :: OMP_GET_THREAD_NUM
      EXTERNAL OMP_GET_THREAD_NUM
#endif

#ifdef CPP_MPI
       using_mpi=.TRUE.
#else
       using_mpi=.FALSE.
#endif

#ifdef CPP_OMP
       using_OMP=.TRUE.
#else
       using_OMP=.FALSE.
#endif

      call InitVampir

      IF (using_mpi) THEN
#ifdef CPP_MPI
        call MPI_COMM_SIZE(COMM_LMDZ,mpi_size,ierr)
        call MPI_COMM_RANK(COMM_LMDZ,mpi_rank,ierr)
#endif
      ELSE
        mpi_size=1
        mpi_rank=0
      ENDIF

      allocate(jj_begin_para(0:mpi_size-1))
      allocate(jj_end_para(0:mpi_size-1))
      allocate(jj_nb_para(0:mpi_size-1))

! distribute the jjm+1 latitude rows as evenly as possible among the
! mpi_size processes (the first MOD(jjm+1,mpi_size) processes get one extra row)
      do i=0,mpi_size-1
        jj_nb_para(i)=(jjm+1)/mpi_size
        if ( i < MOD((jjm+1),mpi_size) ) jj_nb_para(i)=jj_nb_para(i)+1

        if (jj_nb_para(i) <= 2 ) then

         write(lunout,*)"Arret : le nombre de bande de lattitude par process est trop faible (<2)."
         write(lunout,*)" ---> diminuez le nombre de CPU ou augmentez la taille en lattitude"

#ifdef CPP_MPI
          IF (using_mpi) call MPI_ABORT(COMM_LMDZ,-1, ierr)
#endif
        endif

      enddo

!      jj_nb_para(0)=11
!      jj_nb_para(1)=25
!      jj_nb_para(2)=25
!      jj_nb_para(3)=12

! deduce the first and last latitude row handled by each process
      j=1

      do i=0,mpi_size-1

        jj_begin_para(i)=j
        jj_end_para(i)=j+jj_Nb_para(i)-1
        j=j+jj_Nb_para(i)

      enddo

      jj_begin = jj_begin_para(mpi_rank)
      jj_end   = jj_end_para(mpi_rank)
      jj_nb    = jj_nb_para(mpi_rank)

      ij_begin=(jj_begin-1)*iip1+1
      ij_end=jj_end*iip1

      if (mpi_rank.eq.0) then
        pole_nord=.TRUE.
      else
        pole_nord=.FALSE.
      endif

      if (mpi_rank.eq.mpi_size-1) then
        pole_sud=.TRUE.
      else
        pole_sud=.FALSE.
      endif

      write(lunout,*)"init_parallel: jj_begin",jj_begin
      write(lunout,*)"init_parallel: jj_end",jj_end
      write(lunout,*)"init_parallel: ij_begin",ij_begin
      write(lunout,*)"init_parallel: ij_end",ij_end

!$OMP PARALLEL

#ifdef CPP_OMP
!$OMP MASTER
        omp_size=OMP_GET_NUM_THREADS()
!$OMP END MASTER
        omp_rank=OMP_GET_THREAD_NUM()
#else
        omp_size=1
        omp_rank=0
#endif
!$OMP END PARALLEL

    end subroutine init_parallel

    subroutine SetDistrib(jj_Nb_New)
    implicit none

#include "dimensions.h"
#include "paramet.h"

      INTEGER,dimension(0:MPI_Size-1) :: jj_Nb_New
      INTEGER :: i

      jj_Nb_Para=jj_Nb_New

      jj_begin_para(0)=1
      jj_end_para(0)=jj_Nb_Para(0)

      do i=1,mpi_size-1

        jj_begin_para(i)=jj_end_para(i-1)+1
        jj_end_para(i)=jj_begin_para(i)+jj_Nb_para(i)-1

      enddo

      jj_begin = jj_begin_para(mpi_rank)
      jj_end   = jj_end_para(mpi_rank)
      jj_nb    = jj_nb_para(mpi_rank)

      ij_begin=(jj_begin-1)*iip1+1
      ij_end=jj_end*iip1

    end subroutine SetDistrib


    subroutine Finalize_parallel
#ifdef CPP_COUPLE
    use mod_prism_proto
#endif
#ifdef CPP_EARTH
! Ehouarn: surface_data module is in 'phylmd' ...
      use surface_data, only : type_ocean
      implicit none
#else
      implicit none
! without the surface_data module, we declare (and set) a dummy 'type_ocean'
      character(len=6),parameter :: type_ocean="dummy"
#endif
! #endif of #ifdef CPP_EARTH

      include "dimensions.h"
      include "paramet.h"
#ifdef CPP_MPI
      include 'mpif.h'
#endif

      integer :: ierr
      integer :: i
      deallocate(jj_begin_para)
      deallocate(jj_end_para)
      deallocate(jj_nb_para)

      if (type_ocean == 'couple') then
#ifdef CPP_COUPLE
         call prism_terminate_proto(ierr)
         IF (ierr .ne. PRISM_Ok) THEN
            call abort_gcm('Finalize_parallel',' Probleme dans prism_terminate_proto ',1)
         endif
#endif
      else
#ifdef CPP_MPI
         IF (using_mpi) call MPI_FINALIZE(ierr)
#endif
      end if

    end subroutine Finalize_parallel

    subroutine Pack_Data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

      integer, intent(in) :: ij,ll,row
      real,dimension(ij,ll),intent(in) ::Field
      real,dimension(ll*iip1*row), intent(out) :: Buffer

      integer :: Pos
      integer :: i,l

      Pos=0
      do l=1,ll
        do i=1,row*iip1
          Pos=Pos+1
          Buffer(Pos)=Field(i,l)
        enddo
      enddo

    end subroutine Pack_data

    subroutine Unpack_Data(Field,ij,ll,row,Buffer)
    implicit none

#include "dimensions.h"
#include "paramet.h"

      integer, intent(in) :: ij,ll,row
      real,dimension(ij,ll),intent(out) ::Field
      real,dimension(ll*iip1*row), intent(in) :: Buffer

      integer :: Pos
      integer :: i,l

      Pos=0

      do l=1,ll
        do i=1,row*iip1
          Pos=Pos+1
          Field(i,l)=Buffer(Pos)
        enddo
      enddo

    end subroutine UnPack_data

    SUBROUTINE barrier
    IMPLICIT NONE
#ifdef CPP_MPI
    INCLUDE 'mpif.h'
#endif
    INTEGER :: ierr

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      IF (using_mpi) CALL MPI_Barrier(COMM_LMDZ,ierr)
#endif
!$OMP END CRITICAL (MPI)

    END SUBROUTINE barrier

    subroutine exchange_hallo(Field,ij,ll,up,down)
! exchange halo latitude rows of Field with the neighbouring MPI processes,
! using non-blocking sends/receives guarded by OpenMP critical sections
    USE Vampir
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
      INTEGER :: ij,ll
      REAL, dimension(ij,ll) :: Field
      INTEGER :: up,down

      INTEGER :: ierr
      LOGICAL :: SendUp,SendDown
      LOGICAL :: RecvUp,RecvDown
      INTEGER, DIMENSION(4) :: Request
#ifdef CPP_MPI
      INTEGER, DIMENSION(MPI_STATUS_SIZE,4) :: Status
#else
      INTEGER, DIMENSION(1,4) :: Status
#endif
      INTEGER :: NbRequest
      REAL, dimension(:),allocatable :: Buffer_Send_up,Buffer_Send_down
      REAL, dimension(:),allocatable :: Buffer_Recv_up,Buffer_Recv_down
      INTEGER :: Buffer_size

      IF (using_mpi) THEN

        CALL barrier

        call VTb(VThallo)

        SendUp=.TRUE.
        SendDown=.TRUE.
        RecvUp=.TRUE.
        RecvDown=.TRUE.

        IF (pole_nord) THEN
          SendUp=.FALSE.
          RecvUp=.FALSE.
        ENDIF

        IF (pole_sud) THEN
          SendDown=.FALSE.
          RecvDown=.FALSE.
        ENDIF

        if (up.eq.0) then
          SendDown=.FALSE.
          RecvUp=.FALSE.
        endif

        if (down.eq.0) then
          SendUp=.FALSE.
          RecvDown=.FALSE.
        endif

        NbRequest=0

        IF (SendUp) THEN
          NbRequest=NbRequest+1
          buffer_size=down*iip1*ll
          allocate(Buffer_Send_up(Buffer_size))
          call PACK_Data(Field(ij_begin,1),ij,ll,down,Buffer_Send_up)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
          call MPI_ISSEND(Buffer_send_up,Buffer_Size,MPI_REAL8,MPI_Rank-1,1,     &
                          COMM_LMDZ,Request(NbRequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
        ENDIF

        IF (SendDown) THEN
          NbRequest=NbRequest+1

          buffer_size=up*iip1*ll
          allocate(Buffer_Send_down(Buffer_size))
          call PACK_Data(Field(ij_end+1-up*iip1,1),ij,ll,up,Buffer_send_down)

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
          call MPI_ISSEND(Buffer_send_down,Buffer_Size,MPI_REAL8,MPI_Rank+1,1,     &
                          COMM_LMDZ,Request(NbRequest),ierr)
#endif
!$OMP END CRITICAL (MPI)
        ENDIF


        IF (RecvUp) THEN
          NbRequest=NbRequest+1
          buffer_size=up*iip1*ll
          allocate(Buffer_recv_up(Buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
          call MPI_IRECV(Buffer_recv_up,Buffer_size,MPI_REAL8,MPI_Rank-1,1,  &
                          COMM_LMDZ,Request(NbRequest),ierr)
#endif
!$OMP END CRITICAL (MPI)

        ENDIF

        IF (RecvDown) THEN
          NbRequest=NbRequest+1
          buffer_size=down*iip1*ll
          allocate(Buffer_recv_down(Buffer_size))

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
          call MPI_IRECV(Buffer_recv_down,Buffer_size,MPI_REAL8,MPI_Rank+1,1,     &
                          COMM_LMDZ,Request(NbRequest),ierr)
#endif
!$OMP END CRITICAL (MPI)

        ENDIF

#ifdef CPP_MPI
        if (NbRequest > 0) call MPI_WAITALL(NbRequest,Request,Status,ierr)
#endif
        IF (RecvUp)  call Unpack_Data(Field(ij_begin-up*iip1,1),ij,ll,up,Buffer_Recv_up)
        IF (RecvDown) call Unpack_Data(Field(ij_end+1,1),ij,ll,down,Buffer_Recv_down)

        call VTe(VThallo)
        call barrier

      ENDIF  ! using_mpi

      RETURN

    end subroutine exchange_Hallo

    subroutine Gather_Field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#include "iniprint.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
      INTEGER :: ij,ll,rank
      REAL, dimension(ij,ll) :: Field
      REAL, dimension(:),allocatable :: Buffer_send
      REAL, dimension(:),allocatable :: Buffer_Recv
      INTEGER, dimension(0:MPI_Size-1) :: Recv_count, displ
      INTEGER :: ierr
      INTEGER ::i

      IF (using_mpi) THEN

        if (ij==ip1jmp1) then
           allocate(Buffer_send(iip1*ll*(jj_end-jj_begin+1)))
           call Pack_Data(Field(ij_begin,1),ij,ll,jj_end-jj_begin+1,Buffer_send)
        else if (ij==ip1jm) then
           allocate(Buffer_send(iip1*ll*(min(jj_end,jjm)-jj_begin+1)))
           call Pack_Data(Field(ij_begin,1),ij,ll,min(jj_end,jjm)-jj_begin+1,Buffer_send)
        else
           write(lunout,*)ij
        stop 'erreur dans Gather_Field'
        endif

        if (MPI_Rank==rank) then
          allocate(Buffer_Recv(ij*ll))

!CDIR NOVECTOR
          do i=0,MPI_Size-1

            if (ij==ip1jmp1) then
              Recv_count(i)=(jj_end_para(i)-jj_begin_para(i)+1)*ll*iip1
            else if (ij==ip1jm) then
              Recv_count(i)=(min(jj_end_para(i),jjm)-jj_begin_para(i)+1)*ll*iip1
            else
              stop 'erreur dans Gather_Field'
            endif

            if (i==0) then
              displ(i)=0
            else
              displ(i)=displ(i-1)+Recv_count(i-1)
            endif

          enddo

        endif

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
        call MPI_GATHERV(Buffer_send,(min(ij_end,ij)-ij_begin+1)*ll,MPI_REAL8,   &
                          Buffer_Recv,Recv_count,displ,MPI_REAL8,rank,COMM_LMDZ,ierr)
#endif
!$OMP END CRITICAL (MPI)

        if (MPI_Rank==rank) then

          if (ij==ip1jmp1) then
            do i=0,MPI_Size-1
              call Unpack_Data(Field((jj_begin_para(i)-1)*iip1+1,1),ij,ll,                 &
                               jj_end_para(i)-jj_begin_para(i)+1,Buffer_Recv(displ(i)+1))
            enddo
          else if (ij==ip1jm) then
            do i=0,MPI_Size-1
               call Unpack_Data(Field((jj_begin_para(i)-1)*iip1+1,1),ij,ll,                       &
                               min(jj_end_para(i),jjm)-jj_begin_para(i)+1,Buffer_Recv(displ(i)+1))
            enddo
          endif
        endif
      ENDIF ! using_mpi

    end subroutine Gather_Field

    subroutine AllGather_Field(Field,ij,ll)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
      INTEGER :: ij,ll
      REAL, dimension(ij,ll) :: Field
      INTEGER :: ierr

      IF (using_mpi) THEN
        call Gather_Field(Field,ij,ll,0)
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call MPI_BCAST(Field,ij*ll,MPI_REAL8,0,COMM_LMDZ,ierr)
#endif
!$OMP END CRITICAL (MPI)
      ENDIF

    end subroutine AllGather_Field

    subroutine Broadcast_Field(Field,ij,ll,rank)
    implicit none
#include "dimensions.h"
#include "paramet.h"
#ifdef CPP_MPI
    include 'mpif.h'
#endif
      INTEGER :: ij,ll
      REAL, dimension(ij,ll) :: Field
      INTEGER :: rank
      INTEGER :: ierr

      IF (using_mpi) THEN

!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
      call MPI_BCAST(Field,ij*ll,MPI_REAL8,rank,COMM_LMDZ,ierr)
#endif
!$OMP END CRITICAL (MPI)

      ENDIF
    end subroutine Broadcast_Field

    /*
  Subroutine verif_hallo(Field,ij,ll,up,down)
    implicit none
#include "dimensions.h"
#include "paramet.h"
    include 'mpif.h'

      INTEGER :: ij,ll
      REAL, dimension(ij,ll) :: Field
      INTEGER :: up,down

      REAL,dimension(ij,ll): NewField

      NewField=0

      ijb=ij_begin
      ije=ij_end
      if (pole_nord)
      NewField(ij_be
*/
  end module parallel