Changeset 1000
- Timestamp: Oct 6, 2008, 10:43:22 AM (16 years ago)
- Location: LMDZ4/trunk/libf/dyn3dpar
- Files: 11 added, 20 edited
LMDZ4/trunk/libf/dyn3dpar/calfis_p.F
r985 → r1000: calfis_p now also imports using_mpi from the parallel module (USE parallel, ONLY : omp_chunk, using_mpi). The include 'mpif.h' and the MPI_STATUS_SIZE shape of the Status array are compiled only under #ifdef CPP_MPI, with a dummy (1,4) Status array otherwise. The block that exchanges the du/dv boundary tendencies (du_send, du_recv, dv_send, dv_recv) with the neighbouring ranks, in the MPI_rank>0 and MPI_rank<MPI_Size-1 branches, is now enclosed in IF (using_mpi) ... ENDIF, with its OpenMP MASTER / CRITICAL (MPI) sections guarded by #ifdef CPP_MPI, so the routine also builds without an MPI library.
LMDZ4/trunk/libf/dyn3dpar/conf_gcm.F
r985 → r1000: a new configuration key ok_strato is read just after omp_chunk (default .FALSE.); it activates F. Lott's stratospheric version of LMDZ. It is obtained with CALL getin('ok_strato',ok_strato) and echoed with the other parameters written to lunout (write(lunout,*)' ok_strato = ', ok_strato).
LMDZ4/trunk/libf/dyn3dpar/covnat_p.F
r774 → r1000: in the loop over l = 1,klevel, the bounds of the inner loop computing vnat(ij,l) = vcov(ij,l) / cv(ij) change from DO ij = 1,ip1jm to DO ij = ijb,ije, so only the latitude band owned by the local process is treated.
LMDZ4/trunk/libf/dyn3dpar/disvert.F
r774 → r1000: the routine now includes logic.h and tests the new ok_strato switch when building the layer thicknesses. With ok_strato=.FALSE. the original law dsig(l) = 1.0 + 7.0*SIN(x)**2 is kept; with ok_strato=.TRUE. it becomes dsig(l) = (1.0 + 7.0*SIN(x)**2) * (0.5*(1.-tanh(1.*(x-asin(1.))/asin(1.))))**2, where x = 2.*asin(1.)*(FLOAT(l)-0.5)/float(llm+1); the tanh factor strongly reduces dsig for the uppermost levels. snorm still accumulates the dsig(l).
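For reference, a minimal standalone sketch of the two layer-thickness laws introduced by this change; the program name, the llm value and the final normalisation and printout are illustrative choices, not part of the changeset:

! Compare the pre-r1000 and r1000 (ok_strato) dsig laws from disvert.F.
PROGRAM sketch_dsig
  IMPLICIT NONE
  INTEGER, PARAMETER :: llm = 39            ! illustrative number of layers
  LOGICAL, PARAMETER :: ok_strato = .TRUE.
  REAL    :: dsig(llm), x, snorm
  INTEGER :: l

  snorm = 0.
  DO l = 1, llm
    x = 2.*ASIN(1.) * (REAL(l)-0.5) / REAL(llm+1)
    IF (ok_strato) THEN
      ! r1000 law: the original profile damped by a tanh taper near the top
      dsig(l) = (1.0 + 7.0*SIN(x)**2) &
                * (0.5*(1.-TANH(1.*(x-ASIN(1.))/ASIN(1.))))**2
    ELSE
      ! pre-r1000 law
      dsig(l) = 1.0 + 7.0*SIN(x)**2
    ENDIF
    snorm = snorm + dsig(l)
  ENDDO
  dsig(:) = dsig(:)/snorm                   ! illustrative normalisation
  PRINT *, 'normalised dsig:', dsig
END PROGRAM sketch_dsig

Toggling ok_strato shows how the extra tanh factor shrinks the relative thickness of the uppermost layers.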
LMDZ4/trunk/libf/dyn3dpar/disvert0.F
r774 → r1000: same modification as disvert.F above: logic.h is included and the ok_strato branch applies the additional tanh factor to dsig(l).
LMDZ4/trunk/libf/dyn3dpar/fluxstokenc_p.F
r764 → r1000: pbaruc and pbarvc get the SAVE attribute (REAL,SAVE ::), and the routine is reorganised for OpenMP: the initialisation, the accumulation and the increment of iadvtr are placed in a c$OMP MASTER section followed by a c$OMP BARRIER; inside the IF (iadvtr.EQ.istdyn) block the normalisation is done by the master thread, the halo exchange of pbaruc/pbarvc (Register_Hallo, SendRequest, WaitRequest) is surrounded by c$OMP BARRIER directives, and the final call to groupe_p and the output are again performed under c$OMP MASTER, closed by c$OMP END MASTER before the ENDIF.
LMDZ4/trunk/libf/dyn3dpar/gcm.F
r985 → r1000: the include 'mpif.h' is removed from the declarations and the explicit call MPI_BARRIER(COMM_LMDZ,ierr) after CALL Init_interface_dyn_phys is replaced by CALL barrier, the new MPI-independent wrapper provided by the parallel module.
LMDZ4/trunk/libf/dyn3dpar/gr_u_scal_p.F
r985 → r1000: the northern-pole adjustment if (pole_nord) ijb=ij_begin+iip1 is removed and the averaging loop that builds x_scal from aireu(ij)*x_u(ij,l)+aireu(ij-1)*x_u(ij-1,l) now runs DO ij=ijb+1,ije instead of DO ij=ijb,ije.
LMDZ4/trunk/libf/dyn3dpar/initdynav_p.F
r774 → r1000: the routine now defines the write domain of each process for the rebuild tool: the arrays ddid, dsg, dsl, dpf, dpl, dhs, dhe and the integer dynave_domain_id are declared, filled from the local latitude band (iip1, jjp1, jjn, jjb, jje), and registered with flio_dom_set(mpi_size,mpi_rank,...,'box',dynave_domain_id). histbeg is then called on the plain file name trim(infile) with this domain id, instead of opening one file per rank (trim(infile)//'_'//trim(int2str(mpi_rank))//'.nc').
LMDZ4/trunk/libf/dyn3dpar/initfluxsto_p.F
r774 → r1000: same rebuild-domain change as initdynav_p.F, for both horizontal grids: dynu_domain_id and dynv_domain_id are defined with flio_dom_set and passed to histbeg, which now opens trim(infile) and 'fluxstokev' instead of the per-rank file names; the number of latitudes passed to histbeg is the local jjn (instead of jjp1 and jjm).
LMDZ4/trunk/libf/dyn3dpar/inithist_p.F
r774 → r1000: same rebuild-domain change again: dynu_domain_id and dynv_domain_id are set up with flio_dom_set and given to histbeg, which opens trim(infile) and 'dyn_histv' instead of the per-rank files 'dyn_histv_'//trim(int2str(mpi_rank))//'.nc', with jjn latitudes on each grid.
LMDZ4/trunk/libf/dyn3dpar/leapfrog_p.F
r995 → r1000: the include 'mpif.h' is dropped and the MPI_BARRIER call in the initial OMP MASTER section is replaced by CALL barrier. The call SetDistrib(jj_nb_Physic) issued before SendRequest(Request_physic) is removed. When ok_strato is set, CALL top_bound_p(vcov,ucov,teta,dufi,dvfi,dtetafi) is applied before the physics tendencies are added with addfi_p. c$OMP BARRIER directives are added before the MASTER sections that call writedynav_p, and an old commented-out block (Gather_Field calls and the write_grads_dyn.h include) around writehist_p is deleted.
LMDZ4/trunk/libf/dyn3dpar/logic.h
r774 → r1000: the header is rewritten with free-form '!' comments and '&' continuation characters instead of fixed-form 'c' comments, and the new logical ok_strato is added both to COMMON/logic/ and to the LOGICAL declaration; the !$OMP THREADPRIVATE(/logic/) directive is kept.
LMDZ4/trunk/libf/dyn3dpar/mod_const_para.F90
r985 → r1000: the INCLUDE 'mpif.h' is wrapped in #ifdef CPP_MPI. In the coupled (prism) branch, COMM_LMDZ and MPI_REAL_LMDZ are now set explicitly to MPI_COMM_WORLD and MPI_REAL8 under CPP_MPI. The direct MPI start-up is moved into a new SUBROUTINE Init_mpi, called from Init_const_mpi, which invokes MPI_INIT_THREAD with MPI_THREAD_SERIALIZED only when CPP_MPI is defined and now prints a warning instead of calling abort_gcm when the provided thread level is below the requested one.
LMDZ4/trunk/libf/dyn3dpar/mod_hallo.F90
r985 → r1000: the module is made buildable and usable without MPI. The global buffer is allocated through MPI_ALLOC_MEM only when use_mpi_alloc .AND. using_mpi; every include 'mpif.h' is either removed where it was not needed or wrapped in #ifdef CPP_MPI, and the MPI_STATUS_SIZE status arrays and MPI_ADDRESS_KIND integers get fallback declarations in the non-MPI case. The MPI_ISSEND and MPI_IRECV calls of SendRequest now use MPI_REAL_LMDZ instead of MPI_REAL8, are compiled only under CPP_MPI, and print 'Erreur, echange MPI en mode sequentiel !!!' and STOP if an exchange is attempted while using_mpi is false; the MPI_WAITALL calls of the various wait routines are likewise guarded by #ifdef CPP_MPI.
LMDZ4/trunk/libf/dyn3dpar/parallel.F90
r995 → r1000: the module gains the SAVE logicals using_mpi and using_omp; using_mpi is set from #ifdef CPP_MPI, and the CPP_OMP key replaces _OPENMP for the OpenMP enquiries. init_parallel sets mpi_size=1 and mpi_rank=0 when MPI is not used, and the MPI_ABORT and MPI_FINALIZE calls are issued only under CPP_MPI and IF (using_mpi). A new SUBROUTINE barrier wraps MPI_Barrier(COMM_LMDZ,ierr) inside an !$OMP CRITICAL (MPI) section and is a no-op in a sequential build. exchange_hallo, Gather_Field, AllGather_Field and Broadcast_Field are wrapped in IF (using_mpi) blocks, their MPI calls (MPI_ISSEND, MPI_IRECV, MPI_WAITALL, MPI_GATHERV, MPI_BCAST) are guarded by #ifdef CPP_MPI, and exchange_hallo calls the new barrier instead of MPI_Barrier directly.
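The guard pattern used throughout this changeset, a run-time using_mpi test inside a compile-time CPP_MPI block, can be seen in isolation in a small sketch of the new barrier routine; the module and variable names mirror the changeset, but the stub COMM_LMDZ initial value and the demo program are illustrative only:

! Sketch of the CPP_MPI / using_mpi guard pattern from parallel.F90 (r1000).
MODULE parallel_sketch
  IMPLICIT NONE
  LOGICAL, SAVE :: using_mpi = .FALSE.
  INTEGER, SAVE :: COMM_LMDZ = 0     ! stub; set from mod_const_mpi in LMDZ
CONTAINS
  SUBROUTINE barrier
    IMPLICIT NONE
#ifdef CPP_MPI
    INCLUDE 'mpif.h'
#endif
    INTEGER :: ierr
!$OMP CRITICAL (MPI)
#ifdef CPP_MPI
    ! Compiled only when an MPI library is available, called only when MPI is in use
    IF (using_mpi) CALL MPI_Barrier(COMM_LMDZ, ierr)
#endif
!$OMP END CRITICAL (MPI)
  END SUBROUTINE barrier
END MODULE parallel_sketch

PROGRAM demo_barrier
  USE parallel_sketch
  IMPLICIT NONE
  using_mpi = .FALSE.
  CALL barrier                       ! a no-op in a serial build
  PRINT *, 'passed the barrier'
END PROGRAM demo_barrier

Compiled without -DCPP_MPI the routine reduces to nothing, which is what allows gcm.F and leapfrog_p.F to replace their direct MPI_BARRIER calls with CALL barrier.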
LMDZ4/trunk/libf/dyn3dpar/startvar.F
r774 → r1000: the CASE ('deltat') branch is replaced by two branches for the slab ocean, CASE ('tslab') and CASE ('seaice'), both initialising champ(:) to 0.0. The unused work array varrr (declaration and ALLOCATE) is commented out, and lon_ini, lat_ini and lev_dyn are now deallocated together with the other temporaries.
LMDZ4/trunk/libf/dyn3dpar/times.F90
r792 → r1000: the include 'mpif.h' statements are wrapped in #ifdef CPP_MPI and the bodies of allgather_timer and allgather_timer_average only execute IF (using_mpi). The mpi_allgather calls now use MPI_REAL_LMDZ instead of MPI_REAL8 and are guarded by #ifdef CPP_MPI; in allgather_timer the receive buffers are corrected from timer_table(1,1,mpi_rank) and timer_table_sqr(1,1,mpi_rank) to timer_table(1,1,0) and timer_table_sqr(1,1,0), i.e. the start of the gathered arrays.
LMDZ4/trunk/libf/dyn3dpar/writedynav_p.F
r774 → r1000: for the V-grid scalar wind the southern-pole upper bound is corrected from ije=jj_end-iip1 to ije=ij_end-iip1, and the fields passed to histwrite for 'v' and 'theta' are now the local slices vs(ijb:ije,:) and teta(ijb:ije,:).
LMDZ4/trunk/libf/dyn3dpar/writehist_p.F
r774 → r1000: the histwrite call for 'ucov' now passes the local slice ucov(ijb:ije,:) instead of the whole ucov array, and for the V winds the southern-pole bound is corrected from ije=jj_end-iip1 to ije=ij_end-iip1.