source: BOL/LMDZ_Setup/lmdz_env.sh @ 5498

Last change on this file since 5498 was 5497, checked in by fhourdin, 12 days ago

cdo/adastra + ajout .def de best tuning partII

File size: 10.5 KB
RevLine 
#!/bin/bash
########################################################
# This file loads the required modules and sets the paths for simulations
# To modify the paths:
#    1) In the <set_env> function, find the section corresponding to your machine
#    2) Modify the variables, which are documented in the *) section of <set_env>
# See the end of <set_env> for the explanation of each
########################################################

# <root_dir> will be set by sed by setup.sh here
root_dir=/data/hourdin/TESTS/SPLA
pub_store=1  # flag choosing where the LMDZ pub cache lives on a local machine (see *) branch of <set_env>)
PARALLEL=1   # 0 = serial build/run on a local machine, otherwise MPI/OpenMP parallel (see *) branch of <set_env>)

function get_hostname {
  # Detect the machine we are running on and set the global <hostname>.
  # Adastra advertises itself in /etc/motd; Jean-Zay is identified by the
  # presence of the idrquota command; otherwise fall back to $(hostname).
  if grep -q "Adastra" /etc/motd 2>/dev/null ; then
    hostname="adastra"
  elif command -v idrquota &> /dev/null; then  # 'command -v' is the portable replacement for 'which'
    hostname="jean-zay"
  else
    hostname=$(hostname)
  fi
}
[4615]24
function set_env {  # Platform-specific
  # Sets, depending on the detected <hostname>:
  #   ARCH                  : the arch file(s) to use for compilation
  #   SIMRUNBASEDIR         : where the simulations will be executed
  #   LMDZD                 : where the sources will be downloaded and compiled
  #   LMDZ_INIT             : local cache of files fetched from the LMDZ pub server
  #   NB_MPI_MAX, NB_OMP_MAX, NB_CORE_PER_NODE_MAX, N_HYPERTHREADING : run-time limits
  #   MPICMD, RUNBASHCMD    : how to launch an MPI executable / a bash job
  #   submitcmd()           : how to submit a batch job, as: submitcmd <script>
  case ${hostname:0:5} in
#-------------------------------------------------------------------------
    jean-) # Jean-zay, Idris-CNRS super computer
#-------------------------------------------------------------------------
      module purge
      compilo=19.0.4 # available 2013.0, 2017.2
      module load intel-compilers/$compilo
      #module load intel-mpi/$compilo
      module load intel-mkl/$compilo
      module load hdf5/1.10.5-mpi
      module load netcdf/4.7.2-mpi
      module load netcdf-fortran/4.5.2-mpi
      module load subversion/1.9.7
      # For the gcc module, see: https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay
      #module load gcc/6.5.0
      module load nco
      module load cdo
      # CPU time is charged to the active (default) group/project:
      #   "idrproj" lists the active project,
      #   "idrproj -d newproj" makes "newproj" the active project
      #   (then $STORE, $WORK etc point to the spaces of "newproj")
      account="lmd"  # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes
      ARCH="X64_JEANZAY_PBIOIPSL"
      SIMRUNBASEDIR="$SCRATCH/$(basename $root_dir)"
      LMDZD="$WORK/LMDZD"
      LMDZ_INIT="$WORK/LMDZ/pub"
      NB_MPI_MAX=2000
      NB_OMP_MAX=20
      NB_CORE_PER_NODE_MAX=0
      MPICMD="srun -n"
      RUNBASHCMD="srun -A $account@cpu --label -n 1 -c"
      #SUBMITCMD="sbatch -A $account@cpu"
      submitcmd() {
         sbatch -A $account@cpu "$@"  # "$@" quoted: script paths may contain spaces
      }
      ;;
#-------------------------------------------------------------------------
    spiri) # Spirit : IPSL cluster
#-------------------------------------------------------------------------
      module purge
      module load subversion/1.13.0
      module load gcc/11.2.0
      module load openmpi/4.0.7
      module load cdo/2.3.0

      ARCH="X64_MESOIPSL-GNU"
      SIMRUNBASEDIR="$SCRATCH/$(basename $root_dir)"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$HOME/LMDZ/pub"
      NB_MPI_MAX=5
      NB_OMP_MAX=1
      NB_CORE_PER_NODE_MAX=0
      N_HYPERTHREADING=1
      MPICMD="mpirun -np"  # on spirit, we can't run MPI using srun from within sbatch
      RUNBASHCMD="bash"
      #SUBMITCMD="sbatch"
      submitcmd() {
         sbatch "$@"  # "$@" quoted: script paths may contain spaces
      }
      ;;
#-------------------------------------------------------------------------
    adast) # Adastra, Cines computing center
#-------------------------------------------------------------------------
      module purge
      module load PrgEnv-gnu  # we need to load the env because lmdz links some shared libraries
      module load gcc/13.2.0  # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers
      export CRAY_CPU_TARGET=x86-64  # to suppress warnings during Cmake netcdf95 build
      export FI_CXI_RX_MATCH_MODE=hybrid  # 09/24 otherwise we get random SIGABRT e.g. "libfabric:2490616:1725895288::cxi:core:cxip_ux_onload_cb():2657<warn> c1456: RXC (0x5130:21) PtlTE 84:[Fatal] LE resources not recovered during flow control. FI_CXI_RX_MATCH_MODE=[hybrid|software] is required"

      function cdo {  # cdo is available as a spack cmd which requires a specific, incompatible env
        unset cdo     # drop this wrapper so the real cdo command is found below
        module purge
        # Comment kept for the "develop" option. It worked, then stopped working
        #module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi
        module load GCC-CPU-3.2.0 cdo/2.4.2-omp-mpi
        cdo "$@"
        set_env  # restore the normal environment (and this wrapper) afterwards
      }

      function ferret {
        unset ferret  # drop this wrapper so the real ferret command is found below
        module purge
        module load GCC-CPU-3.1.0
        module load ferret
        ferret "$@"
        set_env  # restore the normal environment (and this wrapper) afterwards
      }

      account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-)
      ARCH="X64_ADASTRA-GNU"
      SIMRUNBASEDIR="$SCRATCHDIR/$(basename $root_dir)"
      LMDZD="$WORKDIR/LMDZD"
      LMDZ_INIT="$WORKDIR/LMDZ/pub"
      NB_MPI_MAX=2000
      NB_OMP_MAX=200
      NB_CORE_PER_NODE_MAX=192
      N_HYPERTHREADING=1  # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for Dynamico ?
      MPICMD="srun -n"
#      RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c"
      RUNBASHCMD="bash"  # On Adastra the docs says we can use login nodes for compilation
      #SUBMITCMD="env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account"  # we need to remove the existing SLURM variables otherwise they may be unexpectedly inherited by the submitted script
      submitcmd() {
        env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account "$@"
      }  # we need to remove the existing SLURM variables otherwise they may be unexpectedly inherited by the submitted script
      ;;
#-------------------------------------------------------------------------
    *)  # Local machine.
#-------------------------------------------------------------------------
      SIMRUNBASEDIR="$root_dir/SCRATCH/"  # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...)
      LMDZD="$root_dir/../LMDZD"  # Where the sources will be downloaded and compiled
      # BUGFIX: test the flag's value, not its mere presence ("[[ $pub_store ]]" was
      # always true since pub_store is set to 0 or 1, both non-empty strings)
      if [[ $pub_store = 1 ]] ; then LMDZ_INIT="$HOME/LMDZ/pub" ; else LMDZ_INIT="$root_dir/LMDZ/pub" ; fi
      # Old fortran versions (gfortran <= 9) need dedicated arch files, fetched once into $LMDZD/arch
      if [ "$( gfortran --version | head -1  | awk ' { print $NF } ' | cut -d. -f1 )" -le 9 ] ; then
          gfortranv=gfortran9 ; archdir="-arch_dir arch"
          if [ ! -d "$LMDZD/arch" ] ; then mkdir -p "$LMDZD" ; cd "$LMDZD" ;
            wget http://lmdz.lmd.jussieu.fr/pub/src_archives/misc/arch/LMDZ/arch-local-gfortran9.tar
            tar xvf arch-local-gfortran9.tar
            cd -
          fi
      else
          gfortranv=gfortran ; archdir=
      fi
      if [[ $PARALLEL = 0 ]] ; then
         ARCH="local-${gfortranv} $archdir"  # The arch file to use
         NB_MPI_MAX=1  # Max number of MPI cores (only for running simulations)
         NB_OMP_MAX=1  # Max number of OMP threads (only for running simulations)
         MPICMD="" # command to run an mpi executable, as $MPICMD <nprocs> <script>
      else
         ARCH="local-${gfortranv}-parallel $archdir"  # The arch file to use
         NB_MPI_MAX=2  # Max number of MPI cores (only for running simulations)
         NB_OMP_MAX=2  # Max number of OMP threads (only for running simulations)
         MPICMD="mpirun -np" # command to run an mpi executable, as $MPICMD <nprocs> <script>
      fi

      NB_CORE_PER_NODE_MAX=0  # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific)
      N_HYPERTHREADING=1  # How many hyperthreading threads per physical core
      RUNBASHCMD="bash" # command to run a bash job, as $runbashcmd (nthreads) <script> [nthreads only supplied if =/="bash"]
      #SUBMITCMD="."  # command to sumbit a job, as $submitcmd <script>
      submitcmd() {
         nohup bash "$1" > out.$$ 2>err.$$ &  # run detached, logging to out.<pid>/err.<pid>
      }
      ;;
  esac
}
170
#-----------------------------------------------------------------------------------------------------
function wget_pub() { # fetch a file from http://lmdz.lmd.jussieu.fr/pub and cache it under $LMDZ_INIT
#-----------------------------------------------------------------------------------------------------
  local dir=$1   # subdirectory on the pub server (mirrored locally under $LMDZ_INIT)
  local file=$2  # file name to fetch
  local target_dir=$LMDZ_INIT/$dir
  # Download only if the file is not already in the local cache
  if [ ! -f "$target_dir/$file" ] ; then
     mkdir -p "$target_dir"
     cd "$target_dir" || return 1  # abort rather than download into the wrong directory
     wget --no-check-certificate -nv "http://lmdz.lmd.jussieu.fr/pub/$dir/$file"
     cd -
  fi
}
184
#-----------------------------------------------------------------------------------------------------
function cp_from_pub() { # copy a file from the local $LMDZ_INIT cache into the current directory
#-----------------------------------------------------------------------------------------------------
  local dir=$1   # subdirectory under $LMDZ_INIT
  local file=$2  # file name to copy
  cp -f "$LMDZ_INIT/$dir/$file" .
}
192
#-----------------------------------------------------------------------------------------------------
function ln_from_pub() { # symlink a file from the local $LMDZ_INIT cache into the current directory
#-----------------------------------------------------------------------------------------------------
  local dir=$1   # subdirectory under $LMDZ_INIT
  local file=$2  # file name to link
  ln -sf "$LMDZ_INIT/$dir/$file" .
}
200
#-----------------------------------------------------------------------------------------------------
function get_input_files() {
#-----------------------------------------------------------------------------------------------------
  # Fetch or link all the input files needed by a given model configuration.
  #   $1 (method) : wget_pub (download into the $LMDZ_INIT cache) or ln_from_pub (symlink from it)
  #   $2 (target) : dataset name: Orchidee | AerChem | SPLA_WA/emissions
  # Exits (not returns) on an unknown method/target, stopping the sourcing script.
  local method=$1
  local target=$2
  local files
  case $target in

      Orchidee) files="PFTmap_IPCC_2000.nc cartepente2d_15min.nc routing.nc routing_simple.nc lai2D.nc \
      alb_bg_modisopt_2D_ESA_v2.nc reftemp.nc \
      soils_param.nc woodharvest_2000.nc PFTmap_15PFT.v1_2000.nc soil_bulk_and_ph.nc  \
      ndep_nhx.nc ndep_noy.nc nfert_cropland.nc nfert_pasture.nc nmanure_cropland.nc nmanure_pasture.nc bnf.nc" ;;

      AerChem) files="aerosols1850_from_inca.nc aerosols9999_from_inca.nc" ;;

      SPLA_WA/emissions) files="donnees_lisa.nc SOILSPEC.data              \
         cly.dat $( for i in $(seq -w 1 12 ) ; do echo dust$i.nc ; done ) wth.dat   \
         carbon_emissions.nc sulphur_emissions_antro.nc sulphur_emissions_nat.nc \
         sulphur_emissions_volc.nc" ;;

      *) echo "target $target not available in get_input_files" ; exit 1 ;;
  esac

  if [[ $method != wget_pub && $method != ln_from_pub ]] ; then
     echo "method $method not available in get_input_files" ; exit 1
  fi
  # $files is deliberately unquoted: word-splitting iterates over the list
  for file in $files ; do $method "3DInputData/$target" "$file" ; done

}
229
# Initialise the environment: detect the machine, then set paths/modules/limits
get_hostname
echo "Setting up lmdz_env on $hostname"
set_env

# Sanity check: root_dir must exist and contain the marker files created by setup.sh
if [[ ! (-d $root_dir && -f $root_dir/.lmdz_setup_root_dir && -f $root_dir/lmdz_env.sh) ]]; then
  echo "STOP: root_dir $root_dir not found, either you are running on an unsupported cluster, or the initialisation failed midway"; exit 1
fi
Note: See TracBrowser for help on using the repository browser.