source: BOL/LMDZ_Setup/lmdz_env.sh @ 5422

Last change on this file since 5422 was 5422, checked in by fhourdin, 12 hours ago

Changing $SUBMITCMD to a function submitcmd

Needed for local (non-cluster) use of LMDZ

File size: 7.1 KB
Line 
#!/bin/bash
########################################################
# Loads the required environment modules and sets the paths used by the
# LMDZ_Setup simulation scripts.
# To adapt the paths to your machine:
#    1) In the <set_env> function, find the section corresponding to your machine
#    2) Modify the variables, which are documented in the *) section of <set_env>
# See the end of <set_env> for the explanation of each
########################################################

# <root_dir> will be set by sed by setup.sh here
root_dir=/home/hourdin/TMP/LMDZ_Setup
# Detect which machine we are running on and store the result in the global
# <hostname> variable ("adastra", "jean-zay", or the literal hostname).
# Detection heuristics: Adastra advertises itself in /etc/motd; Jean-Zay is
# the only supported machine providing the idrquota command.
function get_hostname {
  if grep -q "Adastra" /etc/motd 2>/dev/null; then
    hostname="adastra"
  elif command -v idrquota &> /dev/null; then  # 'command -v' is portable, unlike 'which'
    hostname="jean-zay"
  else
    hostname=$(hostname)  # fall back to the real hostname (handled by *) in set_env)
  fi
}
22
# Platform-specific configuration, dispatched on the machine name set by
# <get_hostname>. Each branch defines the variables documented in the *)
# (local machine) section, plus a submitcmd() function used as:
#   submitcmd <script> [sbatch options...]
function set_env {
  case ${hostname:0:5} in
#-------------------------------------------------------------------------
    jean-) # Jean-Zay, Idris-CNRS super computer
#-------------------------------------------------------------------------
      module purge
      compilo=19.0.4 # available 2013.0, 2017.2
      module load intel-compilers/$compilo
      #module load intel-mpi/$compilo
      module load intel-mkl/$compilo
      module load hdf5/1.10.5-mpi
      module load netcdf/4.7.2-mpi
      module load netcdf-fortran/4.5.2-mpi
      module load subversion/1.9.7
      # For the gcc module, see: https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay
      #module load gcc/6.5.0
      module load nco
      module load cdo
      # Compute time is billed to the active default project (group):
      #   'idrproj' shows the active project,
      #   'idrproj -d newproj' makes "newproj" the active project
      #   (then $STORE, $WORK etc. point to the spaces of "newproj")
      account="lmd"  # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes
      ARCH="X64_JEANZAY_PBIOIPSL"
      SIMRUNBASEDIR="$SCRATCH/$(basename "$root_dir")"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$root_dir/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=20
      NB_CORE_PER_NODE_MAX=0
      MPICMD="srun -n"
      RUNBASHCMD="srun -A $account@cpu --label -n 1 -c"
      #SUBMITCMD="sbatch -A $account@cpu"
      submitcmd() {
        sbatch -A "$account@cpu" "$@"  # "$@" forwards the script (and any extra options)
      }
      ;;
#-------------------------------------------------------------------------
    spiri) # Spirit : IPSL cluster
#-------------------------------------------------------------------------
      module purge
      module load subversion/1.13.0
      module load gcc/11.2.0
      module load openmpi/4.0.7
      module load cdo/2.3.0

      ARCH="X64_MESOIPSL-GNU"
      SIMRUNBASEDIR="$SCRATCH/$(basename "$root_dir")"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$HOME/LMDZ_Init"
      NB_MPI_MAX=5
      NB_OMP_MAX=1
      NB_CORE_PER_NODE_MAX=0
      N_HYPERTHREADING=1
      MPICMD="mpirun -np"  # on spirit, we can't run MPI using srun from within sbatch
      RUNBASHCMD="bash"
      #SUBMITCMD="sbatch"
      submitcmd() {
        sbatch "$@"
      }
      ;;
#-------------------------------------------------------------------------
    adast) # Adastra, Cines computing center
#-------------------------------------------------------------------------
      module purge
      module load PrgEnv-gnu  # we need to load the env because lmdz links some shared libraries
      module load gcc/13.2.0  # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers
      export CRAY_CPU_TARGET=x86-64  # to suppress warnings during Cmake netcdf95 build
      export FI_CXI_RX_MATCH_MODE=hybrid  # 09/24 otherwise we get random SIGABRT e.g. "libfabric:2490616:1725895288::cxi:core:cxip_ux_onload_cb():2657<warn> c1456: RXC (0x5130:21) PtlTE 84:[Fatal] LE resources not recovered during flow control. FI_CXI_RX_MATCH_MODE=[hybrid|software] is required"

      # cdo is only available through a spack module that needs a specific,
      # incompatible environment: swap envs, run cdo once, then restore ours.
      function cdo {
        unset -f cdo  # remove this wrapper so the real cdo binary is found
        module purge
        module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi
        cdo "$@"
        set_env  # restore the LMDZ environment (re-creates this wrapper)
      }

      # Same env-swapping wrapper as <cdo>, for ferret.
      function ferret {
        unset -f ferret
        module purge
        module load GCC-CPU-3.1.0
        module load ferret
        ferret "$@"
        set_env
      }

      account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-)
      ARCH="X64_ADASTRA-GNU"
      SIMRUNBASEDIR="$SCRATCHDIR/$(basename "$root_dir")"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$WORKDIR/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=200
      NB_CORE_PER_NODE_MAX=192
      N_HYPERTHREADING=1  # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for Dynamico ?
      MPICMD="srun -n"
#      RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c"
      RUNBASHCMD="bash"  # On Adastra the docs says we can use login nodes for compilation
      # We need to remove the existing SLURM_/SBATCH_/SRUN_ variables otherwise
      # they may be unexpectedly inherited by the submitted script.
      submitcmd() {
        # BUGFIX: "$@" was missing, so the script to submit was never passed to sbatch
        env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account="$account" "$@"
      }
      ;;
#-------------------------------------------------------------------------
    *)  # Local machine.
#-------------------------------------------------------------------------
      ARCH="local-gfortran-parallel"  # The arch file to use
      SIMRUNBASEDIR="$root_dir/SCRATCH/"  # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...)
      LMDZD="$root_dir/LMDZD"  # Where the sources will be downloaded and compiled
      LMDZ_INIT="$HOME/LMDZ_Init"  # Where to store shared files used for initialisation. Should be outside the LMDZ_Setup dir since it's shared between several LMDZ_Setup.
      NB_MPI_MAX=2  # Max number of MPI cores (only for running simulations)
      NB_OMP_MAX=2  # Max number of OMP threads (only for running simulations)
      NB_CORE_PER_NODE_MAX=0  # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific)
      N_HYPERTHREADING=1  # How many hyperthreading threads per physical core
      MPICMD="mpirun -np" # command to run an mpi executable, as $MPICMD <nprocs> <script>
      RUNBASHCMD="bash" # command to run a bash job, as $runbashcmd (nthreads) <script> [nthreads only supplied if =/="bash"]
      #SUBMITCMD="."  # command to sumbit a job, as $submitcmd <script>
      submitcmd() {
        # No scheduler locally: run the script in the background, logging to out.<pid>/err.<pid>
        nohup bash "$1" > out.$$ 2> err.$$ &
      }
      ;;
  esac
}
147
# Detect the machine, then load its modules/paths.
get_hostname
echo "Setting up lmdz_env on $hostname"
set_env

# Sanity check: root_dir must exist and look like a genuine LMDZ_Setup checkout.
if ! [[ -d $root_dir && -f $root_dir/.lmdz_setup_root_dir && -f $root_dir/lmdz_env.sh ]]; then
  echo "STOP: root_dir $root_dir not found, either you are running on an unsupported cluster, or the initialisation failed midway"; exit 1
fi
Note: See TracBrowser for help on using the repository browser.