source: BOL/LMDZ_Setup_amaury/lmdz_env.sh @ 5379

Last change on this file since 5379 was 5372, checked in by abarral, 9 days ago

Add NB_CORE_PER_NODE_MAX for JZ

File size: 6.0 KB
#!/bin/bash
########################################################
# This file loads the required modules and sets the paths for simulations
# To modify the paths:
#    1) In the <set_env> function, find the section corresponding to your machine
#    2) Modify the variables, which are documented in the *) section of <set_env>
# See the end of <set_env> for the explanation of each variable
########################################################

# <root_dir> is set here by setup.sh (via sed)
root_dir=/home/abarral/PycharmProjects/installLMDZ/LMDZ_Setup

function get_hostname {
  if grep -q "Adastra" /etc/motd; then
    hostname="adastra"
  elif which idrquota &> /dev/null; then
    hostname="jean-zay"
  else
    hostname=$(hostname)
  fi
}

function set_env {  # Platform-specific
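  # Machines are matched on the first 5 characters of the detected hostname
  # ("jean-" for Jean-Zay, "spiri" for Spirit, "adast" for Adastra); anything else falls through to *)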
  case ${hostname:0:5} in
    jean-)
      module purge
      compilo=19.0.4 # also available: 2013.0, 2017.2
      module load intel-compilers/$compilo
      #module load intel-mpi/$compilo
      module load intel-mkl/$compilo
      module load hdf5/1.10.5-mpi
      module load netcdf/4.7.2-mpi
      module load netcdf-fortran/4.5.2-mpi
      module load subversion/1.9.7
      # For the gcc module, see: https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay
      #module load gcc/6.5.0
      module load nco
      module load cdo
      # CPU time is charged to the currently active group (project):
      #   idrproj shows the currently active group (project)
      #   idrproj -d newproj   makes "newproj" the active project,
      #        so that $STORE, $WORK etc. then point to the spaces of "newproj"
      account="lmd"  # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes
      ARCH="X64_JEANZAY"
      SIMRUNBASEDIR="$SCRATCH/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$root_dir/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=20
      NB_CORE_PER_NODE_MAX=0
      MPICMD="srun -n"
      RUNBASHCMD="srun -A $account@cpu --label -n 1 -c"
      SUBMITCMD="sbatch -A $account@cpu"
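      # For example (hypothetical job script name), "$SUBMITCMD job.sh" expands to
      # "sbatch -A lmd@cpu job.sh", so the job is charged to the "lmd" project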
      ;;
    spiri)
      module purge
      module load subversion/1.13.0
      module load gcc/11.2.0
      module load openmpi/4.0.7
      module load cdo/2.3.0

      ARCH="X64_MESOIPSL-GNU"
      SIMRUNBASEDIR="$SCRATCH/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$HOME/LMDZ_Init"
      NB_MPI_MAX=5
      NB_OMP_MAX=1
      NB_CORE_PER_NODE_MAX=0
      N_HYPERTHREADING=1
      MPICMD="mpirun -np"  # on spirit, we can't run MPI using srun from within sbatch
      RUNBASHCMD="bash"
      SUBMITCMD="sbatch"
      ;;
    adast)
      module purge
      module load PrgEnv-gnu  # we need to load the env because LMDZ links some shared libraries
      module load gcc/13.2.0  # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers
      export CRAY_CPU_TARGET=x86-64  # to suppress warnings during the CMake netcdf95 build
      export FI_CXI_RX_MATCH_MODE=hybrid  # 09/24 otherwise we get random SIGABRT e.g. "libfabric:2490616:1725895288::cxi:core:cxip_ux_onload_cb():2657<warn> c1456: RXC (0x5130:21) PtlTE 84:[Fatal] LE resources not recovered during flow control. FI_CXI_RX_MATCH_MODE=[hybrid|software] is required"

      function cdo {  # cdo is available as a spack cmd which requires a specific, incompatible env
        unset cdo
        module purge
        module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi
        cdo "$@"
        set_env
      }

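      # Same trick as for cdo above: ferret needs its own module environment, so the wrapper
      # swaps modules, runs ferret, then calls set_env to restore the simulation environment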
      function ferret {
        unset ferret
        module purge
        module load GCC-CPU-3.1.0
        module load ferret
        ferret "$@"
        set_env
      }

      account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-)
      ARCH="X64_ADASTRA-GNU"
      SIMRUNBASEDIR="$SCRATCHDIR/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$WORKDIR/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=200
      NB_CORE_PER_NODE_MAX=192
      N_HYPERTHREADING=1  # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for Dynamico?
      MPICMD="srun -n"
#      RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c"
      RUNBASHCMD="bash"  # On Adastra the docs say we can use login nodes for compilation
      SUBMITCMD="env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account"  # we need to remove the existing SLURM variables, otherwise they may be unexpectedly inherited by the submitted script
      ;;
    *) echo "WARNING: RUNNING THIS SCRIPT ON A LOCAL COMPUTER IS DISCOURAGED (lackluster CPUs and memory)"
      ARCH="local-gfortran-parallel"  # The arch file to use
      SIMRUNBASEDIR="/tmp/SCRATCH/"  # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...)
      LMDZD="$root_dir/LMDZD"  # Where the sources will be downloaded and compiled
      LMDZ_INIT="$HOME/LMDZ_Init"  # Where to store shared files used for initialisation. Should be outside the LMDZ_Setup dir since it's shared between several LMDZ_Setup installations.
      NB_MPI_MAX=2  # Max number of MPI processes (only for running simulations)
      NB_OMP_MAX=2  # Max number of OMP threads (only for running simulations)
      NB_CORE_PER_NODE_MAX=0  # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific)
      N_HYPERTHREADING=1  # How many hyperthreading threads per physical core
      MPICMD="mpirun -np"  # command to run an MPI executable, as $MPICMD <nprocs> <script>
      RUNBASHCMD="bash"  # command to run a bash job, as $RUNBASHCMD [nthreads] <script> (nthreads only supplied if RUNBASHCMD != "bash")
      SUBMITCMD="."  # command to submit a job, as $SUBMITCMD <script>
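      # Hypothetical usage examples (assumed script names), based on the comments above:
      #   $MPICMD 2 ./gcm.e        -> mpirun -np 2 ./gcm.e
      #   $RUNBASHCMD compile.sh   -> bash compile.sh
      #   $SUBMITCMD job.sh        -> . job.sh   (the job simply runs in the current shell)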
      ;;
  esac
}

get_hostname
echo "Setting up lmdz_env on $hostname"
set_env

if [[ ! (-d $root_dir && -f $root_dir/.lmdz_setup_root_dir && -f $root_dir/lmdz_env.sh) ]]; then
  echo "STOP: root_dir $root_dir not found; either you are running on an unsupported cluster, or the initialisation failed midway"; exit 1
fi
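
# A minimal usage sketch (an assumption, not part of the original setup scripts): this file is
# meant to be sourced so that the module environment and the variables above are available to
# the calling script, e.g.
#   source ./lmdz_env.sh
#   $SUBMITCMD my_run_script.sh   # my_run_script.sh is a hypothetical job script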