#!/bin/bash
########################################################
# This file loads the required modules and sets the paths for simulations
# To modify the paths:
# 1) In the <set_env> function, find the section corresponding to your machine
# 2) Modify the variables, which are documented in the *) section of <set_env>
# See the end of <set_env> for the explanation of each variable
########################################################

|---|
| 10 | function get_hostname { |
|---|
| 11 | if grep -q "Adastra" /etc/motd; then |
|---|
| 12 | hostname="adastra" |
|---|
| 13 | elif which idrquota &> /dev/null; then |
|---|
| 14 | hostname="jean-zay" |
|---|
| 15 | else |
|---|
| 16 | hostname=$(hostname) |
|---|
| 17 | fi |
|---|
| 18 | } |
|---|
| 19 | |
|---|
| 20 | function set_env { # Platform-specific |
|---|
| 21 | case ${hostname:0:5} in |
|---|
| 22 | jean-) |
|---|
| 23 | module purge |
|---|
| 24 | compilo=19.0.4 # available 2013.0, 2017.2 |
|---|
| 25 | module load intel-compilers/$compilo |
|---|
| 26 | #module load intel-mpi/$compilo |
|---|
| 27 | module load intel-mkl/$compilo |
|---|
| 28 | module load hdf5/1.10.5-mpi |
|---|
| 29 | module load netcdf/4.7.2-mpi |
|---|
| 30 | module load netcdf-fortran/4.5.2-mpi |
|---|
| 31 | module load subversion/1.9.7 |
|---|
| 32 | #Pour module gcc, voir : https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay |
|---|
| 33 | #module load gcc/6.5.0 |
|---|
| 34 | module load nco |
|---|
| 35 | module load cdo |
|---|
| 36 | # Imputation de la consommation sur le groupe (projet) actif par defaut, |
|---|
| 37 | # idrproj indique le groupe (projet) actif par defaut |
|---|
| 38 | # idrproj -d newproj redefinit "newproj" en tant que projet actif, |
|---|
| 39 | # alors $STORE, $WORK etc vont designer les espaces de "newproj") |
|---|
| 40 | account="lmd" # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes |
|---|
| 41 | root_dir="$WORK/LMDZ_Setup"; |
|---|
| 42 | ARCH="X64_JEANZAY" |
|---|
| 43 | SIMRUNBASEDIR="$SCRATCH/" |
|---|
| 44 | LMDZD="$root_dir/LMDZD" |
|---|
| 45 | LMDZ_INIT="$root_dir/LMDZ_Init" |
|---|
| 46 | NB_MPI_MAX=2000 |
|---|
| 47 | NB_OMP_MAX=20 |
|---|
| 48 | MPICMD="srun -n" |
|---|
| 49 | RUNBASHCMD="srun -A $account@cpu --label -n 1 -c" |
|---|
| 50 | SUBMITCMD="sbatch -A $account@cpu" |
|---|
| 51 | ;; |
|---|
| 52 | spiri) |
|---|
| 53 | module purge |
|---|
| 54 | module load subversion/1.13.0 |
|---|
| 55 | module load gcc/11.2.0 |
|---|
| 56 | module load openmpi/4.0.7 |
|---|
| 57 | module load cdo/2.3.0 |
|---|
| 58 | |
|---|
| 59 | root_dir="/data/abarral/LMDZ_SETUP_ROOT/LMDZ_Setup"; |
|---|
| 60 | ARCH="X64_MESOIPSL-GNU" |
|---|
| 61 | SIMRUNBASEDIR="$SCRATCH/" |
|---|
| 62 | LMDZD="$root_dir/LMDZD" |
|---|
| 63 | LMDZ_INIT="$root_dir/LMDZ_Init" |
|---|
| 64 | NB_MPI_MAX=5 |
|---|
| 65 | NB_OMP_MAX=1 |
|---|
| 66 | NB_CORE_PER_NODE_MAX=0 |
|---|
| 67 | N_HYPERTHREADING=1 |
|---|
| 68 | MPICMD="mpirun -np" # on spirit, we can't run MPI using srun from within sbatch |
|---|
| 69 | RUNBASHCMD="bash" |
|---|
| 70 | SUBMITCMD="sbatch" |
|---|
| 71 | ;; |
|---|
| 72 | adast) |
|---|
| 73 | module purge |
|---|
| 74 | module load PrgEnv-gnu # we need to load the env because lmdz links some shared libraries |
|---|
| 75 | module load gcc/13.2.0 # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers |
|---|
| 76 | |
|---|
| 77 | function cdo { # cdo is available as a spack cmd which requires a specific, incompatible env |
|---|
| 78 | unset cdo |
|---|
| 79 | module purge |
|---|
| 80 | module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi |
|---|
| 81 | cdo "$@" |
|---|
| 82 | set_env |
|---|
| 83 | } |
|---|
| 84 | |
|---|
| 85 | function ferret { |
|---|
| 86 | unset ferret |
|---|
| 87 | module purge |
|---|
| 88 | module load GCC-CPU-3.1.0 |
|---|
| 89 | module load ferret |
|---|
| 90 | ferret "$@" |
|---|
| 91 | set_env |
|---|
| 92 | } |
|---|
| 93 | |
|---|
| 94 | account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-) |
|---|
| 95 | root_dir="$WORKDIR/LMDZ_Setup"; |
|---|
| 96 | ARCH="X64_ADASTRA-GNU" |
|---|
| 97 | SIMRUNBASEDIR="$SCRATCHDIR/" |
|---|
| 98 | LMDZD="$root_dir/LMDZD" |
|---|
| 99 | LMDZ_INIT="$root_dir/LMDZ_Init" |
|---|
| 100 | NB_MPI_MAX=2000 |
|---|
| 101 | NB_OMP_MAX=200 |
|---|
| 102 | NB_CORE_PER_NODE_MAX=192 |
|---|
| 103 | N_HYPERTHREADING=1 # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for Dynamico ? |
|---|
| 104 | MPICMD="srun -n" |
|---|
| 105 | # RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c" |
|---|
| 106 | RUNBASHCMD="bash" # On Adastra the docs says we can use login nodes for compilation |
|---|
| 107 | SUBMITCMD="env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account" # we need to remove the existing SLURM variables otherwise they may be unexpectedly inherited by the submitted script |
|---|
| 108 | ;; |
|---|
| 109 | *) echo "WARNING: RUNNING THIS SCRIPT ON A LOCAL COMPUTER IS DISCOURAGED (lackluster cpus and memory)" |
|---|
| 110 | root_dir="/home/abarral/PycharmProjects/installLMDZ/LMDZ_Setup"; # Where you have extracted LMDZ_Setup. Can't use $(pwd) since this script gets copied and ran from several locations |
|---|
| 111 | ARCH="local-gfortran-parallel" # The arch file to use |
|---|
| 112 | SIMRUNBASEDIR="/tmp/SCRATCH/" # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...) |
|---|
| 113 | LMDZD="$root_dir/LMDZD" # Where the sources will be downloaded and compiled |
|---|
| 114 | LMDZ_INIT="$root_dir/LMDZ_Init" # Where to store shared files used for initialisation |
|---|
| 115 | NB_MPI_MAX=2 # Max number of MPI cores (only for running simulations) |
|---|
| 116 | NB_OMP_MAX=2 # Max number of OMP threads (only for running simulations) |
|---|
| 117 | NB_CORE_PER_NODE_MAX=0 # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific) |
|---|
| 118 | N_HYPERTHREADING=1 # How many hyperthreading threads per physical core |
|---|
| 119 | MPICMD="mpirun -np" # command to run an mpi executable, as $MPICMD <nprocs> <script> |
|---|
| 120 | RUNBASHCMD="bash" # command to run a bash job, as $runbashcmd (nthreads) <script> [nthreads only supplied if =/="bash"] |
|---|
| 121 | SUBMITCMD="." # command to sumbit a job, as $submitcmd <script> |
|---|
| 122 | ;; |
|---|
| 123 | esac |
|---|
| 124 | } |
|---|
| 125 | |
|---|
| 126 | get_hostname |
|---|
| 127 | echo "Setting up lmdz_env on $hostname" |
|---|
| 128 | set_env |
|---|
| 129 | |
|---|
| 130 | if [[ ! (-d $root_dir && -f $root_dir/lmdz_env.sh) ]]; then |
|---|
| 131 | echo "STOP: root_dir $root_dir not found, either you are running on an unsupported cluster, or you haven't edited lmdz_env.sh properly"; exit 1 |
|---|
| 132 | fi |
|---|