#!/bin/bash
########################################################
# This file loads the required modules and sets the paths for simulations
# To modify the paths:
#   1) In the set_env function, find the section corresponding to your machine
#   2) Modify the variables, which are documented in the *) section of set_env
# See the end of set_env for the explanation of each variable
########################################################

# root_dir will be set by sed by setup.sh here
root_dir=/home/hourdin/TMP/TestI
pub_store=1

function get_hostname {
  if grep -q "Adastra" /etc/motd 2>/dev/null; then
    hostname="adastra"
  elif which idrquota &> /dev/null; then
    hostname="jean-zay"
  else
    hostname=$(hostname)
  fi
}
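# Usage sketch (an assumption about how this file is consumed; nothing in this file enforces it):
# callers are expected to source this script and then run
#   get_hostname
#   set_env
# so that set_env can dispatch on the $hostname value detected above.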
function set_env {  # Platform-specific settings
  case ${hostname:0:5} in
    #-------------------------------------------------------------------------
    jean-)  # Jean-Zay, IDRIS-CNRS supercomputer
    #-------------------------------------------------------------------------
      module purge
      compilo=19.0.4  # available: 2013.0, 2017.2
      module load intel-compilers/$compilo
      #module load intel-mpi/$compilo
      module load intel-mkl/$compilo
      module load hdf5/1.10.5-mpi
      module load netcdf/4.7.2-mpi
      module load netcdf-fortran/4.5.2-mpi
      module load subversion/1.9.7
      # For the gcc module, see: https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay
      #module load gcc/6.5.0
      module load nco
      module load cdo

      # Computing time is charged to the default active group (project):
      #   "idrproj" shows the default active group (project),
      #   "idrproj -d newproj" redefines "newproj" as the active project,
      #   after which $STORE, $WORK etc. point to the spaces of "newproj"
      account="lmd"  # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes

      ARCH="X64_JEANZAY_PBIOIPSL"
      SIMRUNBASEDIR="$SCRATCH/$(basename $root_dir)"
      LMDZD="$WORK/LMDZD"
      LMDZ_INIT="$WORK/LMDZ/pub"
      NB_MPI_MAX=2000
      NB_OMP_MAX=20
      NB_CORE_PER_NODE_MAX=0
      MPICMD="srun -n"
      RUNBASHCMD="srun -A $account@cpu --label -n 1 -c"
      #SUBMITCMD="sbatch -A $account@cpu"
      submitcmd() {
        sbatch -A $account@cpu "$1"
      }
      ;;
    #-------------------------------------------------------------------------
    spiri)  # Spirit: IPSL cluster
    #-------------------------------------------------------------------------
      module purge
      module load subversion/1.13.0
      module load gcc/11.2.0
      module load openmpi/4.0.7
      module load cdo/2.3.0

      ARCH="X64_MESOIPSL-GNU"
      SIMRUNBASEDIR="$SCRATCH/$(basename $root_dir)"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$HOME/LMDZ/pub"
      NB_MPI_MAX=5
      NB_OMP_MAX=1
      NB_CORE_PER_NODE_MAX=0
      N_HYPERTHREADING=1
      MPICMD="mpirun -np"  # on Spirit, we can't run MPI using srun from within sbatch
      RUNBASHCMD="bash"
      #SUBMITCMD="sbatch"
      submitcmd() {
        sbatch "$1"
      }
      ;;
    #-------------------------------------------------------------------------
    adast)  # Adastra, CINES computing center
    #-------------------------------------------------------------------------
      module purge
      module load PrgEnv-gnu  # we need to load the env because lmdz links some shared libraries
      module load gcc/13.2.0  # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers
      export CRAY_CPU_TARGET=x86-64  # to suppress warnings during the CMake netcdf95 build
      export FI_CXI_RX_MATCH_MODE=hybrid  # 09/24: otherwise we get random SIGABRT, e.g. "libfabric:2490616:1725895288::cxi:core:cxip_ux_onload_cb():2657 c1456: RXC (0x5130:21) PtlTE 84:[Fatal] LE resources not recovered during flow control. FI_CXI_RX_MATCH_MODE=[hybrid|software] is required"

      function cdo {  # cdo is available as a Spack cmd which requires a specific, incompatible env
        unset cdo
        module purge
        module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi
        cdo "$@"
        set_env
      }

      function ferret {
        unset ferret
        module purge
        module load GCC-CPU-3.1.0
        module load ferret
        ferret "$@"
        set_env
      }

      account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-)

      ARCH="X64_ADASTRA-GNU"
      SIMRUNBASEDIR="$SCRATCHDIR/$(basename $root_dir)"
      LMDZD="$WORKDIR/LMDZD"
      LMDZ_INIT="$WORKDIR/LMDZ/pub"
      NB_MPI_MAX=2000
      NB_OMP_MAX=200
      NB_CORE_PER_NODE_MAX=192
      N_HYPERTHREADING=1  # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for DYNAMICO?
      MPICMD="srun -n"
      # RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c"
      RUNBASHCMD="bash"  # On Adastra the docs say we can use login nodes for compilation
      #SUBMITCMD="env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account"
      # we need to remove the existing SLURM variables, otherwise they may be unexpectedly inherited by the submitted script
      submitcmd() {
        env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ') sbatch --constraint=GENOA --account=$account "$1"
      }
      ;;
    #-------------------------------------------------------------------------
    *)  # Local machine
    #-------------------------------------------------------------------------
      ARCH="local-gfortran-parallel"      # The arch file to use
      SIMRUNBASEDIR="$root_dir/SCRATCH/"  # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...)
      LMDZD="$root_dir/../LMDZD"          # Where the sources will be downloaded and compiled
      if [[ $pub_store ]]; then LMDZ_INIT="$HOME/LMDZ/pub"; else LMDZ_INIT="$root_dir/LMDZ/pub"; fi
      NB_MPI_MAX=2                        # Max number of MPI cores (only for running simulations)
      NB_OMP_MAX=2                        # Max number of OMP threads (only for running simulations)
      NB_CORE_PER_NODE_MAX=0              # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific)
      N_HYPERTHREADING=1                  # How many hyperthreading threads per physical core
      MPICMD="mpirun -np"                 # command to run an mpi executable, as $MPICMD
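      # Illustration (hypothetical, not part of the original settings): downstream run scripts
      # are expected to combine the variables above roughly as
      #   $MPICMD <nprocs> <executable>
      # with <nprocs> no larger than $NB_MPI_MAX and the OpenMP thread count no larger than $NB_OMP_MAX.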