#!/bin/bash
########################################################
# This file loads the required modules and sets the paths for simulations
# To modify the paths:
#    1) In the <set_env> function, find the section corresponding to your machine
#    2) Modify the variables; each one is documented in the *) (default) section
#       at the end of <set_env>
########################################################
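
# Illustrative example (hypothetical values): to cap a local run at 4 MPI ranks
# and 2 OpenMP threads, you would set, in the *) section of <set_env>:
#   NB_MPI_MAX=4
#   NB_OMP_MAX=2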

# <root_dir> is substituted here by setup.sh (via sed)
root_dir=/home/abarral/PycharmProjects/installLMDZ/LMDZ_Setup

function get_hostname {
  if grep -q "Adastra" /etc/motd; then
    hostname="adastra"
  elif command -v idrquota &> /dev/null; then  # idrquota is specific to Jean-Zay
    hostname="jean-zay"
  else
    hostname=$(hostname)
  fi
}
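
# e.g. on a local machine neither check matches, so hostname falls back to
# $(hostname) and set_env lands in the *) default branch below.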

function set_env {  # Platform-specific
  case ${hostname:0:5} in
    jean-)
      module purge
      compilo=19.0.4  # Intel compiler version (2013.0 and 2017.2 are also available)
      module load intel-compilers/$compilo
      #module load intel-mpi/$compilo
      module load intel-mkl/$compilo
      module load hdf5/1.10.5-mpi
      module load netcdf/4.7.2-mpi
      module load netcdf-fortran/4.5.2-mpi
      module load subversion/1.9.7
      # For the gcc module, see: https://trac.lmd.jussieu.fr/LMDZ/wiki/PortageJeanZay
      #module load gcc/6.5.0
      module load nco
      module load cdo
      # Compute time is charged to the default active group (project):
      #   idrproj              shows the default active group (project)
      #   idrproj -d newproj   sets "newproj" as the active project,
      #        so that $STORE, $WORK etc. then point to the spaces of "newproj"
      account="lmd"  # $(idrproj | grep active | awk '{ print $1}') doesn't work on compute nodes
      ARCH="X64_JEANZAY_PBIOIPSL"
      SIMRUNBASEDIR="$SCRATCH/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$root_dir/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=20
      NB_CORE_PER_NODE_MAX=0
      MPICMD="srun -n"
      RUNBASHCMD="srun -A $account@cpu --label -n 1 -c"
      SUBMITCMD="sbatch -A $account@cpu"
      ;;
    spiri)
      module purge
      module load subversion/1.13.0
      module load gcc/11.2.0
      module load openmpi/4.0.7
      module load cdo/2.3.0

      ARCH="X64_MESOIPSL-GNU"
      SIMRUNBASEDIR="$SCRATCH/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$HOME/LMDZ_Init"
      NB_MPI_MAX=5
      NB_OMP_MAX=1
      NB_CORE_PER_NODE_MAX=0
      N_HYPERTHREADING=1
      MPICMD="mpirun -np"  # on spirit, we can't run MPI using srun from within sbatch
      RUNBASHCMD="bash"
      SUBMITCMD="sbatch"
      ;;
    adast)
      module purge
      module load PrgEnv-gnu  # we need to load the env because lmdz links some shared libraries
      module load gcc/13.2.0  # required, see https://dci.dci-gitlab.cines.fr/webextranet/user_support/index.html#prgenv-and-compilers
      export CRAY_CPU_TARGET=x86-64  # to suppress warnings during Cmake netcdf95 build
      export FI_CXI_RX_MATCH_MODE=hybrid  # 09/24 otherwise we get random SIGABRT e.g. "libfabric:2490616:1725895288::cxi:core:cxip_ux_onload_cb():2657<warn> c1456: RXC (0x5130:21) PtlTE 84:[Fatal] LE resources not recovered during flow control. FI_CXI_RX_MATCH_MODE=[hybrid|software] is required"

      function cdo {  # cdo is available as a Spack command, which requires a specific, incompatible env
        unset -f cdo  # remove this wrapper so the real cdo binary is called below
        module purge
        module load develop GCC-CPU-4.0.0 cdo/2.4.2-omp-mpi
        cdo "$@"
        set_env
      }

      function ferret {
        unset -f ferret  # remove this wrapper so the real ferret binary is called below
        module purge
        module load GCC-CPU-3.1.0
        module load ferret
        ferret "$@"
        set_env
      }
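
      # Both wrappers swap module envs around a single call and then restore ours
      # via set_env; e.g. a plain "cdo -V" (version query) works transparently.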

      # Project to charge; parses the first project listed by my_project.py (output format assumed)
      account=$(/usr/sbin/my_project.py -l 2>&1 | head -1 | cut -d " " -f 3- | cut -c 5-)
      ARCH="X64_ADASTRA-GNU"
      SIMRUNBASEDIR="$SCRATCHDIR/"
      LMDZD="$root_dir/LMDZD"
      LMDZ_INIT="$WORKDIR/LMDZ_Init"
      NB_MPI_MAX=2000
      NB_OMP_MAX=200
      NB_CORE_PER_NODE_MAX=192
      N_HYPERTHREADING=1  # Adastra has SMT=2 enabled, but we found no actual performance improvement for the latlon model. Maybe useful for Dynamico ?
      MPICMD="srun -n"
#      RUNBASHCMD="srun --label --account=$account --constraint=GENOA --ntasks-per-node=1 -n 1 --time=00:15:00 -c"
      RUNBASHCMD="bash"  # On Adastra the docs says we can use login nodes for compilation
      SUBMITCMD="env $(env | grep -E "SLURM_|SBATCH_|SRUN_" | cut -d= -f1 | awk '{print "-u " $0}' | tr '\n' ' ' ) sbatch --constraint=GENOA --account=$account"  # we need to remove the existing SLURM variables otherwise they may be unexpectedly inherited by the submitted script
      ;;
    *) echo "WARNING: RUNNING THIS SCRIPT ON A LOCAL COMPUTER IS DISCOURAGED (lackluster cpus and memory)"
      ARCH="local-gfortran-parallel"  # The arch file to use
      SIMRUNBASEDIR="/tmp/SCRATCH/"  # Where the simulations will be executed ($SIMRUNBASEDIR/LMDZ_Setup/...)
      LMDZD="$root_dir/LMDZD"  # Where the sources will be downloaded and compiled
      LMDZ_INIT="$HOME/LMDZ_Init"  # Where to store shared files used for initialisation. Should be outside the LMDZ_Setup dir since it's shared between several LMDZ_Setup.
      NB_MPI_MAX=2  # Max number of MPI cores (only for running simulations)
      NB_OMP_MAX=2  # Max number of OMP threads (only for running simulations)
      NB_CORE_PER_NODE_MAX=0  # Max number of cores per node (real cores, not hyperthreading - only for running simulations, cluster-specific)
      N_HYPERTHREADING=1  # How many hyperthreading threads per physical core
      MPICMD="mpirun -np" # command to run an mpi executable, as $MPICMD <nprocs> <script>
      RUNBASHCMD="bash" # command to run a bash job, as $runbashcmd (nthreads) <script> [nthreads only supplied if =/="bash"]
      SUBMITCMD="."  # command to sumbit a job, as $submitcmd <script>
      ;;
  esac
}

get_hostname
echo "Setting up lmdz_env on $hostname"
set_env

if [[ ! (-d $root_dir && -f $root_dir/.lmdz_setup_root_dir && -f $root_dir/lmdz_env.sh) ]]; then
  echo "STOP: root_dir $root_dir not found, either you are running on an unsupported cluster, or the initialisation failed midway"; exit 1
fi
