source: trunk/LMDZ.COMMON/libf/evolution/deftank/pcm_run.job @ 4076

Last change on this file since 4076 was 4074, checked in by jbclement, 3 weeks ago

PEM:

  • Correct management of H2O ice tendency in 1D when there is no longer enough ice.
  • Clean initialization of allocatable module arrays (especially needed when no slope)
  • One more renaming for consistency + a few small updates throughout the code.

JBC

  • Property svn:executable set to *
File size: 2.7 KB
RevLine 
#!/bin/bash
### Partition to use
#SBATCH --account=cin0391
#SBATCH --constraint=GENOA
### Number of nodes/cores to use
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=24
#SBATCH --cpus-per-task=8
#SBATCH --threads-per-core=1 # --hint=nomultithread
### Job information
#SBATCH --job-name=jobPCM0
#SBATCH --output=jobPCM_%j.out
#SBATCH --time=4:00:00

########################################################################
# Modify here the parameters depending on your setup
####################################################
# Path to the arch.env to source:
source ../trunk/LMDZ.COMMON/arch.env

# Number of threads to use (must be the same as "#SBATCH --cpus-per-task=" above)
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export OMP_STACKSIZE=400M

# Name of executable for the PCM:
pcm_exe="gcm_64x48x32_phymars_para.e"

# Execution command:
exec_cmd="srun --ntasks-per-node=${SLURM_NTASKS_PER_NODE} --cpu-bind=none --mem-bind=none --label -- ./adastra_cpu_binding.sh ./$pcm_exe"
########################################################################


ulimit -s unlimited

# Running the PCM
# The workflow state file holds the simulation counters on a single line:
# <n_yr_sim> <ntot_yr_sim> <r_plnt2earth_yr> <i_pcm_run> <i_pem_run> <n_pcm_runs> <n_pcm_runs_ini>
# '-r' keeps 'read' from interpreting backslashes in the fields.
read -r n_yr_sim ntot_yr_sim r_plnt2earth_yr i_pcm_run i_pem_run n_pcm_runs n_pcm_runs_ini < pem_workflow.sts
echo "Run \"PCM $i_pcm_run\" is starting."
cp run_pcm.def run.def
eval "$exec_cmd > run.log 2>&1"
# A successful run must leave a "restartfi.nc" AND report success (matched
# case-insensitively) near the end of its log; anything else is a crash.
if [ ! -f "restartfi.nc" ] || ! (tail -n 100 run.log | grep -iq "everything is cool!"); then # Check if it ended abnormally
    echo "Error: the run \"PCM $i_pcm_run\" crashed!"
    # NOTE(review): 'exec_mode' is not set anywhere in this script; it is
    # presumably exported by the submitting PEM workflow — confirm. Default
    # to 0 so the test cannot fail with "unary operator expected" when unset.
    if [ "${exec_mode:-0}" -ne 0 ]; then
        echo "Be careful: there may be dependent jobs remaining in the queue! You can cancel them by executing the script \"kill_pem_workflow.sh\"."
    fi
    exit 1
fi

# Copy data files and prepare the next run
mv run.log "logs/runPCM${i_pcm_run}.log"
if [ -f "diagfi.nc" ]; then
    mv diagfi.nc "diags/diagfi${i_pcm_run}.nc"
fi
if [ -f "diagsoil.nc" ]; then
    mv diagsoil.nc "diags/diagsoil${i_pcm_run}.nc"
fi
if [ -f "stats.nc" ]; then
    mv stats.nc "diags/stats${i_pcm_run}.nc"
fi
# NOTE(review): with k=0 the block below is dead; 'k' looks like a placeholder
# rewritten by the surrounding PEM workflow scripts — verify against them.
k=0
if [ "$(echo "$k > 0" | bc)" -eq 1 ]; then # Only the last 2 years are taken for the PEM
    cp Xoutdaily4pem.nc "Xoutdaily4pem_Y${k}.nc"
    cp Xoutyearly4pem.nc "Xoutyearly4pem_Y${k}.nc"
fi
mv Xoutdaily4pem.nc "diags/Xoutdaily4pem${i_pcm_run}.nc"
mv Xoutyearly4pem.nc "diags/Xoutyearly4pem${i_pcm_run}.nc"
cp restartfi.nc "starts/restartfi${i_pcm_run}.nc"
mv restartfi.nc startfi.nc
# 3D runs produce "restart.nc"; 1D runs produce "restart1D.txt" instead.
if [ -f "restart.nc" ]; then
    cp restart.nc "starts/restart${i_pcm_run}.nc"
    mv restart.nc start.nc
elif [ -f "restart1D.txt" ]; then
    cp restart1D.txt "starts/restart1D${i_pcm_run}.txt"
    mv restart1D.txt start1D.txt
fi
# Advance the PCM run counter and write the updated counters back to line 1
# of the state file for the next job in the workflow.
((i_pcm_run++))
sed -i "1s/.*/$n_yr_sim $ntot_yr_sim $r_plnt2earth_yr $i_pcm_run $i_pem_run $n_pcm_runs $n_pcm_runs_ini/" pem_workflow.sts
Note: See TracBrowser for help on using the repository browser.