source: trunk/MESOSCALE/LMD_MM_MARS/SIMU/MESORUN/launch

Last change on this file was 3988, checked in by aslmd, 2 weeks ago

MESORUN: spirit launch script: switched back to multiprocessor runs now that parallel runs are fixed (see updates in branch mesoscale_compile-run-spirit). Also added a more comfortable 64G memory request. The test case successfully runs on the requested 24 procs.

File size: 2.4 KB
#!/bin/bash
##SBATCH --partition=zen4
#SBATCH --partition=zen16
#SBATCH --ntasks=24
##SBATCH --ntasks=4
##SBATCH --ntasks=1
#SBATCH --time=08:00:00
##SBATCH --mem=6G
#SBATCH --mem=64G

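# Resource request above: 24 MPI tasks on the zen16 partition with 64 GB of memory
# and an 8-hour walltime; the commented-out lines keep the earlier alternatives (zen4, 1 or 4 tasks, 6 GB)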
# job information
cat << EOF
------------------------------------------------------------------
Job submitted on $SLURM_SUBMIT_HOST by $SLURM_JOB_USER
JobID=$SLURM_JOBID Running_Node=$SLURM_NODELIST
Node=$SLURM_JOB_NUM_NODES Task=$SLURM_NTASKS
------------------------------------------------------------------
EOF
# Beginning of section with executable commands

########################################################
## standard run 121x121x61 nodes=1:ppn=4,mem=4gb,vmem=6gb
########################################################
### large-domain run 321x321x61 (OK but slower) nodes=1:ppn=16,mem=16gb,vmem=24gb
########################################################
### large-domain run 321x321x61 nodes=1:ppn=32,mem=32gb,vmem=48gb
########################################################

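# step selects the first stage to run:
#   1 = GCM + meteo conversion, 2 = geogrid/metgrid, 3 = real.exe, 4 = wrf.exe
# e.g. set step=4 to redo only the mesoscale integration from existing wrfi*/wrfb* files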
step=1
#step=4
fold=$PWD

#### PREAMBLE
ulimit -s unlimited

#### STEP 1
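# run the GCM (gcm.e) for the start day read from the namelist, capped at 24 processors,
# then convert its output into meteo files for the mesoscale chain (create_readmeteo.exe / readmeteo.exe)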
if [[ ${step} -le 1 ]] ; then
  cd $fold/gcm
  #
  ./get_startday_from_namelist.sh | ./launch_gcm
  #
  if [[ $SLURM_NTASKS -gt 24 ]] ; then
    echo "--- Total CPU count is above 24"
    echo "--- For the standard GCM resolution, this is too much"
    echo "--- So we run the GCM with only 24 processors"
    $WHERE_MPI/mpirun -np 24 gcm.e > log_gcm
  else
    $WHERE_MPI/mpirun gcm.e > log_gcm
  fi
  #
  cd $fold/prep
  echo 1 | create_readmeteo.exe
  readmeteo.exe < readmeteo.def
fi

##### STEP 2
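# WPS preprocessing: geogrid.exe builds the static geo_em.* files, metgrid.exe the met_em.* files
# (previous outputs are removed first)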
if [[ ${step} -le 2 ]] ; then
  cd $fold/geogrid
  \rm geo_em.*
  geogrid.exe
  #
  cd $fold/metgrid
  \rm met_em.*
  metgrid.exe
fi

##### STEP 3
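# real.exe generates fresh initial and boundary condition files (wrfi*, wrfb*) in $fold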
if [[ ${step} -le 3 ]] ; then
  cd $fold/
  \rm wrfi* wrfb*
  real.exe
fi

##### STEP 4
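# run wrf.exe in a dedicated run_$SLURM_JOBID directory: the namelists, definition files,
# executable and wrfi*/wrfb* inputs are copied there, data_physics is symlinked, then mpirun launches wrf.exe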
if [[ ${step} -le 4 ]] ; then
  rm -rf $fold/run_$SLURM_JOBID
  mkdir $fold/run_$SLURM_JOBID
  mv rsl.error.0000 $fold/run_$SLURM_JOBID/real_rsl.error.0000
  mv rsl.out.0000 $fold/run_$SLURM_JOBID/real_rsl.out.0000
  cp -rfL  $fold/*.def           $fold/run_$SLURM_JOBID/
  cp -rfL  $fold/wrf.exe*        $fold/run_$SLURM_JOBID/
  cp -rfL  $fold/namelist.input  $fold/run_$SLURM_JOBID/
  cp -rfL  $fold/namelist.wps    $fold/run_$SLURM_JOBID/
  cp -rfL  $fold/wrfi*           $fold/run_$SLURM_JOBID/
  cp -rfL  $fold/wrfb*           $fold/run_$SLURM_JOBID/
  cd $fold/run_$SLURM_JOBID/
  ln -sf $fold/data_physics ./
  $WHERE_MPI/mpirun wrf.exe
  #gdb wrf.exe -ex=run
fi