source: trunk/MESOSCALE/LMD_MM_MARS/SIMU/MESORUN/launch @ 3610

Last change on this file since 3610 was 2901, checked in by aslmd, 2 years ago

MESORUN: 1) corrected link towards 5.3 starts 2) used scenario 8 clim as a test baseline 3) launch script defaults to 1 processor because on spirit the mesoscale model crashes with parallel computations

File size: 2.4 KB
RevLine 
#!/bin/bash
# SLURM batch header for the MESORUN launch script.
# Active settings: zen4 partition, 1 task (parallel runs crash on spirit),
# 8 h wall time. Commented ##SBATCH lines are kept as alternative presets.
#SBATCH --partition=zen4
##SBATCH --partition=zen16
##SBATCH --ntasks=24
##SBATCH --ntasks=4
#SBATCH --ntasks=1
#SBATCH --time=08:00:00
##SBATCH --mem=6G

# job information (echoed into the job log for traceability)
cat << EOF
------------------------------------------------------------------
Job submit on $SLURM_SUBMIT_HOST by $SLURM_JOB_USER
JobID=$SLURM_JOBID Running_Node=$SLURM_NODELIST
Node=$SLURM_JOB_NUM_NODES Task=$SLURM_NTASKS
------------------------------------------------------------------
EOF
# Begin of section with executable commands
20
########################################################
## standard run 121x121x61 nodes=1:ppn=4,mem=4gb,vmem=6gb
########################################################
### large-domain run 321x321x61 (OK but slower) nodes=1:ppn=16,mem=16gb,vmem=24gb
########################################################
### large-domain run 321x321x61 nodes=1:ppn=32,mem=32gb,vmem=48gb
########################################################


# step selects the first stage to execute (all later stages run too):
# 1 = GCM + meteo prep, 2 = geogrid/metgrid, 3 = real, 4 = wrf only.
step=1
#step=4
# Remember the submission directory; every stage cd's relative to it.
fold=$PWD

#### PREAMBLE
# Unlimited stack: the model uses large automatic arrays.
ulimit -s unlimited
37
#### STEP 1
# Run the GCM to produce start files, then convert its output into
# meteo files for the mesoscale model (create_readmeteo/readmeteo).
if [[ ${step} -le 1 ]] ; then
  # Guard the cd: without it a failure would launch the GCM in the wrong dir.
  cd "$fold/gcm" || exit 1
  #
  ./get_startday_from_namelist.sh | ./launch_gcm
  #
  # Cap GCM processes at 24: more than that is useless at the standard
  # GCM resolution. Default SLURM_NTASKS to 1 for interactive runs.
  if [[ ${SLURM_NTASKS:-1} -gt 24 ]] ; then
    echo "--- Total CPU count is above 24"
    echo "--- For the standard GCM resolution, this is too much"
    echo "--- So we run the GCM only with 24 processors"
    "$WHERE_MPI"/mpirun -np 24 gcm.e > log_gcm
  else
    "$WHERE_MPI"/mpirun gcm.e > log_gcm
  fi
  #
  cd "$fold/prep" || exit 1
  # "1" answers create_readmeteo's interactive prompt non-interactively.
  echo 1 | create_readmeteo.exe
  readmeteo.exe < readmeteo.def
fi
[1434]57
[2877]58
##### STEP 2
# WPS preprocessing: regenerate the static (geogrid) and meteo (metgrid)
# intermediate files, removing stale outputs first.
if [[ ${step} -le 2 ]] ; then
  # Guard the cd so the \rm below cannot delete files in the wrong directory.
  cd "$fold/geogrid" || exit 1
  \rm -f geo_em.*
  geogrid.exe
  #
  cd "$fold/metgrid" || exit 1
  \rm -f met_em.*
  metgrid.exe
fi
[1434]69
##### STEP 3
# Generate the mesoscale initial (wrfi*) and boundary (wrfb*) conditions.
if [[ ${step} -le 3 ]] ; then
  # Guard the cd so the \rm below cannot delete files in the wrong directory.
  cd "$fold/" || exit 1
  \rm -f wrfi* wrfb*
  real.exe
fi
[1434]76
##### STEP 4
# Run the mesoscale model in a per-job directory run_$SLURM_JOBID,
# populated with the namelists, executables and IC/BC files.
if [[ ${step} -le 4 ]] ; then
  rundir="$fold/run_$SLURM_JOBID"
  # ${rundir:?} aborts instead of expanding empty: protects against an
  # unquoted/unset expansion turning this into a destructive rm -rf.
  rm -rf "${rundir:?}"
  mkdir "$rundir"
  # Keep real.exe's logs, renamed so wrf.exe's rsl files do not clobber them.
  mv rsl.error.0000 "$rundir/real_rsl.error.0000"
  mv rsl.out.0000   "$rundir/real_rsl.out.0000"
  # -L dereferences symlinks so the run dir is self-contained.
  cp -rfL "$fold"/*.def            "$rundir/"
  cp -rfL "$fold"/wrf.exe*         "$rundir/"
  cp -rfL "$fold/namelist.input"   "$rundir/"
  cp -rfL "$fold/namelist.wps"     "$rundir/"
  cp -rfL "$fold"/wrfi*            "$rundir/"
  cp -rfL "$fold"/wrfb*            "$rundir/"
  cd "$rundir/" || exit 1
  ln -sf "$fold/data_physics" ./
  "$WHERE_MPI"/mpirun wrf.exe
  #gdb wrf.exe -ex=run
fi
Note: See TracBrowser for help on using the repository browser.