#!/bin/bash

########################################################
#PBS -S  /bin/bash
#PBS -j  oe
########################################################
### available queues:
### short std h12 day days3 week weeks2 infini
### 2h    6h  12h 24h 72h   168h 340h   840h
########################################################
#PBS -q  week
#PBS -N  run
########################################################
### single processor (e.g. testing)
##PBS -l  nodes=1:ppn=1
########################################################
### standard run 121x121x61
#PBS -l  nodes=1:ppn=4,mem=4gb,vmem=6gb
########################################################
### large-domain run 321x321x61 (OK but slower)
##PBS -l  nodes=1:ppn=16,mem=16gb,vmem=24gb
########################################################
### large-domain run 321x321x61
##PBS -l  nodes=1:ppn=32,mem=32gb,vmem=48gb
########################################################
### if large memory is needed
##PBS -l  mem=120gb
##PBS -l  vmem=120gb
########################################################
#PBS -v  step=1
## NB: "qsub -v step=XX launch" overrides the value above
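## since steps are cumulative ("step=N" runs steps N to 4),
## e.g. "qsub -v step=3 launch" skips the GCM and (geo|met)grid
## and redoes only real.exe (step 3) and wrf.exe (step 4);
## similarly "qsub -q <queue> launch" overrides the queue set above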
########################################################

#### PREAMBLE
ulimit -s unlimited

# count the processors allocated by PBS (one line per CPU slot in the node file)
NP=$(wc -l < "$PBS_NODEFILE")
echo "Total CPU count = $NP"

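# NB (an addition, not in the original script): outside PBS, e.g. when
# testing interactively, $PBS_NODEFILE is unset and NP comes out empty;
# fall back to a single processor in that case
[[ -z "$NP" ]] && NP=1
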
#### STEP 1: run the GCM and prepare its output for the mesoscale steps
if [[ ${step} -le 1 ]] ; then
  cd $PBS_O_WORKDIR/gcm
  #
  ./get_startday_from_namelist.sh | ./launch_gcm
  #
  if [[ $NP -gt 24 ]] ; then
    echo "--- Total CPU count is above 24"
    echo "--- This is too much for the standard 64x48 GCM resolution"
    echo "--- So the GCM is run with 24 processors only"
    $WHERE_MPI/mpirun -np 24 gcm.e > log_gcm
  else
    $WHERE_MPI/mpirun gcm.e > log_gcm
  fi
  #
  cd $PBS_O_WORKDIR/prep
  echo 1 | create_readmeteo.exe
  readmeteo.exe < readmeteo.def
fi

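# hedged sanity check (an addition, not in the original workflow): make
# sure the GCM actually wrote a log before the next steps use its output
if [[ ${step} -le 1 && ! -s $PBS_O_WORKDIR/gcm/log_gcm ]] ; then
  echo "--- log_gcm is missing or empty, aborting" ; exit 1
fi
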
#### STEP 2: define the domain (geogrid) and interpolate the GCM fields (metgrid)
if [[ ${step} -le 2 ]] ; then
  cd $PBS_O_WORKDIR/geogrid
  \rm geo_em.*
  geogrid.exe
  #
  cd $PBS_O_WORKDIR/metgrid
  \rm met_em.*
  metgrid.exe
fi

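# hedged sanity check (an addition): metgrid should have recreated the
# met_em files deleted above; stop before real.exe if none are there
if [[ ${step} -le 2 ]] ; then
  ls $PBS_O_WORKDIR/metgrid/met_em.* > /dev/null 2>&1 \
    || { echo "--- no met_em files produced, aborting" ; exit 1 ; }
fi
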
#### STEP 3: build initial and boundary conditions (real.exe)
if [[ ${step} -le 3 ]] ; then
  cd $PBS_O_WORKDIR/
  \rm wrfi* wrfb*
  real.exe
fi

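# hedged sanity check (an addition): real.exe should have produced the
# wrfi* (initial state) and wrfb* (boundary) files that step 4 copies
if [[ ${step} -le 3 ]] ; then
  ls $PBS_O_WORKDIR/wrfi* > /dev/null 2>&1 \
    || { echo "--- no wrfi* files produced, aborting" ; exit 1 ; }
fi
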
#### STEP 4: archive the run setup in a job-named directory and run WRF there
if [[ ${step} -le 4 ]] ; then
  rm -rf $PBS_O_WORKDIR/$PBS_JOBNAME
  mkdir $PBS_O_WORKDIR/$PBS_JOBNAME
  # keep the rsl logs written by real.exe in step 3 under a real_ prefix
  mv rsl.error.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.error.0000
  mv rsl.out.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.out.0000
  cp -rfL  $PBS_O_WORKDIR/*.def           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrf.exe*        $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/namelist.input  $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/namelist.wps    $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrfi*           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrfb*           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cd $PBS_O_WORKDIR/$PBS_JOBNAME/
  ln -sf $PBS_O_WORKDIR/data_physics ./
  $WHERE_MPI/mpirun wrf.exe
  #gdb wrf.exe -ex=run
fi
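
# hedged post-run check (an addition): WRF normally ends its rsl log with
# a success message; this grep is only indicative, inspect rsl.* otherwise
grep -q "SUCCESS COMPLETE WRF" rsl.error.0000 2> /dev/null \
  && echo "--- wrf.exe reported success" \
  || echo "--- no success message, check rsl.out.* / rsl.error.*"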