source: trunk/MESOSCALE/LMD_MM_MARS/SIMU/MESORUN/launch @ 2613

Last change on this file since 2613 was 2499, checked in by aslmd, 4 years ago

MESOSCALE workflow: better header for submission to ciclad. Added a solution to run everything in a row (GCM to mesoscale) even when using a number of processors above the limit for standard 64x48 GCM runs (that is, 24 processors). Call the script get_startday_from_namelist.sh instead of inline bash code.
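Typical usage, following the notes in the script header below: submit with qsub and optionally choose the first step to execute, e.g. "qsub launch" to run the whole chain (GCM, geogrid/metgrid, real.exe, wrf.exe) or "qsub -v step=4 launch" to redo only the final wrf.exe stage.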

#! /bin/bash

########################################################
#PBS -S  /bin/bash
#PBS -j  oe
########################################################
### available queues:
### short std h12 day days3 week weeks2 infini
### 2h    6h  12h 24h 72h   168h 340h   840h
########################################################
#PBS -q  week
#PBS -N  run
########################################################
### single processor (e.g. testing)
##PBS -l  nodes=1:ppn=1
########################################################
### standard run 121x121x61
#PBS -l  nodes=1:ppn=4,mem=4gb,vmem=6gb
########################################################
### large-domain run 321x321x61 (OK but slower)
##PBS -l  nodes=1:ppn=16,mem=16gb,vmem=24gb
########################################################
### large-domain run 321x321x61
##PBS -l  nodes=1:ppn=32,mem=32gb,vmem=48gb
########################################################
### need for large memory
##PBS -l  mem=120gb
##PBS -l  vmem=120gb
########################################################
#PBS -v  step=1
## NB: "qsub -v step=XX launch" overrides value above
########################################################
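### the script proceeds through four steps (see below);
### a later step can be run alone via qsub -v step=N
###  step=1 : run the GCM and convert its output
###  step=2 : run geogrid.exe and metgrid.exe
###  step=3 : run real.exe (initial/boundary conditions)
###  step=4 : archive the setup and run wrf.exe
########################################################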

#### PREAMBLE
ulimit -s unlimited

# count the CPUs allocated to this job
# (the PBS node file lists one line per processor slot)
NP=$(wc -l < "$PBS_NODEFILE")
echo "Total CPU count = $NP"

#### STEP 1
if [[ ${step} -le 1 ]] ; then
  cd $PBS_O_WORKDIR/gcm
  #
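  # feed the simulation start day, extracted from the GCM namelist
  # by get_startday_from_namelist.sh, to the launch_gcm script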
  ./get_startday_from_namelist.sh | ./launch_gcm
  #
  if [[ $NP -gt 24 ]] ; then
    echo "--- Total CPU count is above 24"
    echo "--- For the standard GCM resolution, this is too much"
    echo "--- So we run the GCM only with 24 processors"
    $WHERE_MPI/mpirun -np 24 gcm.e > log_gcm
  else
    $WHERE_MPI/mpirun gcm.e > log_gcm
  fi
  #
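  # convert the GCM output into meteorological files for the mesoscale
  # preprocessor (the piped "1" answers create_readmeteo.exe's prompt;
  # presumably the default choice)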
  cd $PBS_O_WORKDIR/prep
  echo 1 | create_readmeteo.exe
  readmeteo.exe < readmeteo.def
fi

#### STEP 2
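### geogrid.exe defines the domain grid and interpolates static fields;
### metgrid.exe then interpolates the meteorological files onto that grid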
if [[ ${step} -le 2 ]] ; then
  cd $PBS_O_WORKDIR/geogrid
  \rm geo_em.*
  geogrid.exe
  #
  cd $PBS_O_WORKDIR/metgrid
  \rm met_em.*
  metgrid.exe
fi

#### STEP 3
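### real.exe generates initial (wrfi*) and boundary (wrfb*) condition
### files; stale ones are removed beforehand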
if [[ ${step} -le 3 ]] ; then
  cd $PBS_O_WORKDIR/
  \rm wrfi* wrfb*
  real.exe
fi

#### STEP 4
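### archive the run setup into a directory named after the job
### ($PBS_JOBNAME, set by #PBS -N above), then run wrf.exe from there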
if [[ ${step} -le 4 ]] ; then
  # work from the submission directory
  # (needed when starting directly at this step: PBS jobs do not start there)
  cd $PBS_O_WORKDIR
  rm -rf $PBS_O_WORKDIR/$PBS_JOBNAME
  mkdir $PBS_O_WORKDIR/$PBS_JOBNAME
  # keep the logs written by real.exe in the previous step
  mv rsl.error.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.error.0000
  mv rsl.out.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.out.0000
  cp -rfL  $PBS_O_WORKDIR/*.def           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrf.exe*        $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/namelist.input  $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/namelist.wps    $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrfi*           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cp -rfL  $PBS_O_WORKDIR/wrfb*           $PBS_O_WORKDIR/$PBS_JOBNAME/
  cd $PBS_O_WORKDIR/$PBS_JOBNAME/
  ln -sf $PBS_O_WORKDIR/data_physics ./
  $WHERE_MPI/mpirun wrf.exe
  ## debugging alternative:
  #gdb wrf.exe -ex=run
fi