Changeset 2877 for trunk/MESOSCALE


Ignore:
Timestamp:
Jan 24, 2023, 1:37:51 PM (22 months ago)
Author:
aslmd
Message:

A first adaptation of the launch script to Spirit

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/MESOSCALE/LMD_MM_MARS/SIMU/MESORUN/launch

    r2499 r2877  
    1 #! /bin/bash
     1#!/bin/bash
     2#SBATCH --partition=zen4
     3#SBATCH --ntasks=24
     4#SBATCH --time=02:00:00
     5
     6# job information
     7cat << EOF
     8------------------------------------------------------------------
     9Job submit on $SLURM_SUBMIT_HOST by $SLURM_JOB_USER
     10JobID=$SLURM_JOBID Running_Node=$SLURM_NODELIST
     11Node=$SLURM_JOB_NUM_NODES Task=$SLURM_NTASKS
     12------------------------------------------------------------------
     13EOF
     14# Begin of section with executable commands
     15
    216
    317########################################################
    4 #PBS -S  /bin/bash
    5 #PBS -j  oe
     18## standard run 121x121x61 nodes=1:ppn=4,mem=4gb,vmem=6gb
    619########################################################
    7 ### available queues:
    8 ### short std h12 day days3 week weeks2 infini
    9 ### 2h    6h  12h 24h 72h   168h 340h   840h
     20### large-domain run 321x321x61 (OK but slower) nodes=1:ppn=16,mem=16gb,vmem=24gb
    1021########################################################
    11 #PBS -q  week
    12 #PBS -N  run
     22### large-domain run 321x321x61 nodes=1:ppn=32,mem=32gb,vmem=48gb
    1323########################################################
    14 ### single processor (e.g. testing)
    15 ##PBS -l  nodes=1:ppn=1
    16 ########################################################
    17 ## standard run 121x121x61
    18 #PBS -l  nodes=1:ppn=4,mem=4gb,vmem=6gb
    19 ########################################################
    20 ### large-domain run 321x321x61 (OK but slower)
    21 ##PBS -l  nodes=1:ppn=16,mem=16gb,vmem=24gb
    22 ########################################################
    23 ### large-domain run 321x321x61
    24 ##PBS -l  nodes=1:ppn=32,mem=32gb,vmem=48gb
    25 ########################################################
    26 ### need for large memory
    27 ##PBS -l  mem=120gb
    28 ##PBS -l  vmem=120gb
    29 ########################################################
    30 #PBS -v  step=1
    31 ## NB: "qsub -v step=XX launch" overrides value above
    32 ########################################################
     24
     25
     26
     27step=1
     28fold=$PWD
    3329
    3430#### PREAMBLE
    3531ulimit -s unlimited
    3632
    37 # This finds out the number of nodes we have
    38 NP=$(wc -l $PBS_NODEFILE | awk '{print $1}')
    39 echo "Total CPU count = $NP"
    40 
    4133#### STEP 1
    4234if [[ ${step} -le 1 ]] ; then
    43   cd $PBS_O_WORKDIR/gcm
     35  cd $fold/gcm
    4436  #
    4537  ./get_startday_from_namelist.sh | ./launch_gcm
    4638  #
    47   if [[ $NP -gt 24 ]] ; then
     39  if [[ $SLURM_NTASKS -gt 24 ]] ; then
    4840    echo "--- Total CPU count is above 24"
    4941    echo "--- For the standard GCM resolution, this is too much"
     
    5446  fi
    5547  #
    56   cd $PBS_O_WORKDIR/prep
     48  cd $fold/prep
    5749  echo 1 | create_readmeteo.exe
    5850  readmeteo.exe < readmeteo.def
    5951fi
    6052
     53stop
     54
    6155##### STEP 2
    6256if [[ ${step} -le 2 ]] ; then
    63   cd $PBS_O_WORKDIR/geogrid
     57  cd $fold/geogrid
    6458  \rm geo_em.*
    6559  geogrid.exe
    6660  #
    67   cd $PBS_O_WORKDIR/metgrid
     61  cd $fold/metgrid
    6862  \rm met_em.*
    6963  metgrid.exe
     
    7266##### STEP 3
    7367if [[ ${step} -le 3 ]] ; then
    74   cd $PBS_O_WORKDIR/
     68  cd $fold/
    7569  \rm wrfi* wrfb*
    7670  real.exe
     
    7973##### STEP 4
    8074if [[ ${step} -le 4 ]] ; then
    81   rm -rf $PBS_O_WORKDIR/$PBS_JOBNAME
    82   mkdir $PBS_O_WORKDIR/$PBS_JOBNAME
    83   mv rsl.error.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.error.0000
    84   mv rsl.out.0000 $PBS_O_WORKDIR/$PBS_JOBNAME/real_rsl.out.0000
    85   cp -rfL  $PBS_O_WORKDIR/*.def           $PBS_O_WORKDIR/$PBS_JOBNAME/
    86   cp -rfL  $PBS_O_WORKDIR/wrf.exe*        $PBS_O_WORKDIR/$PBS_JOBNAME/
    87   cp -rfL  $PBS_O_WORKDIR/namelist.input  $PBS_O_WORKDIR/$PBS_JOBNAME/
    88   cp -rfL  $PBS_O_WORKDIR/namelist.wps    $PBS_O_WORKDIR/$PBS_JOBNAME/
    89   cp -rfL  $PBS_O_WORKDIR/wrfi*           $PBS_O_WORKDIR/$PBS_JOBNAME/
    90   cp -rfL  $PBS_O_WORKDIR/wrfb*           $PBS_O_WORKDIR/$PBS_JOBNAME/
    91   cd $PBS_O_WORKDIR/$PBS_JOBNAME/
    92   ln -sf $PBS_O_WORKDIR/data_physics ./
     75  rm -rf $fold/run_$SLURM_JOBID
     76  mkdir $fold/run_$SLURM_JOBID
     77  mv rsl.error.0000 $fold/run_$SLURM_JOBID/real_rsl.error.0000
     78  mv rsl.out.0000 $fold/run_$SLURM_JOBID/real_rsl.out.0000
     79  cp -rfL  $fold/*.def           $fold/run_$SLURM_JOBID/
     80  cp -rfL  $fold/wrf.exe*        $fold/run_$SLURM_JOBID/
     81  cp -rfL  $fold/namelist.input  $fold/run_$SLURM_JOBID/
     82  cp -rfL  $fold/namelist.wps    $fold/run_$SLURM_JOBID/
     83  cp -rfL  $fold/wrfi*           $fold/run_$SLURM_JOBID/
     84  cp -rfL  $fold/wrfb*           $fold/run_$SLURM_JOBID/
     85  cd $fold/run_$SLURM_JOBID/
     86  ln -sf $fold/data_physics ./
    9387  $WHERE_MPI/mpirun wrf.exe
    9488  #gdb wrf.exe -ex=run
Note: See TracChangeset for help on using the changeset viewer.