#!/bin/bash
# ---------------------------------------------------------------------------
# SLURM resource directives.
# Lines starting with '##SBATCH' are deactivated alternatives kept for
# reference; only '#SBATCH' lines are read by the scheduler.
# ---------------------------------------------------------------------------
##SBATCH --partition=zen4
#SBATCH --partition=zen16
# MPI task count: 24 for the standard run (smaller test counts kept below).
#SBATCH --ntasks=24
##SBATCH --ntasks=4
##SBATCH --ntasks=1
#SBATCH --time=08:00:00
##SBATCH --mem=6G
#SBATCH --mem=64G

# job information
# Print a short job summary built from SLURM-provided environment variables
# (submit host, user, job id, node list, node/task counts).
cat << EOF
------------------------------------------------------------------
Job submit on $SLURM_SUBMIT_HOST by $SLURM_JOB_USER
JobID=$SLURM_JOBID Running_Node=$SLURM_NODELIST
Node=$SLURM_JOB_NUM_NODES Task=$SLURM_NTASKS
------------------------------------------------------------------
EOF
# Begin of section with executable commands


########################################################
## standard run 121x121x61 nodes=1:ppn=4,mem=4gb,vmem=6gb
########################################################
### large-domain run 321x321x61 (OK but slower) nodes=1:ppn=16,mem=16gb,vmem=24gb
########################################################
### large-domain run 321x321x61 nodes=1:ppn=32,mem=32gb,vmem=48gb
########################################################


# First step of the chain to execute:
#   1 = full chain (GCM -> geogrid/metgrid -> real -> wrf)
#   4 = rerun only the final WRF stage
step=1
#step=4
# Root experiment directory; the script is expected to be submitted from the
# folder that contains gcm/, prep/, geogrid/, metgrid/, ...
fold=$PWD

#### PREAMBLE
# Remove the stack-size limit — presumably needed by the model executables
# for large automatic arrays (TODO confirm with the model documentation).
ulimit -s unlimited
#### STEP 1: run the GCM, then convert its output into meteo input files
if [[ ${step} -le 1 ]] ; then
  # Abort if the directory is missing rather than running in the wrong place.
  cd "$fold/gcm" || exit 1
  #
  # Feed the start day extracted from the namelist to the GCM launcher.
  ./get_startday_from_namelist.sh | ./launch_gcm
  #
  # The standard GCM resolution cannot exploit more than 24 MPI tasks,
  # so cap the process count when the allocation is larger.
  if [[ $SLURM_NTASKS -gt 24 ]] ; then
    echo "--- Total CPU count is above 24"
    echo "--- For the standard GCM resolution, this is too much"
    echo "--- So we run the GCM only with 24 processors"
    "$WHERE_MPI"/mpirun -np 24 gcm.e > log_gcm
  else
    "$WHERE_MPI"/mpirun gcm.e > log_gcm
  fi
  #
  # Post-process the GCM output into meteorological files for the next steps.
  cd "$fold/prep" || exit 1
  echo 1 | create_readmeteo.exe
  readmeteo.exe < readmeteo.def
fi

##### STEP 2: build the model domain (geogrid) and interpolate meteo (metgrid)
if [[ ${step} -le 2 ]] ; then
  cd "$fold/geogrid" || exit 1
  # \rm bypasses any interactive 'rm' alias; clear stale output first.
  \rm geo_em.*
  geogrid.exe
  #
  cd "$fold/metgrid" || exit 1
  \rm met_em.*
  metgrid.exe
fi

##### STEP 3: generate initial/boundary condition files (real.exe)
if [[ ${step} -le 3 ]] ; then
  cd "$fold/" || exit 1
  # Remove stale wrfi*/wrfb* files before real.exe regenerates them.
  \rm wrfi* wrfb*
  real.exe
fi

##### STEP 4: run WRF in a fresh per-job directory
if [[ ${step} -le 4 ]] ; then
  # ${var:?} aborts if fold or SLURM_JOBID is empty/unset, so the 'rm -rf'
  # below can never degenerate into removing an unintended path.
  rundir="${fold:?}/run_${SLURM_JOBID:?}"
  rm -rf "$rundir"
  mkdir "$rundir"
  # Keep the rsl logs produced by real.exe in step 3 under distinct names.
  mv rsl.error.0000 "$rundir/real_rsl.error.0000"
  mv rsl.out.0000 "$rundir/real_rsl.out.0000"
  # Copy (dereferencing symlinks, -L) everything the run needs.
  cp -rfL "$fold"/*.def "$rundir/"
  cp -rfL "$fold"/wrf.exe* "$rundir/"
  cp -rfL "$fold/namelist.input" "$rundir/"
  cp -rfL "$fold/namelist.wps" "$rundir/"
  cp -rfL "$fold"/wrfi* "$rundir/"
  cp -rfL "$fold"/wrfb* "$rundir/"
  cd "$rundir/" || exit 1
  ln -sf "$fold/data_physics" ./
  "$WHERE_MPI"/mpirun wrf.exe
  #gdb wrf.exe -ex=run
fi