Script_intelmpi-hdf5.sh

#!/bin/bash
#SBATCH -N 300
##SBATCH --ntasks-per-node=48
#SBATCH --exclusive
#SBATCH --exclude=cn001
#SBATCH --time=00:30:00
#SBATCH --job-name=Grand_Potential_MPI
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --partition=<queue name>

module load spack/0.17
. /home-ext/apps/spack/share/spack/setup-env.sh
spack load intel-mpi@2019.10.317 /6icwzn3
spack load hdf5 /ssgjscn
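
# Optional sanity check (illustrative addition, not part of the original script):
# confirm the spack-loaded toolchain is actually on PATH before launching.
# 'mpiexec.hydra' ships with Intel MPI; 'h5pcc' is the parallel HDF5 compiler wrapper
# and may be absent if this hdf5 installation was built without MPI support.
which mpiexec.hydra || { echo "Intel MPI launcher not found on PATH" >&2; exit 1; }
which h5pcc || echo "warning: parallel HDF5 wrapper (h5pcc) not found on PATH" >&2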

# Open MPI equivalent (not used with Intel MPI): export OMPI_MCA_routed=direct
# Intel MPI specific settings
export I_MPI_FALLBACK=disable   # fail if the requested fabric is unavailable instead of falling back to a slower one

export I_MPI_FABRICS=shm:ofi    # Intel MPI 2019 supports shm and ofi only; dapl was removed in this release
export I_MPI_DEBUG=9            # verbose debug output (fabric selection, process pinning)

cd "$SLURM_SUBMIT_DIR" || exit 1

nprocs=14400    # total MPI ranks (a 120 x 120 worker grid)
xworkers=120    # workers along x
yworkers=120    # workers along y
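
# Optional sanity check (illustrative addition; assumes the solver decomposes the
# domain into an xworkers x yworkers grid, so nprocs must equal their product):
if [ "$nprocs" -ne $((xworkers * yworkers)) ]; then
    echo "nprocs ($nprocs) does not match xworkers*yworkers ($((xworkers * yworkers)))" >&2
    exit 1
fi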

ulimit -aH              # print the hard resource limits in effect for this job

ulimit -c unlimited     # allow core dumps for post-mortem debugging
ulimit -s unlimited     # remove the stack size limit

mpiexec.hydra -n $nprocs <executable> <arguments> > output.log
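
Typical usage (assuming the placeholders <queue name>, <executable>, and <arguments> have been replaced with the actual partition, solver binary, and input arguments for the run):

sbatch Script_intelmpi-hdf5.sh    # submit the job
squeue -u $USER                   # check its state in the queue
tail -f output.log                # follow solver output once the job starts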