script_openmpi_compiled_with_slurm

#!/bin/sh
#SBATCH --job-name=mo13x-cpu-256                         # Job name
#SBATCH --ntasks-per-node=48                             # MPI tasks per node
#SBATCH --nodes=256                                      # Number of nodes
#SBATCH --cpus-per-task=1                                # Cores per task (default is 1; set explicitly so $SLURM_CPUS_PER_TASK is defined)
#SBATCH --time=8:00:00                                   # Time limit hrs:min:sec
#SBATCH -o cpu_srun_new.out                              # Standard output file
#SBATCH --partition=large                                # Partition (queue) name
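# Report the resources actually allocated by Slurm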
echo "Number of Nodes Allocated = $SLURM_JOB_NUM_NODES"
echo "Number of Tasks Allocated = $SLURM_NTASKS"
echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK"
spack unload -a
module purge
cd "$SLURM_SUBMIT_DIR"
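# Set up Spack and its shell integration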
module load spack
. /home-ext/apps/spack/share/spack/setup-env.sh
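# Load the compiler, MPI, and math-library stack from Spack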
spack load gcc@11.2.0
spack load /cvwbkcp                                      # Load a specific package by its Spack installation hash
spack load openmpi+legacylaunchers fabrics=auto schedulers=auto
spack load intel-mkl@2020.4.304%intel@2021.4.0
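# Make the Spack-installed zlib visible to the compiler and linker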
export LIBRARY_PATH="/home-ext/apps/spack/opt/spack/linux-centos7-cascadelake/gcc-11.2.0/zlib-1.2.11-2r3jtjxvltasn7jh64vhgfg6ikrckiy7/lib:$LIBRARY_PATH"
export CPATH="/home-ext/apps/spack/opt/spack/linux-centos7-cascadelake/gcc-11.2.0/zlib-1.2.11-2r3jtjxvltasn7jh64vhgfg6ikrckiy7/include:$CPATH"
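# Launch DFT-FE on all allocated MPI ranks, excluding the legacy openib BTL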
mpirun -n $SLURM_NTASKS --mca btl ^openib /home/staff/cds/cdsphani/dftfesoftware2022_pravega/dftfe/release/real/dftfe parameterFileCPU.prm > ompioutput