#!/bin/sh
#SBATCH --job-name=serial_job_test # Job name
#SBATCH --ntasks=1 # number of tasks
#SBATCH --cpus-per-task=5 # number of cpus, can vary from 1-10
#SBATCH --time=48:00:00 # Time limit hrs:min:sec
#SBATCH --output=serial_test_job.out # Standard output
#SBATCH --error=serial_test_job.err # error log
#SBATCH --gres=gpu:1 # GPUs needed, should be same as selected queue GPUs
#SBATCH --partition=q_2day-1G # Specific to queue being used, need to select from queues available
#SBATCH --mem=20GB # Memory for computation process can go up to 50GB

# Log where and when the job started. The commands are grouped in { }; so the
# whole header is piped to tee -- previously only `date` was captured and the
# pwd/hostname output went to stdout only, never into the `result` file.
{ pwd; hostname; date; } | tee result

# Run the workload inside Docker, confined to the resources SLURM granted:
#   --gpus  : only the GPU(s) in $CUDA_VISIBLE_DEVICES (set by SLURM for this job)
#   --cpus  : the CPU count requested via --cpus-per-task
#   --name  : the SLURM job id, so the container is easy to identify
#   --user  : your own uid:gid, so files written in the mount stay owned by you
# All variable expansions are quoted so the command is safe even if a value is
# empty or contains spaces. Replace every <placeholder> before submitting.
docker run -t --gpus '"device='"$CUDA_VISIBLE_DEVICES"'"' \
  --cpus="$SLURM_CPUS_PER_TASK" \
  --name "$SLURM_JOB_ID" \
  --shm-size=20G \
  --user "$(id -u "$USER"):$(id -g "$USER")" \
  --rm \
  -v /localscratch/<uid>:/workspace/localscratch/<uid> \
  <preferred_docker_image name>:<tag> \
  bash -c 'cd /workspace/localscratch/<uid>/<path to desired folder>/ && python <script to be run.py>' \
  | tee -a log_out.txt

## Example of the filled-in command (do not include these 2 comment lines in your script):
## docker run -t --gpus '"device='$CUDA_VISIBLE_DEVICES'"' --cpus=$SLURM_CPUS_PER_TASK --name $SLURM_JOB_ID --shm-size=20G --user $(id -u $USER):$(id -g $USER) --rm -v /localscratch/secdsan:/workspace/localscratch/secdsan secdsan_cuda:latest bash -c 'cd /workspace/localscratch/secdsan/gputestfolder/ && python gputest.py' | tee -a log_out.txt
Job Submission Instructions:
- All jobs should be submitted via slurm.
- If jobs are run without slurm, the activity will be reported to your professor and your account will be blocked.
- Save an sbatch script like the one above in a file, then submit the job with the command below.
sbatch <SCRIPT NAME>
example: sbatch test_script.sh
