...
```bash
#!/bin/bash
#SBATCH --partition=preempt
#SBATCH --job-name=test-verbose-srun
#SBATCH --output=%x.out
#SBATCH --time=4:00:00
#SBATCH --account=hpcrcf
###SBATCH --nodes=2
###SBATCH --ntasks-per-node=28
#SBATCH --ntasks=16
#SBATCH --cpus-per-task=8
#SBATCH --mem-per-cpu=1G

module purge
module load slurm
module load intel
module load intel-mpi
module load mkl

# print some diagnostic info
echo env for main task
env | sort | egrep -e '^(SLURM|OMP|I_MPI)_'
echo
camask=$(fgrep -ie cpus_allowed: /proc/self/status | awk '{ print $2 }' | tr -d ,)
ca=$(python -c 'print(bin(0x'${camask}').count("1"))')
echo Cpus_allowed count for main task before mpi startup is $ca
echo
# end diagnostic info

# could choose another mpi lib config, but release_mt is the default
# source mpivars.sh {debug,release}{,_mt}

# https://software.intel.com/en-us/mpi-developer-reference-linux-interoperability-with-openmp
# export I_MPI_PIN_DOMAIN=omp  # possibly redundant?
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# https://software.intel.com/en-us/mpi-developer-reference-linux-other-environment-variables
# export I_MPI_DEBUG=5,host,level

\time srun ./test-verbose ${1+"$@"}
```
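The `camask`/`ca` diagnostic above just counts the set bits in the CPU affinity mask that the kernel reports in `/proc/self/status`, which should match the CPUs Slurm has allocated to the task. A minimal standalone sketch of the same check, assuming Python 3 is available on the node (the function name and script are illustrative, not part of the job script):

```python
# Count the CPUs in this process's affinity mask, mirroring the
# fgrep/awk/python one-liner in the batch script above.
def cpus_allowed_count(status_path="/proc/self/status"):
    with open(status_path) as f:
        for line in f:
            if line.lower().startswith("cpus_allowed:"):
                # value is a comma-separated hex mask, e.g. "ff,ffffffff"
                mask = line.split()[1].replace(",", "")
                return bin(int(mask, 16)).count("1")
    return None

if __name__ == "__main__":
    print("Cpus_allowed count:", cpus_allowed_count())
```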
...