#!/bin/bash
#SBATCH -t 00:59:00
#SBATCH --nodes=2
#SBATCH --partition=gpu-a100:shared
#SBATCH --ntasks-per-node=1
#SBATCH -L ansys
#SBATCH --gres=gpu:1 # number of GPUs per node - ignored if exclusive partition with 4 GPUs
#SBATCH --gpu-bind=single:1 # bind each process to its own GPU (single:<tasks_per_gpu>)
#SBATCH --mail-type=ALL
#SBATCH --output="cavity.slurm-log.%j"
#SBATCH --job-name=cavity_on_gpu
module add gcc openmpi/gcc.11 ansys/2023r2_mlx_openmpi # CUDA-aware Fluent build via external OpenMPI
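# Build a host list of the form "<node>:<ranks-per-node>,..." from the Slurm allocation for Fluent's -cnf option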
hostlist=$(srun hostname -s | sort | uniq -c | awk '{printf $2":"$1","}')
echo "Running on nodes: ${SLURM_JOB_NODELIST}$hostlist"
cat > tui_input.jou << EOF
; this is an Ansys journal file aka text user interface (TUI) file
parallel/gpgpu/show
file/read-case nozzle_gpu_supported.cas.h5
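; split the mesh into 2 partitions (one per MPI rank) using the Cartesian-axes method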
parallel/partition/method/cartesian-axes 2
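; auto-save settings: tag saved files with the time step, save the case only when it changes,
; save data every 10 steps, and keep only the most recent auto-save files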
file/auto-save/append-file-name time-step 6
file/auto-save/case-frequency if-case-is-modified
file/auto-save/data-frequency 10
file/auto-save/retain-most-recent-files yes
solve/initialize/hyb-initialization
solve/iterate 1000
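; write the final case/data file and export selected fields to CGNS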
file/write-case-data outputfile1
file/export cgns outputfile2 full-domain yes yes
pressure temperature x-velocity y-velocity mach-number
quit
exit
EOF
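# Launch Fluent in batch mode: 3ddp = 3D double precision, -g = no GUI, -t = total number of
# MPI processes, -cnf = host list, -gpu = GPU acceleration, -i = journal file to execute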
fluent 3ddp -g -cnf=$hostlist -t${SLURM_NTASKS} -gpu -nm -i tui_input.jou \
    -mpi=openmpi -pib -mpiopt="--report-bindings --rank-by core" > cavity.out.$SLURM_JOB_ID 2>&1
echo '#################### Fluent finished ############'