submit-ray-cluster.sbatch_shifter (forked from NERSC/slurm-ray-cluster)
#!/bin/bash
#SBATCH -C gpu
#SBATCH --time=01:00:00
### This script works for any number of nodes; Ray will find and manage all resources
#SBATCH --nodes=2
### Give all resources on each node to a single Ray task; Ray manages them internally
#SBATCH --ntasks-per-node=1
#SBATCH --gpus-per-task=8
#SBATCH --cpus-per-task=80
#SBATCH --image=nersc/pytorch:1.5.0_v3
#SBATCH --volume="/dev/infiniband:/sys/class/infiniband_verbs"
# Load modules or your own conda environment here
# module load pytorch/v1.4.0-gpu
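# For example, one might load a Python module and activate a conda environment here
# (the names below are placeholders, not part of this repo; adjust to your own site):
#   module load python
#   conda activate my-ray-env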
################# DO NOT CHANGE THINGS HERE UNLESS YOU KNOW WHAT YOU ARE DOING ###############
# This script is a modification of the implementation suggested by gregSchwartz18 here:
# https://github.com/ray-project/ray/issues/826#issuecomment-522116599
redis_password=$(uuidgen)
export redis_password
nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST") # Getting the node names
nodes_array=( $nodes )
node_1=${nodes_array[0]}
ip=$(srun --nodes=1 --ntasks=1 -w "$node_1" hostname --ip-address) # get the head node's IP to build the Redis address
port=6379
ip_head=$ip:$port
export ip_head
echo "IP Head: $ip_head"
echo "STARTING HEAD at $node_1"
srun --nodes=1 --ntasks=1 -w "$node_1" shifter bash start-head.sh "$ip" "$redis_password" &
sleep 30
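# start-head.sh (not shown in this file) is expected to launch the Ray head process,
# roughly along these lines (a sketch; exact flags depend on the Ray version in the image):
#   ray start --head --node-ip-address="$1" --port=6379 --redis-password="$2" --block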
worker_num=$(($SLURM_JOB_NUM_NODES - 1)) # number of nodes other than the head node
for (( i=1; i<=$worker_num; i++ ))
do
node_i=${nodes_array[$i]}
echo "STARTING WORKER $i at $node_i"
srun --nodes=1 --ntasks=1 -w "$node_i" shifter bash start-worker.sh "$ip_head" "$redis_password" &
sleep 5
done
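# start-worker.sh (not shown in this file) is expected to join each worker node to the head,
# roughly (a sketch; exact flags depend on the Ray version in the image):
#   ray start --address="$1" --redis-password="$2" --block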
##############################################################################################
#### call your code below
shifter python examples/mnist_pytorch_trainable.py
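# Inside the Python script, the running cluster is typically joined with something like
# the following (a sketch; the exact ray.init() signature varies across Ray versions):
#   import os, ray
#   ray.init(address=os.environ["ip_head"], redis_password=os.environ["redis_password"])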
exit