# config.yaml: goes in the PufferLib top directory (forked from PufferAI/PufferLib)
train:
  seed: 1
  torch_deterministic: True
  device: cuda
  learning_rate: 0.0002  # also tried 0.000175 and 0.00015
  anneal_lr: False
  gamma: 0.998
  gae_lambda: 0.95
  num_minibatches: 1  # 4 is the default; 16 worked well; 32 didn't work well and was slow
  norm_adv: True
  clip_coef: 0.1
  clip_vloss: True
  ent_coef: 0.01
  vf_coef: 0.5
  max_grad_norm: 0.5
  target_kl: ~
  env_pool: True
  verbose: True
  data_dir: experiments
  checkpoint_interval: 200
  cpu_offload: True
  pool_kernel: [0]
  bptt_horizon: 16
  vf_clip_coef: 0.1
  compile: True
  compile_mode: reduce-overhead
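
# A minimal sketch of how this file might be consumed (an illustration under
# assumptions; PufferLib's actual loader and CLI may differ). Env-specific
# train keys, e.g. pokemon_red.train below, override these defaults:
#
#   import yaml
#   with open('config.yaml') as f:
#       cfg = yaml.safe_load(f)
#   train_args = {**cfg['train'], **cfg['pokemon_red']['train']}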
sweep:
  method: random
  name: sweep
  metric:
    goal: maximize
    name: episodic_return
  # Nested 'parameters' structure is required by the WandB API
  parameters:
    train:
      parameters:
        learning_rate: {
          'distribution': 'log_uniform_values',
          'min': 1e-4,
          'max': 1e-1,
        }
        batch_size: {
          'values': [128, 256, 512, 1024, 2048],
        }
        batch_rows: {
          'values': [16, 32, 64, 128, 256],
        }
        bptt_horizon: {
          'values': [4, 8, 16, 32],
        }
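
# A hedged sketch of how the sweep block above maps onto the WandB sweeps API
# (the project name and the train entry point are assumptions here, not
# PufferLib's documented interface):
#
#   import wandb
#   sweep_id = wandb.sweep(sweep=cfg['sweep'], project='pufferlib')
#   wandb.agent(sweep_id, function=train)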
# fastest badge 1 ever: (lr: 2.0e-4, anneal=True, num_envs=150, envs_per_worker=2, envs_per_batch=60, update_epochs=3, batch_size=32768, batch_rows=64)
pokemon_red:
  package: pokemon_red
  train:
    total_timesteps: 1_000_000_000
    num_envs: 72  # also tried 96 and 144
    envs_per_worker: 1
    envs_per_batch: 18  # 24 also worked; 48 was poor with 96 envs
    update_epochs: 3
    gamma: 0.998
    batch_size: 49152  # also tried 65536
    batch_rows: 128
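    # Shape sanity check, assuming each minibatch is sliced into batch_rows
    # segments of bptt_horizon steps (16, per the train defaults above):
    # 128 rows * 16 steps = 2048 samples per slice, and 49152 / 2048 = 24
    # slices per batch.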
  env:
    name: pokemon_red
# Aliases: each of these names resolves to the same pokemon_red package
pokemon-red:
  package: pokemon_red
pokemonred:
  package: pokemon_red
pokemon:
  package: pokemon_red
pokegym:
  package: pokemon_red