config.yaml
fix_seed: 0
checkpoints_every: 16
result_path: ./results_test
config_path: ./config.yaml
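
The top-level keys pin the random seed, set how often checkpoints are written (checkpoints_every), and name the result and config paths. Below is a minimal sketch of reading this file and fixing the seed, assuming PyYAML and PyTorch; the helper function is illustrative, not the repo's own code.

```python
import random

import numpy as np
import torch
import yaml

# Load the configuration named by config_path.
with open("./config.yaml") as f:
    cfg = yaml.safe_load(f)

def set_seed(seed: int) -> None:
    """Seed Python, NumPy, and PyTorch RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

set_seed(cfg["fix_seed"])  # fix_seed: 0
```
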
resume:
  resume: False
  resume_path: path/to/checkpoints.pth
  restart_optimizer: True
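
The resume block gates restarting from a saved checkpoint: when resume is True the weights at resume_path are restored, and restart_optimizer decides whether the optimizer state is reloaded or rebuilt from scratch. A hedged sketch of that logic; the checkpoint key names are assumptions, not taken from the repo.

```python
import torch

def maybe_resume(model, optimizer, cfg):
    """Restore model (and optionally optimizer) state from a checkpoint."""
    if not cfg["resume"]["resume"]:
        return
    ckpt = torch.load(cfg["resume"]["resume_path"], map_location="cpu")
    model.load_state_dict(ckpt["model"])                  # assumed key
    if not cfg["resume"]["restart_optimizer"]:
        optimizer.load_state_dict(ckpt["optimizer"])      # assumed key
```
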
encoder:
  composition: esm_v2 # esm_v2, promprot, both
  model_type: esm_v2 # esm_v2, t5
  model_name: facebook/esm2_t33_650M_UR50D # facebook/esm2_t33_650M_UR50D, facebook/esm2_t30_150M_UR50D, facebook/esm2_t12_35M_UR50D, facebook/esm2_t6_8M_UR50D, Rostlab/prot_t5_base_mt_uniref50
  max_len: 1024
  num_classes: 8
  prm4prmpro: ppi
  frag_overlap: 200
  PEFT: PFT # FT, PFT, frozen, lora, PromT
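
The encoder block selects the protein language model: model_name is a Hugging Face checkpoint (ESM-2 650M by default, with smaller ESM-2 variants or a ProtT5 checkpoint as alternatives), max_len caps the tokenized length, and frag_overlap sets the overlap used when long sequences are split into fragments. PEFT chooses the fine-tuning mode (full, partial, frozen, LoRA, or prompt tuning). A minimal sketch of loading the default checkpoint with the transformers library; the fragmenting and PEFT wiring in the repo are more involved than shown here.

```python
from transformers import AutoModel, AutoTokenizer

model_name = "facebook/esm2_t33_650M_UR50D"
tokenizer = AutoTokenizer.from_pretrained(model_name)
encoder = AutoModel.from_pretrained(model_name)

# Tokenize a protein sequence, truncating to the configured max_len.
seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"
inputs = tokenizer(seq, return_tensors="pt", truncation=True, max_length=1024)
hidden = encoder(**inputs).last_hidden_state  # (1, seq_len, hidden_dim)
```
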
train_settings:
  num_epochs: 20
  shuffle: True
  device: cuda
  batch_size: 16
  grad_accumulation: 1
  loss_pos_weight: 35
  dataset: v2 # v2, v3
  fine_tune_lr: -2 # -1, -2, -3
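
train_settings drives the training loop: 20 epochs, batch size 16, optional gradient accumulation, and loss_pos_weight: 35 to up-weight positive labels, as is typical for imbalanced multi-label targets. fine_tune_lr is a choice among -1, -2, -3, presumably an exponent for the encoder's fine-tuning learning rate. A hedged sketch of how the positive weight and gradient accumulation might fit together; the loss choice and loop structure are assumptions, not the repo's exact code.

```python
import torch
from torch import nn

# Up-weight positive labels in a multi-label BCE loss over the 8 classes.
criterion = nn.BCEWithLogitsLoss(pos_weight=torch.full((8,), 35.0))

def train_epoch(model, loader, optimizer, grad_accumulation=1, device="cuda"):
    model.train()
    optimizer.zero_grad()
    for step, (x, y) in enumerate(loader):
        logits = model(x.to(device))
        loss = criterion(logits, y.to(device)) / grad_accumulation
        loss.backward()
        if (step + 1) % grad_accumulation == 0:
            optimizer.step()
            optimizer.zero_grad()
```
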
valid_settings:
  do_every: 1
  batch_size: 16
  device: cuda
predict_settings:
  batch_size: 16
  device: cuda
  cutoffs: [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
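
predict_settings lists one decision cutoff per class (eight 0.5 thresholds, matching num_classes: 8): sigmoid probabilities at or above a class's cutoff are reported as positive. A small sketch of that thresholding.

```python
import torch

cutoffs = torch.tensor([0.5] * 8)

def predict(logits: torch.Tensor) -> torch.Tensor:
    """Turn raw logits of shape (batch, 8) into binary per-class calls."""
    probs = torch.sigmoid(logits)
    return (probs >= cutoffs.to(probs.device)).long()
```
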
optimizer:
  name: adam
  lr: 1e-4
  weight_decouple: True
  weight_decay: 1e-3
  eps: 1e-16
  beta_1: 0.9
  beta_2: 0.999
  use_8bit_adam: False
  grad_clip_norm: 1
  decay:
    warmup: 1024
    min_lr: 1e-5
    gamma: 0.2
    num_restarts: 1
    mode: cosine # skip, cosine
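
The optimizer block configures an Adam-style optimizer with decoupled weight decay (weight_decouple and the very small eps suggest a decoupled-weight-decay Adam variant), and the decay sub-block describes a warmup phase followed by a cosine schedule with restarts down to min_lr. A hedged sketch using stock PyTorch pieces as stand-ins; the repo's own scheduler may be implemented differently, and gamma/num_restarts are not reproduced here.

```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, LinearLR, SequentialLR

model = torch.nn.Linear(1280, 8)  # placeholder for the actual MUTarget model

# AdamW stands in for "adam" with weight_decouple: True.
optimizer = torch.optim.AdamW(
    model.parameters(), lr=1e-4, betas=(0.9, 0.999), eps=1e-16, weight_decay=1e-3
)

# warmup: 1024 steps of linear ramp-up, then cosine annealing with restarts to min_lr.
steps_per_cycle = 5000  # placeholder cycle length; the repo derives its own schedule
warmup = LinearLR(optimizer, start_factor=1e-3, total_iters=1024)
cosine = CosineAnnealingWarmRestarts(optimizer, T_0=steps_per_cycle, T_mult=1, eta_min=1e-5)
scheduler = SequentialLR(optimizer, schedulers=[warmup, cosine], milestones=[1024])

# grad_clip_norm: 1, applied to gradients during each training step.
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
```
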
supcon:
  apply: True
  device: cuda
  drop_out: 0.1
  n_pos: 9
  n_neg: 30
  temperature: 0.1
  hard_neg: True
  weight: 1
  warm_start: 0
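
The supcon block adds a supervised contrastive auxiliary loss: n_pos and n_neg control how many positives and negatives are sampled per anchor, temperature scales the similarities, hard_neg enables hard-negative mining, weight scales the term in the total loss, and warm_start delays when it kicks in. Below is a compact sketch of a standard supervised contrastive loss over L2-normalized embeddings, assuming single-label class ids for simplicity; the repo's sampling and hard-negative logic are not reproduced.

```python
import torch
import torch.nn.functional as F

def supcon_loss(embeddings: torch.Tensor, labels: torch.Tensor,
                temperature: float = 0.1) -> torch.Tensor:
    """Supervised contrastive loss over one batch.

    embeddings: (batch, dim); labels: (batch,) integer class ids.
    """
    z = F.normalize(embeddings, dim=1)
    sim = z @ z.t() / temperature                        # pairwise similarities / tau
    n = z.size(0)
    self_mask = torch.eye(n, dtype=torch.bool, device=z.device)
    exp_sim = torch.exp(sim).masked_fill(self_mask, 0.0)  # denominator excludes the anchor
    log_prob = sim - torch.log(exp_sim.sum(dim=1, keepdim=True))
    pos_mask = (labels.unsqueeze(0) == labels.unsqueeze(1)) & ~self_mask
    pos_counts = pos_mask.sum(dim=1).clamp(min=1)        # avoid dividing by zero for lone anchors
    loss = -(log_prob * pos_mask.float()).sum(dim=1) / pos_counts
    return loss.mean()
```
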