-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathMain Dynamic
126 lines (96 loc) · 3.8 KB
/
Main Dynamic
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import torch
import torch.nn as nn
from torch_geometric.loader import DataLoader
import os.path as osp
import copy
from statistics import mean
# BUG FIX: the original `from Train & test functions import ...` is a
# SyntaxError — a Python module name cannot contain spaces or '&'. The helper
# file must be importable under a valid identifier; rename it accordingly.
# TODO(review): confirm the actual filename of the train/test helper module.
from train_test_functions import train_static, test_static, train_dynamic, test_dynamic
from DynModel import DynGNN
# ---------------- Experiment configuration ---------------- #
# Optimisation settings.
num_epochs = 200
batch_size = 20
learning_rate = 0.001
weight_decay = 1e-4
drop_rate = 0.5

# Dataset / model selection. Each option lists its valid alternatives.
path = './data/UPFD'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
name = 'gossipcop' # 'politifact, 'gossipcop'
feature = 'profile' # 'profile', 'spacy', 'bert', 'content'
gnn_type = 'graphsage' # 'gcn', 'gat', 'graphsage'
snapshot_type = 'calendar' # 'sequential', 'temporal', 'calendar'
attention_layer = 'gru' # 'mean', 'lstm', 'gru'

# Model dimensions: number of snapshots per example, input feature width
# (10 for the 'profile' feature set), and hidden layer width.
snapshot_num = 5
in_features = 10 # profile
hidden_features = 128
# Load the pre-built snapshot datasets (saved with torch.save).
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# files produced by this project's own preprocessing.
train_dataset = torch.load(osp.join(path, 'raw', name, feature, 'train', snapshot_type)) # 5 snapshots
val_dataset = torch.load(osp.join(path, 'raw', name, feature, 'val', snapshot_type))
test_dataset = torch.load(osp.join(path, 'raw', name, feature, 'test', snapshot_type))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# FIX: evaluation loaders should not shuffle — metric values do not depend on
# sample order, and deterministic iteration makes runs comparable/debuggable.
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Per-run test metrics, accumulated across the repeated runs below.
accuracies = []
F1s = []
precisions = []
recalls = []
# Repeat the full train/validate/test cycle 5 times with a freshly
# initialised model each time; per-run test metrics are collected in the
# module-level lists and averaged afterwards.
for step in range(1, 6):
    print('-----------------')
    print(' STEP ', step, ' ')
    print('-----------------')
    # MODEL INIT #
    model = DynGNN(gnn_type, in_features, hidden_features, drop_rate, snapshot_num, attention_layer).to(device)
    criterion = nn.CrossEntropyLoss()
    # BUG FIX: weight_decay was declared in the hyperparameters but never
    # passed to the optimizer, so no L2 regularisation was applied.
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # TRAINING LOOP #
    # BUG FIX: the original chained assignments (a = b = []) bound several
    # names to the SAME list object, interleaving losses, accuracies and F1
    # scores in one container. Each metric now gets its own list.
    train_losses = []
    train_accuracies = []
    val_losses = []
    val_accuracies = []
    val_f1s = []
    # EARLY STOPPING INIT VARIABLES #
    best_loss = float('inf')
    best_model_weights = None
    patience = 15
    for epoch in range(1, num_epochs + 1):
        # TRAIN #
        train_loss, train_acc = train_dynamic(model, train_loader, optimizer, criterion, device)
        train_losses.append(train_loss)
        train_accuracies.append(train_acc)
        # VALIDATE #
        val_loss, val_acc, val_f1, val_precision, val_recall = test_dynamic(model, val_loader, criterion, device)
        val_losses.append(val_loss)
        val_accuracies.append(val_acc)
        val_f1s.append(val_f1)
        # EARLY STOPPING #
        if val_loss < best_loss:
            best_loss = val_loss
            best_model_weights = copy.deepcopy(model.state_dict()) # Deep copy here
            patience = 15 # Reset patience counter
        else:
            patience -= 1
            if patience == 0:
                print('')
                print('EARLY STOPPED @ EPOCH:', epoch)
                break
        if epoch % 5 == 0:
            print(f'Epoch: {epoch:02d}, Loss: {train_loss:.4f}, Acc: {train_acc:.4f}, '
                  f'Val loss: {val_loss:.4f}, Val acc: {val_acc:.4f}, Val F1: {val_f1:.4f}')
    # BUG FIX: the best-validation weights were saved but never restored, so
    # the test below previously evaluated whatever weights the model held at
    # the final (possibly overfitted) epoch. Restore the checkpoint first.
    if best_model_weights is not None:
        model.load_state_dict(best_model_weights)
    # TEST #
    test_loss, test_acc, test_f1, test_precision, test_recall = test_dynamic(model, test_loader, criterion, device)
    print('----------------------')
    print(' TEST RESULTS ')
    print('----------------------')
    print(f'Loss: {test_loss:.4f}, Acc: {test_acc:.4f}, F1: {test_f1:.4f}, Precision: {test_precision:.4f}, Recall: {test_recall:.4f}')
    accuracies.append(test_acc)
    F1s.append(test_f1)
    precisions.append(test_precision)
    recalls.append(test_recall)
# Summarise the collected metrics: print each per-run series and its mean,
# with a blank line between metric groups (none after the last one).
print('=== RESULTS===')
metric_series = (
    ('Accuracies: ', accuracies),
    ('F1s: ', F1s),
    ('Precisions: ', precisions),
    ('Recalls: ', recalls),
)
for position, (label, series) in enumerate(metric_series):
    if position:
        print('')
    print(label, series)
    print('==> mean = ', mean(series))