linear.py
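"""Linear evaluation of a pretrained encoder.

The backbone ``f`` defined in ``model.py`` is loaded from a pretrained checkpoint and
frozen; only the final ``nn.Linear`` classifier is trained and evaluated on the chosen
dataset. Example invocation (the checkpoint path is simply this script's default and
depends on your own pretraining run):

    python linear.py --dataset cifar10 --model_path results/Barlow_Twins/0.005_64_128_model.pth
"""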
import argparse

import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from thop import profile, clever_format
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from tqdm import tqdm

import utils

class Net(nn.Module):
    def __init__(self, num_class, pretrained_path, dataset):
        super(Net, self).__init__()
        # encoder
        from model import Model
        self.f = Model(dataset=dataset).f
        # classifier
        self.fc = nn.Linear(2048, num_class, bias=True)
        # load the pretrained encoder weights; strict=False tolerates checkpoint keys
        # that have no counterpart here (e.g. the projection head) and leaves fc at its
        # fresh random initialization
        self.load_state_dict(torch.load(pretrained_path, map_location='cpu'), strict=False)

    def forward(self, x):
        x = self.f(x)
        feature = torch.flatten(x, start_dim=1)
        out = self.fc(feature)
        return out

# train or test for one epoch
def train_val(net, data_loader, train_optimizer):
    is_train = train_optimizer is not None
    net.train() if is_train else net.eval()

    total_loss, total_correct_1, total_correct_5, total_num, data_bar = 0.0, 0.0, 0.0, 0, tqdm(data_loader)
    with (torch.enable_grad() if is_train else torch.no_grad()):
        for data, target in data_bar:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            out = net(data)
            loss = loss_criterion(out, target)

            if is_train:
                train_optimizer.zero_grad()
                loss.backward()
                train_optimizer.step()

            total_num += data.size(0)
            total_loss += loss.item() * data.size(0)
            # sort class scores in descending order and count top-1 / top-5 hits against the true label
            prediction = torch.argsort(out, dim=-1, descending=True)
            total_correct_1 += torch.sum((prediction[:, 0:1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
            total_correct_5 += torch.sum((prediction[:, 0:5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()

            data_bar.set_description('{} Epoch: [{}/{}] Loss: {:.4f} ACC@1: {:.2f}% ACC@5: {:.2f}% model: {}'
                                     .format('Train' if is_train else 'Test', epoch, epochs, total_loss / total_num,
                                             total_correct_1 / total_num * 100, total_correct_5 / total_num * 100,
                                             model_path.split('/')[-1]))

    return total_loss / total_num, total_correct_1 / total_num * 100, total_correct_5 / total_num * 100

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Linear Evaluation')
    parser.add_argument('--dataset', default='cifar10', type=str, help='Dataset: cifar10 or tiny_imagenet or stl10')
    parser.add_argument('--model_path', type=str, default='results/Barlow_Twins/0.005_64_128_model.pth',
                        help='The base string of the pretrained model path')
    parser.add_argument('--batch_size', type=int, default=512, help='Number of images in each mini-batch')
    parser.add_argument('--epochs', type=int, default=200, help='Number of sweeps over the dataset to train')

    args = parser.parse_args()
    model_path, batch_size, epochs = args.model_path, args.batch_size, args.epochs
    dataset = args.dataset
    if dataset == 'cifar10':
        train_data = CIFAR10(root='data', train=True,
                             transform=utils.CifarPairTransform(train_transform=True, pair_transform=False),
                             download=True)
        test_data = CIFAR10(root='data', train=False,
                            transform=utils.CifarPairTransform(train_transform=False, pair_transform=False),
                            download=True)
    elif dataset == 'stl10':
        train_data = torchvision.datasets.STL10(root='data', split='train',
                                                transform=utils.StlPairTransform(train_transform=True, pair_transform=False),
                                                download=True)
        test_data = torchvision.datasets.STL10(root='data', split='test',
                                               transform=utils.StlPairTransform(train_transform=False, pair_transform=False),
                                               download=True)
    elif dataset == 'tiny_imagenet':
        train_data = torchvision.datasets.ImageFolder('data/tiny-imagenet-200/train',
                                                      utils.TinyImageNetPairTransform(train_transform=True, pair_transform=False))
        test_data = torchvision.datasets.ImageFolder('data/tiny-imagenet-200/val',
                                                     utils.TinyImageNetPairTransform(train_transform=False, pair_transform=False))
    else:
        raise ValueError('Unsupported dataset: {}'.format(dataset))
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)

    model = Net(num_class=len(train_data.classes), pretrained_path=model_path, dataset=dataset).cuda()
    # freeze the encoder so that only the linear classifier is trained
    for param in model.f.parameters():
        param.requires_grad = False

    if dataset == 'cifar10':
        flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).cuda(),))
    elif dataset == 'tiny_imagenet' or dataset == 'stl10':
        flops, params = profile(model, inputs=(torch.randn(1, 3, 64, 64).cuda(),))
    flops, params = clever_format([flops, params])
    print('# Model Params: {} FLOPs: {}'.format(params, flops))

    optimizer = optim.Adam(model.fc.parameters(), lr=1e-3, weight_decay=1e-6)
    loss_criterion = nn.CrossEntropyLoss()
    results = {'train_loss': [], 'train_acc@1': [], 'train_acc@5': [],
               'test_loss': [], 'test_acc@1': [], 'test_acc@5': []}
    save_name = model_path.split('.pth')[0] + '_linear.csv'

    best_acc = 0.0
    for epoch in range(1, epochs + 1):
        train_loss, train_acc_1, train_acc_5 = train_val(model, train_loader, optimizer)
        results['train_loss'].append(train_loss)
        results['train_acc@1'].append(train_acc_1)
        results['train_acc@5'].append(train_acc_5)
        test_loss, test_acc_1, test_acc_5 = train_val(model, test_loader, None)
        results['test_loss'].append(test_loss)
        results['test_acc@1'].append(test_acc_1)
        results['test_acc@5'].append(test_acc_5)
        # save statistics
        data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))
        data_frame.to_csv(save_name, index_label='epoch')
        # if test_acc_1 > best_acc:
        #     best_acc = test_acc_1
        #     torch.save(model.state_dict(), 'results/linear_model.pth')