-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsparse_mlp_run.py
51 lines (43 loc) · 1.95 KB
/
sparse_mlp_run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
from cProfile import label
from cmath import exp
import torch
import torchvision
import matplotlib.pyplot as plt
import argparse
import torchvision.transforms as transforms
import sparse_mlp
from experiment import *
import torch.nn as nn
if __name__ == '__main__':
    # Command-line configuration for the sparse-MLP timing benchmark.
    # (--momentum and --sparsity_level are accepted for parity with other
    # runs; --sparsity_level is only consumed by the SimpleSparseMLP variant.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--batch_size", default=100, type=int)
    parser.add_argument("--lr", default=4e-2, type=float)
    parser.add_argument("--momentum", default=0.9, type=float)
    parser.add_argument("--sparsity_level", default=0.01, type=float)
    cli = parser.parse_args()

    # MNIST input pipeline: raw tensors only, no normalisation applied.
    to_tensor = transforms.Compose([transforms.ToTensor()])

    train_set = torchvision.datasets.MNIST(
        root='./datasets', train=True, download=True, transform=to_tensor)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=cli.batch_size, shuffle=True)

    test_set = torchvision.datasets.MNIST(
        root='./datasets', train=False, download=True, transform=to_tensor)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=cli.batch_size, shuffle=True)

    # Prefer the GPU when one is visible.
    # NOTE(review): `device` is only printed here — the model is never moved
    # onto it in this script; presumably SparseTimingBenchmark handles
    # placement internally. Confirm against the experiment module.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f'training device: {device}')
    print(f'training set size: {len(train_set)}')

    # Alternative model kept for quick switching between runs:
    # model = sparse_mlp.SimpleSparseMLP(cli.sparsity_level)
    model = sparse_mlp.SimpleMLP()

    # Optimizer is deliberately left as None (the benchmark decides);
    # earlier configurations used one of:
    # optimizer = torch.optim.SparseAdam(params=model.parameters(), lr=cli.lr)
    # optimizer = torch.optim.Adam(params=model.parameters(), lr=cli.lr)
    optimizer = None

    loss_fn = nn.CrossEntropyLoss()

    # SparseTimingBenchmark comes from `experiment` (star import at file top).
    benchmark = SparseTimingBenchmark(model)
    benchmark.set_up_exp(cli.epochs, train_loader, test_loader,
                         cli.lr, loss_fn, optimizer)
    benchmark.run()