# NN.py
import torch
from torch.nn import Embedding


class DNN(torch.nn.Module):
    """Concatenation-based MLP: user/item embeddings -> MLP tower -> sigmoid rating."""

    def __init__(self, config, uvec=None, ivec=None):
        super(DNN, self).__init__()
        print('initializing DNN')
        self.config = config
        self.user_num = config['user_num']
        self.item_num = config['item_num']
        self.u_embed_dim = config['embed_dim']
        self.i_embed_dim = config['embed_dim']
        if uvec is not None and ivec is not None:
            print('pre-learnt user/item features detected')
            print('u:', uvec.shape)
            print('i:', ivec.shape)
            # Pretrained vectors override the configured embedding dimension.
            self.u_embed_dim = uvec.shape[1]
            self.i_embed_dim = ivec.shape[1]
        self.u_embed = Embedding(num_embeddings=self.user_num, embedding_dim=self.u_embed_dim)
        self.i_embed = Embedding(num_embeddings=self.item_num, embedding_dim=self.i_embed_dim)
        if uvec is not None and ivec is not None:
            # Initialize the embedding tables from the pre-learnt features.
            self.u_embed.weight = torch.nn.Parameter(torch.as_tensor(uvec, dtype=torch.float32))
            self.i_embed.weight = torch.nn.Parameter(torch.as_tensor(ivec, dtype=torch.float32))
        else:
            print('no pretrained embeddings, training from scratch')
        # MLP tower; config['layers'][0] must equal u_embed_dim + i_embed_dim.
        self.NN = torch.nn.ModuleList()
        for in_size, out_size in zip(config['layers'][:-1], config['layers'][1:]):
            self.NN.append(torch.nn.Linear(in_size, out_size))
        self.score_layer = torch.nn.Linear(in_features=config['layers'][-1], out_features=1)
        self.logistic = torch.nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        user_embedding = self.u_embed(user_indices)
        item_embedding = self.i_embed(item_indices)
        # Concatenate the latent vectors and feed them through the MLP tower.
        vector = torch.cat([user_embedding, item_embedding], dim=-1)
        for layer in self.NN:
            vector = torch.relu(layer(vector))
        logits = self.score_layer(vector)
        rating = self.logistic(logits)
        return rating
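
# Usage note (a sketch, not part of the original file): given LongTensor index
# batches of shape (batch,), DNN.forward returns ratings of shape (batch, 1)
# in (0, 1), so callers flatten with .view(-1) before BCELoss, as Engine does.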

class Engine(object):
    """Wraps DNN with a BCE loss, an Adam optimizer and a simple training loop."""

    def __init__(self, config, uPretrain=None, iPretrain=None):
        self.config = config
        self.loss_fn = torch.nn.BCELoss()
        self.model = DNN(config, uvec=uPretrain, ivec=iPretrain)
        self.opt = self.initialize_opt(self.model, config)
        self.use_cuda = config['use_cuda']
        if self.use_cuda:
            self.model = self.model.cuda()
        print(self.model)

    def initialize_opt(self, model, params):
        optimizer = torch.optim.Adam(model.parameters(), lr=params['adam_lr'],
                                     weight_decay=params['l2_regularization'])
        return optimizer

    def train_single_batch(self, users, items, ratings):
        if self.config['use_cuda']:
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()
        self.opt.zero_grad()
        ratings_pred = self.model(users, items)
        loss = self.loss_fn(ratings_pred.view(-1), ratings)
        loss.backward()
        self.opt.step()
        return loss.item()

    def train_an_epoch(self, train_loader, epoch_id):
        self.model.train()
        total_loss = 0.0
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.LongTensor)
            user, item, rating = batch[0], batch[1], batch[2]
            rating = rating.float()  # BCELoss expects float targets
            loss = self.train_single_batch(user, item, rating)
            # print('[Training Epoch {}] Batch {}, Loss {}'.format(epoch_id, batch_id, loss))
            total_loss += loss
        print('[Epoch {}] total loss: {}'.format(epoch_id, total_loss))
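

# A minimal smoke test (a sketch, not part of the original file). The config
# values below are hypothetical; the only real constraint is that
# config['layers'][0] equals 2 * embed_dim, since forward concatenates the
# user and item embeddings. Synthetic interactions stand in for real data.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, TensorDataset

    config = {
        'user_num': 100,            # hypothetical corpus sizes
        'item_num': 200,
        'embed_dim': 8,
        'layers': [16, 32, 16, 8],  # layers[0] == 2 * embed_dim
        'use_cuda': False,
        'adam_lr': 1e-3,
        'l2_regularization': 1e-6,
    }

    # Synthetic (user, item, rating) triples with binary labels.
    users = torch.randint(0, config['user_num'], (256,))
    items = torch.randint(0, config['item_num'], (256,))
    ratings = torch.randint(0, 2, (256,)).float()
    loader = DataLoader(TensorDataset(users, items, ratings), batch_size=64)

    engine = Engine(config)
    engine.train_an_epoch(loader, epoch_id=0)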