train.py
import os
import json
import argparse

import numpy as np

from model import build_model, save_weights

DATA_DIR = './data'
LOG_DIR = './logs'

# Using batch SGD for model building.
BATCH_SIZE = 16
SEQ_LENGTH = 64

class TrainLogger(object):
    """Appends one loss/accuracy row per epoch to a CSV file under LOG_DIR."""

    def __init__(self, file):
        self.file = os.path.join(LOG_DIR, file)
        self.epochs = 0
        with open(self.file, 'w') as f:
            f.write('epoch,loss,acc\n')

    def add_entry(self, loss, acc):
        self.epochs += 1
        s = '{},{},{}\n'.format(self.epochs, loss, acc)
        with open(self.file, 'a') as f:
            f.write(s)
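
# TrainLogger produces a CSV like the following (the numbers are illustrative,
# not real training output):
#
#   epoch,loss,acc
#   1,2.31,0.35
#   2,1.98,0.42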

def read_batches(T, vocab_size):
    length = T.shape[0]                 # 129,665
    batch_chars = length // BATCH_SIZE  # 8,104

    for start in range(0, batch_chars - SEQ_LENGTH, SEQ_LENGTH):  # (0, 8040, 64)
        # X holds input indices (2-D tensor); Y holds one-hot targets (3-D tensor).
        X = np.zeros((BATCH_SIZE, SEQ_LENGTH))              # 16 x 64
        Y = np.zeros((BATCH_SIZE, SEQ_LENGTH, vocab_size))  # 16 x 64 x 86
        # Fill X with the current window of each of the BATCH_SIZE parallel
        # streams, and Y with the next character, one-hot encoded.
        for batch_idx in range(0, BATCH_SIZE):  # (0, 16)
            for i in range(0, SEQ_LENGTH):      # (0, 64)
                X[batch_idx, i] = T[batch_chars * batch_idx + start + i]
                Y[batch_idx, i, T[batch_chars * batch_idx + start + i + 1]] = 1  # one-hot encoding
        yield X, Y
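
# A worked example of the shapes above (a sketch, assuming the ~129,665-character
# corpus from the comments): batch_chars = 129665 // 16 = 8104, so `start` takes
# the values 0, 64, ..., 8000 and the generator yields 126 batches per epoch,
# each with X of shape (16, 64) holding character indices and Y of shape
# (16, 64, 86) holding one-hot next-character targets.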

def train(text, epochs=100, save_freq=10):
    # Map each unique character to an index.
    char_to_idx = {ch: i for (i, ch) in enumerate(sorted(list(set(text))))}
    print("Number of unique characters: " + str(len(char_to_idx)))  # 86

    with open(os.path.join(DATA_DIR, 'char_to_idx.json'), 'w') as f:
        json.dump(char_to_idx, f)

    # Inverse mapping: index back to character.
    idx_to_char = {i: ch for (ch, i) in char_to_idx.items()}
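    # For example, given the hypothetical input text 'hello':
    # sorted(set(text)) == ['e', 'h', 'l', 'o'], so
    # char_to_idx == {'e': 0, 'h': 1, 'l': 2, 'o': 3} and
    # idx_to_char == {0: 'e', 1: 'h', 2: 'l', 3: 'o'}.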
    vocab_size = len(char_to_idx)

    # Model architecture (defined in model.py).
    model = build_model(BATCH_SIZE, SEQ_LENGTH, vocab_size)
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Generate training data: convert the complete text into numerical indices.
    T = np.asarray([char_to_idx[c] for c in text], dtype=np.int32)
    print("Length of text: " + str(T.size))  # 129,665

    steps_per_epoch = (len(text) // BATCH_SIZE - 1) // SEQ_LENGTH  # currently unused

    log = TrainLogger('training_log.csv')
    for epoch in range(epochs):
        print('\nEpoch {}/{}'.format(epoch + 1, epochs))

        losses, accs = [], []
        for i, (X, Y) in enumerate(read_batches(T, vocab_size)):
            # print(X)  # debug: dump the raw input batch
            loss, acc = model.train_on_batch(X, Y)
            print('Batch {}: loss = {}, acc = {}'.format(i + 1, loss, acc))
            losses.append(loss)
            accs.append(acc)

        # Append the epoch averages to the log file.
        log.add_entry(np.average(losses), np.average(accs))

        # Save a checkpoint every `save_freq` epochs (10 by default).
        if (epoch + 1) % save_freq == 0:
            save_weights(epoch + 1, model)
            print('Saved checkpoint to', 'weights.{}.h5'.format(epoch + 1))

# Command-line entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train the model on some text.')
    parser.add_argument('--input', default='input.txt', help='name of the text file to train from')
    parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--freq', type=int, default=10, help='checkpoint save frequency')
    args = parser.parse_args()

    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    train(open(os.path.join(DATA_DIR, args.input)).read(), args.epochs, args.freq)
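
train.py imports build_model and save_weights from model.py, which is not shown on this page. For reference, here is a minimal sketch of what that module might look like, based only on the call sites above: the build_model(BATCH_SIZE, SEQ_LENGTH, vocab_size) signature, the save_weights(epoch, model) signature, and the 'weights.{}.h5' checkpoint name printed during training. The layer sizes, dropout rate, and MODEL_DIR path are assumptions, not the repository's actual implementation.

# model.py -- hypothetical sketch, for illustration only
import os

from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Embedding, LSTM, TimeDistributed

MODEL_DIR = './model'  # assumed checkpoint directory

def build_model(batch_size, seq_len, vocab_size):
    # Stateful character-level LSTM: indices -> embeddings -> LSTM stack ->
    # per-timestep softmax over the vocabulary, matching Y's one-hot targets.
    model = Sequential()
    model.add(Embedding(vocab_size, 512, batch_input_shape=(batch_size, seq_len)))
    for _ in range(3):
        model.add(LSTM(256, return_sequences=True, stateful=True))
        model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense(vocab_size)))
    model.add(Activation('softmax'))
    return model

def save_weights(epoch, model):
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    model.save_weights(os.path.join(MODEL_DIR, 'weights.{}.h5'.format(epoch)))

With a corpus at data/input.txt in place, training then runs as, e.g.: python train.py --input input.txt --epochs 100 --freq 10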