aidoctor_rnn.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
"""
Method being used : Recurrent Neural Network (RNN)
Unit Cell Type : Long Short-Term Memory (LSTM)
"""
#Phase 1 : Preprocessing the input
"""
Summary of the file : each line describes one patient record, consisting of the patient's ID,
the year of the record, 73 features, and the label (whether that particular patient got
the disease or not)
"""
padding_zero = [0.0] * 73 #zero vector used to pad shorter patient histories
#one-hot encoding of the binary label
def one_hot(label):
    if label == 0:
        return [1.0, 0.0]
    else:
        return [0.0, 1.0]
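#e.g. one_hot(0) -> [1.0, 0.0] and one_hot(1) -> [0.0, 1.0], i.e. [negative class, positive class]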
#this function parses the input file into padded per-patient sequences and labels
def process_input(file, train_stat = True):
    f = open(file, 'r') #opening the file
    input_list = []
    train_data = []
    train_label = []
    for line in f: #processing every line of the input file
        line_split = line.split(",")
        #splitting the line into an array of features
        for i in range(len(line_split)):
            if line_split[i] == '' or line_split[i] == '\n':
                #missing values are imputed with |N(0, 1)| samples, since feature values cannot be negative
                line_split[i] = np.abs(np.random.normal())
            else:
                line_split[i] = np.float32(line_split[i])
        input_list.append(line_split)
    f.close() #finished extracting the raw records from the file
    """
    Each entry of input_list has length 76 : the patient's ID, the year, 73 features,
    and the label (0/1) at the end
    """
    #begin processing the input list
    cur_posit = 0 #index of the record currently being processed
    #temporary buckets holding the features and labels of the current patient
    temp_feat = []
    temp_label = []
    while (cur_posit < len(input_list)):
        #collect the full information of the current record and store it in the appropriate place
        assert (len(input_list[cur_posit]) == 76) #sanity check on the length of the record
        feat_patient = input_list[cur_posit][2:-1] #the 73 features of this record
        id_patient = input_list[cur_posit][0] #the patient's identity number
        label_patient = input_list[cur_posit][-1]
        #begin appending to the data to be trained/tested on
        if (cur_posit != len(input_list) - 1) and (id_patient == input_list[cur_posit + 1][0]):
            #the next record still belongs to the same patient, so just collect the
            #features and the label into the temporary buckets
            temp_feat.append(feat_patient)
            temp_label.append(one_hot(label_patient))
        else:
            #this is the last record of the current patient; store it in the temporary buckets first
            temp_feat.append(feat_patient)
            temp_label.append(one_hot(label_patient))
            #then left-pad the sequence with zeros up to 12 time steps and push it into the data set
            for i in range(12 - len(temp_feat)):
                temp_feat.insert(0, padding_zero)
                temp_label.insert(0, [1.0, 0.0])
            train_data.append(temp_feat)
            if train_stat:
                train_label.append(temp_label)
            else:
                train_label.append(temp_label[-1])
            #lastly, flush the temporary buckets so they can be reused for the next patient
            temp_feat = []
            temp_label = []
        cur_posit += 1
    return train_data, train_label
#collecting all the data for the training, validation and testing
train_data, train_label = process_input('training_file/train_arrhytmia.txt', train_stat = False)
val_data, val_label = process_input('validation_file/val_arrhytmia.txt', train_stat = False)
test_data, test_label = process_input('testing_file/test_arrhytmia.txt', train_stat = False)
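#With train_stat = False each label is a single one-hot vector, so *_label ends up with
#shape [num_patients, 2] and *_data with shape [num_patients, 12, 73], matching the
#placeholders defined in Phase 2 below.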
# print(train_label.count([0.0, 1.0]))
# print(len(train_label))
# print(len(train_label[0]))
#################################PHASE 1 FINISHED#####################################
#Phase 2 : Building up the LSTM Model
#defining the hyperparameter first
training_epoch = 1000
hidden_nodes = 128
batch_size = 128
learning_rate = 0.0001
dropout_rate = 0.2 #used below as the keep probability (output_keep_prob) of the DropoutWrapper
l2_regularize = True
reg_param = 0.1
#defining up the variable used for holding the input data and the label
data = tf.placeholder(tf.float32, [None, 12, 73])
target = tf.placeholder(tf.float32, [None, 2])
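#data holds a batch of padded patient histories (12 time steps x 73 features);
#target holds the corresponding one-hot labels (2 classes)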
#stacking 4 LSTM layers; separate cell objects are created so the layers do not share weights
cells = [tf.nn.rnn_cell.LSTMCell(hidden_nodes, forget_bias = 1.0, state_is_tuple = True) for _ in range(4)]
cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple = True)
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob = dropout_rate)
val, state = tf.nn.dynamic_rnn(cell, data, dtype = tf.float32)
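#val has shape [batch, time, hidden]; the transpose/gather below pick out the output of the last time step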
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
#defining the initialized value for the weight and bias
weight = tf.Variable(tf.random_normal(shape = [hidden_nodes, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape = [int(target.get_shape()[1])]))
#now defining the prediction vector, which should be the softmax function after being multiplied
#by W and b, then we define the cross entropy
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
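#clipping keeps the prediction away from zero so tf.log in the cross entropy below stays finite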
cross_entropy = -tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction,1e-10,1.0)))
if l2_regularize:
    cost = tf.reduce_mean(cross_entropy) + reg_param * (tf.nn.l2_loss(weight) + tf.nn.l2_loss(bias))
else:
    cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
#measurement of the accuracy value of the dataset
correct = tf.equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
#initializing all the trainable parameters here
init_op = tf.global_variables_initializer()
f = open("170420_result_rnn.txt", 'w')
f.write("Result of the experiment\n\n")
batch_size_list = [128]
hidden_layer_list = [128]
learning_rate_list = [1e-3, 1e-4]
epoch_list_run = [1000]
dropout_list = [0.9, 0.7, 0.5, 0.3]
regularizer_parameter = [0.1, 0.01, 0.001]
l2Regularize_list = [True]
count_exp = 1
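#grid search over the lists above : 2 learning rates x 4 keep probabilities x 3 regularization
#strengths = 24 configurations in total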
for batch_size1 in batch_size_list:
    for training_epoch1 in epoch_list_run:
        for learning_rate1 in learning_rate_list:
            for hidden_node1 in hidden_layer_list:
                for dropout_rate1 in dropout_list:
                    for l2Reg in l2Regularize_list:
                        for reg_param1 in regularizer_parameter:
                            batch_size = batch_size1
                            hidden_nodes = hidden_node1
                            learning_rate = learning_rate1
                            training_epoch = training_epoch1
                            dropout_rate = dropout_rate1
                            l2_regularize = l2Reg
                            reg_param = reg_param1
                            epoch_list = []
                            cost_list = []
                            print("batch size = " + str(batch_size))
                            print("hidden nodes = " + str(hidden_nodes))
                            print("learning rate = " + str(learning_rate))
                            print("training epoch = " + str(training_epoch))
                            print("dropout rate = " + str(1 - dropout_rate))
                            print("l2Reg = " + str(l2_regularize))
                            print("reg_param = " + str(reg_param))
                            f.write("setting up the experiment with\n")
                            f.write("batch size = " + str(batch_size) + ", hidden nodes = " + str(hidden_nodes) + ", learning rate = " + str(learning_rate) + "\n")
                            f.write("training epoch = " + str(training_epoch) + ", dropout rate = " + str(1 - dropout_rate) + ", reg_param = " + str(reg_param) + "\n\n")
                            with tf.Session() as sess:
                                sess.run(init_op)
                                for epoch in range(training_epoch):
                                    epoch_list.append(epoch + 1)
                                    ptr = 0
                                    avg_cost = 0.
                                    no_of_batches = int(len(train_data) / batch_size)
                                    # no_of_batches = 1
                                    for i in range(no_of_batches):
                                        batch_in, batch_out = train_data[ptr:ptr+batch_size], train_label[ptr:ptr+batch_size]
                                        ptr += batch_size
                                        # target_ = sess.run([target], feed_dict = {data : batch_in, target : batch_out})
                                        _, cost_ = sess.run([optimizer, cost], feed_dict = {data : batch_in, target : batch_out})
                                        avg_cost += cost_ / no_of_batches
                                    # print("loss function = " + str(avg_cost))
                                    cost_list.append(avg_cost)
                                    # sess.run(target_exp, feed_dict = {data : train_data, target : train_label})
                                    # sess.run(arg_pred, feed_dict = {data : train_data, target : train_label})
                                    if epoch in [9, 19, 49, 99, 199, 299, 499, 699, 999]:
                                        f.write("During the " + str(epoch+1) + "-th epoch:\n")
                                        f.write("Training Accuracy = " + str(sess.run(accuracy, feed_dict = {data : train_data, target : train_label})) + "\n")
                                        f.write("Validation Accuracy = " + str(sess.run(accuracy, feed_dict = {data : val_data, target : val_label})) + "\n")
                                        f.write("Testing Accuracy = " + str(sess.run(accuracy, feed_dict = {data : test_data, target : test_label})) + "\n\n")
                                print("Optimization Finished")
                                plt.plot(epoch_list, cost_list)
                                plt.xlabel("Epoch (dropout = " + str(dropout_rate) + ";l2Reg = " + str(reg_param) + ";epoch = " + str(training_epoch) + ")")
                                plt.ylabel("Cost Function")
                                training_accuracy = sess.run(accuracy, feed_dict = {data : train_data, target : train_label})
                                validation_accuracy = sess.run(accuracy, feed_dict = {data : val_data, target : val_label})
                                testing_accuracy = sess.run(accuracy, feed_dict = {data : test_data, target : test_label})
                                print("Training Accuracy :", training_accuracy)
                                print("Validation Accuracy :", validation_accuracy)
                                print("Testing Accuracy :", testing_accuracy)
                                plt.title("Train Acc = " + str(training_accuracy * 100) + "\nTest Acc = " + str(testing_accuracy * 100))
                                plt.savefig("170420_rnn Exp " + str(count_exp) + ".png")
                                plt.clf()
                            count_exp += 1
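#close the experiment log once every configuration has been run
f.close()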