# -*- coding: utf-8 -*-
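"""Train an LSTM phenotype classifier on pre-built cross-validation folds
of clinical notes stored in .h5 files.

Example invocation (assumed; the flags are defined in main() below):
    python refactor_preprocess.py -topred Depression -predict_label 12 -epochs 10
"""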

import argparse
import csv
import h5py
import math
import os
import re
import time

import numpy as np
import torch
import torch.optim as optim
from keras.utils.np_utils import to_categorical
from os.path import join
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from torch.nn import functional as F

from helper import Indexer, load_bin_vec, parse_input_csv, clean_str
from load_data import preprocess, cross_validation, readh5todata
from model import LSTMClassifier


def createLossAndOptimizer(model, arg, learning_rate):
    # Loss function (a class-weighted variant is kept for reference):
    # criterion = torch.nn.CrossEntropyLoss(weight=weight_scale)
    criterion = torch.nn.CrossEntropyLoss()
    # Optimizer
    if arg.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    elif arg.optimizer == 'Adadelta':
        optimizer = optim.Adadelta(model.parameters(), rho=0.95, lr=learning_rate)
    else:
        raise ValueError('Unsupported optimizer: {}'.format(arg.optimizer))
    return criterion, optimizer
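
# Note: torch.nn.CrossEntropyLoss applies LogSoftmax + NLLLoss internally,
# so the classifier is expected to return raw (unnormalized) logits.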


def train_model(args, model, learning_rate, batch_size, n_epochs, train_loader):
    # Print the hyperparameters of this training run:
    n_batches = len(train_loader)
    criterion, optimizer = createLossAndOptimizer(model, args, learning_rate)
    print("===== HYPERPARAMETERS =====")
    print("batch_size=", batch_size)
    print("epochs=", n_epochs)
    print("optimizer=", args.optimizer)
    print("learning_rate=", learning_rate)
    print("predict_label=", phenotypedictinverse[args.predict_label])
    print("=" * 30)
    training_start_time = time.time()
    if args.cuda > -1:
        model.cuda()
    # Loop over n_epochs
    for epoch in range(n_epochs):
        model.train()
        running_loss = 0.0
        print_every = n_batches // 10
        start_time = time.time()
        total_train_loss = 0
        for i, data in enumerate(train_loader, 0):
            # Get the inputs
            inputs, sequence, labels = data
            if args.cuda > -1:
                inputs, sequence, labels = inputs.cuda(), sequence.cuda(), labels.cuda()
            # The loader may return a final batch shorter than batch_size; skip it.
            if inputs.size(0) != batch_size:
                continue
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward pass, backward pass, optimize
            outputs = model(inputs, sequence)
            loss = criterion(outputs, labels)
            if args.debug == 1:
                print("outputs:", outputs)
                print("loss:", loss)
                print("labels:", labels)
            loss.backward()
            optimizer.step()
            # Accumulate statistics
            running_loss += loss.item()
            total_train_loss += loss.item()
            model.resetzeropadding()
            model.l2norm(args)
            # Report roughly every tenth of an epoch
            if (i + 1) % (print_every + 1) == 0:
                print("Epoch {}, {:d}% \t train_loss: {:.2f} took: {:.2f}s".format(
                    epoch + 1, int(100 * (i + 1) / n_batches),
                    running_loss / print_every, time.time() - start_time))
                # Reset the running loss and timer
                running_loss = 0.0
                start_time = time.time()
    print("Training finished, took {:.2f}s".format(time.time() - training_start_time))


def predict(args, net, test_loader):
    net.eval()
    with torch.no_grad():
        return_predict = torch.tensor([])
        for i, data in enumerate(test_loader, 0):
            inputs, sequence, labels = data
            if args.cuda > -1:
                inputs, sequence, labels = inputs.cuda(), sequence.cuda(), labels.cuda()
            output = net(inputs, sequence)
            # Predicted class = argmax over the output logits.
            _, predicted = torch.max(output, 1)
            if i == 0:
                return_predict = predicted
                continue
            return_predict = torch.cat((return_predict, predicted))
    return return_predict
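

# A minimal evaluation sketch (an assumed helper, not part of the original
# pipeline): it mirrors the commented-out metric calls in main() below,
# scoring predict() output on a held-out fold with scikit-learn.
def evaluate(args, net, test_loader, y_test):
    predicted = predict(args, net, test_loader)
    y_true = y_test.numpy()
    y_pred = predicted.detach().cpu().numpy()
    # Binary precision/recall/F1 for the positive phenotype class.
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, pos_label=1, average='binary')
    acc = accuracy_score(y_true, y_pred)
    print('precision: {:.3f}, recall: {:.3f}, F1: {:.3f}, accuracy: {:.3f}'.format(
        precision, recall, f1, acc))
    return precision, recall, f1, acc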


def main():
    global args, word2vec, batch_size, train_set_idx
    global weight_scale, phenotypedictinverse
    phenotypedict = {"Cancer": 11, "Heart": 4, "Lung": 5, "Neuro": 10, "Pain": 9,
                     "Alcohol": 7, "Substance": 8, "Obesity": 1, "Disorders": 6,
                     "Depression": 12}
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # parser.add_argument('clean_summaries0209.csv', help="source input file", type=str)
    # parser.add_argument('word2vec_50d.txt', help="word2vec file", type=str)
    parser.add_argument('--padding', help="padding around each text", type=int, default=4)
    parser.add_argument('--max_note_len', help="cut off all notes longer than this (0 = no cutoff)", type=int, default=0)
    parser.add_argument('--filename', help="file name for the output file", type=str, default="data.h5")
    parser.add_argument('-predict_label', type=int, default=phenotypedict["Depression"], help='id of the phenotype to detect')
    parser.add_argument('-topred', type=str, default="Depression", help='name of the phenotype to detect')
    parser.add_argument('-epochs', type=int, default=10, help='number of epochs for training [default: 10]')
    parser.add_argument('-batch_size', type=int, default=8, help='batch size for training [default: 8]')
    parser.add_argument('-output_size', type=int, default=2, help='final output dim [default: 2]')
    parser.add_argument('-hidden_size', type=int, default=256, help='output dim of the LSTM cell [default: 256]')
    parser.add_argument('-embedding_length', type=int, default=50, help='number of embedding dimensions [default: 50]')
    parser.add_argument('-learning_rate', type=float, default=0.005, help='initial learning rate [default: 0.005]')
    parser.add_argument('-vocab_size', type=int, default=48849, help='vocabulary size [default: 48849]')
    parser.add_argument('-optimizer', type=str, default='Adam', help='optimizer for gradient descent: Adadelta or Adam')
    parser.add_argument('-cuda', type=int, default=-1, help='GPU device id (-1 = CPU)')
    parser.add_argument('-debug', type=int, default=0, help='debug mode that prints intermediate tensors')
    parser.add_argument('-l2s', type=float, default=3, help='max L2 norm for the weights')
    # with open("conditions.dict", 'w') as f:
    #     for i, c in enumerate(conditions):
    #         print(i + 1, c, file=f)
    args = parser.parse_args()
    phenotypedictinverse = {v: k for k, v in phenotypedict.items()}
    phenotypedictsamples = {"Cancer": 161, "Heart": 275, "Lung": 167, "Neuro": 368,
                            "Pain": 321, "Alcohol": 196, "Substance": 155,
                            "Obesity": 126, "Disorders": 295, "Depression": 460}
    # Inverse-frequency class weights over the 1610 labeled notes:
    weight_scale = [1 / (1610 - phenotypedictsamples[phenotypedictinverse[args.predict_label]]),
                    1 / phenotypedictsamples[phenotypedictinverse[args.predict_label]]]
    # weight_scale = [phenotypedictsamples[phenotypedictinverse[args.predict_label]] / 1610 * 10,
    #                 (1610 - phenotypedictsamples[phenotypedictinverse[args.predict_label]]) / 1610 * 10]
    if args.cuda > -1:
        weight_scale = torch.FloatTensor(weight_scale).cuda()
    print('Weight scale is:', weight_scale)
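    # Worked example: for Depression (460 positives out of 1610 labeled notes),
    # weight_scale = [1 / (1610 - 460), 1 / 460] ~= [0.00087, 0.00217].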
    # Load the pretrained word2vec embeddings
    # (trained on the whole labeled + unlabeled dataset, 470260 vectors).
    word2vec, emb_size, v_large = load_bin_vec("word2vec_50d.txt")
    print('WORD2VEC POINTS:', v_large)
    # First step (run once to build the cross-validation folds):
    # lbl, targets, ids, subj, time, embed = preprocess(args, emb_size, word2vec)
    # lbl_train, lbl_train_target, lbl_test, lbl_test_target, phenotypedict = cross_validation(
    #     lbl, targets, ids, subj, time, args.topred, phenotypedict, phenotypedictsamples)
    fold = 1
    # Write the data of each fold into an .h5 file:
    '''
    for i in range(0, fold):
        with h5py.File('data_biased_' + args.topred + '_cv{0}_occ'.format(i + 1) + '0' + '.h5', "w") as f:
            xtrain = np.array(lbl_train[i], dtype=int)
            xtraintarget = np.array(lbl_train_target[i], dtype=int)
            xtest = np.array(lbl_test[i], dtype=int)
            xtesttarget = np.array(lbl_test_target[i], dtype=int)
            f["w2v"] = np.array(embed)
            f['train'] = xtrain
            f['train_label'] = xtraintarget[:, phenotypedict[args.topred]]
            f['test'] = xtest
            f['test_label'] = xtesttarget[:, phenotypedict[args.topred]]
    '''
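    # Each fold file then carries five datasets: 'w2v' (the embedding matrix),
    # 'train'/'train_label', and 'test'/'test_label', read back by readh5todata.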
    if args.cuda > -1:
        torch.cuda.set_device(args.cuda)
        torch.backends.cudnn.benchmark = True
    for i in range(0, fold):
        train, test, y_test, w2v = readh5todata(
            args, 'data_biased_' + phenotypedictinverse[args.predict_label] +
            '_cv{0}'.format(i + 1) + '_occ' + '0' + '.h5')
        args.w2v = w2v
        train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, sampler=None, shuffle=False)
        test_loader = torch.utils.data.DataLoader(test, batch_size=args.batch_size, sampler=None, shuffle=False)
        LSTM = LSTMClassifier(args)
        print(LSTM)
        train_model(args, LSTM, args.learning_rate, args.batch_size, args.epochs, train_loader)
        # Held-out evaluation (kept for reference; see evaluate() above):
        # predicted = predict(args, LSTM, test_loader)
        # scores = precision_recall_fscore_support(y_test.numpy(), predicted.detach().cpu().numpy(), pos_label=1, average='binary')
        # acc_score = accuracy_score(y_test.numpy(), predicted.detach().cpu().numpy())
        # print(f'Epoch: {epoch + 1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%')
        # save_path = saver.save(sesh, '/Users/han/Desktop/deep learning/model/model.ckpt')


if __name__ == "__main__":
    main()