tsc_model.py
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'    # silence TensorFlow's C++ startup messages
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)  # silence TensorFlow's Python-side logging
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.rnn.python.ops import core_rnn
def load_data(ratio, dataset):
    """Load a CSV dataset (first column = class label) and split it into train/val/test sets.

    ratio is an array of two cumulative split fractions, e.g. (0.8, 0.9)."""
    DATA = np.loadtxt(dataset, delimiter=',')
    N = DATA.shape[0]
    ratio = (ratio*N).astype(np.int32)   # cumulative fractions -> row indices
    ind = np.random.permutation(N)
    X_train = DATA[ind[:ratio[0]], 1:]
    X_val = DATA[ind[ratio[0]:ratio[1]], 1:]
    X_test = DATA[ind[ratio[1]:], 1:]
    # Targets have labels 1-indexed. We subtract one for 0-indexed
    y_train = DATA[ind[:ratio[0]], 0] - 1
    y_val = DATA[ind[ratio[0]:ratio[1]], 0] - 1
    y_test = DATA[ind[ratio[1]:], 0] - 1
    return X_train, X_val, X_test, y_train, y_val, y_test
def sample_batch(X_train, y_train, batch_size):
    """Function to sample a batch for training (without replacement)"""
    N, data_len = X_train.shape
    ind_N = np.random.choice(N, batch_size, replace=False)
    X_batch = X_train[ind_N]
    y_batch = y_train[ind_N]
    return X_batch, y_batch
class Model():
    def __init__(self, config):
        num_layers = config['num_layers']
        hidden_size = config['hidden_size']
        max_grad_norm = config['max_grad_norm']
        self.batch_size = config['batch_size']
        sl = config['sl']                    # sequence length of the time series
        learning_rate = config['learning_rate']
        num_classes = config['num_classes']

        """Place holders"""
        self.input = tf.placeholder(tf.float32, [None, sl], name='input')
        self.labels = tf.placeholder(tf.int64, [None], name='labels')
        self.keep_prob = tf.placeholder("float", name='Drop_out_keep_prob')

        with tf.name_scope("LSTM_setup") as scope:
            def single_cell():
                return tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_size), output_keep_prob=self.keep_prob)

            cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
            initial_state = cell.zero_state(self.batch_size, tf.float32)

            # Each scalar time step becomes a [batch_size, 1] input to the LSTM.
            input_list = tf.unstack(tf.expand_dims(self.input, axis=2), axis=1)
            outputs, _ = core_rnn.static_rnn(cell, input_list, dtype=tf.float32)

            output = outputs[-1]

        # Generate a classification from the last cell_output
        with tf.name_scope("Softmax") as scope:
            with tf.variable_scope("Softmax_params"):
                softmax_w = tf.get_variable("softmax_w", [hidden_size, num_classes])
                softmax_b = tf.get_variable("softmax_b", [num_classes])
            logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.labels, name='softmax')
            self.cost = tf.reduce_sum(loss) / self.batch_size

        with tf.name_scope("Evaluating_accuracy") as scope:
            correct_prediction = tf.equal(tf.argmax(logits, 1), self.labels)
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            h1 = tf.summary.scalar('accuracy', self.accuracy)
            h2 = tf.summary.scalar('cost', self.cost)

        # Optimizer
        with tf.name_scope("Optimizer") as scope:
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), max_grad_norm)
            optimizer = tf.train.AdamOptimizer(learning_rate)
            # Materialize the (gradient, variable) pairs: a bare zip() iterator
            # would be exhausted by apply_gradients and the summary loop below
            # would never run under Python 3.
            gradients = list(zip(grads, tvars))
            self.train_op = optimizer.apply_gradients(gradients)
            for gradient, variable in gradients:  # plot the gradient of each trainable variable
                if isinstance(gradient, ops.IndexedSlices):
                    grad_values = gradient.values
                else:
                    grad_values = gradient
                tf.summary.histogram(variable.name, variable)
                tf.summary.histogram(variable.name + "/gradients", grad_values)
                tf.summary.histogram(variable.name + "/gradient_norm", clip_ops.global_norm([grad_values]))

        # Final code for the TensorBoard
        #self.merged = tf.summary.merge_all()
        self.merged = tf.constant(1)
        self.init_op = tf.global_variables_initializer()
        print('Finished computation graph')
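
# Hypothetical usage sketch (not part of the original file): a minimal example of
# how load_data, sample_batch and Model could fit together in a TF 1.x session.
# The config keys match those read in Model.__init__; the file name
# 'UCR_dataset.csv', the hyperparameter values and the step counts are
# illustrative placeholders, not values taken from this repository.
if __name__ == '__main__':
    ratio = np.array([0.8, 0.9])  # cumulative train/val split points
    X_train, X_val, X_test, y_train, y_val, y_test = load_data(ratio, 'UCR_dataset.csv')

    config = {'num_layers': 2,
              'hidden_size': 64,
              'max_grad_norm': 5,
              'batch_size': 30,
              'sl': X_train.shape[1],             # sequence length = number of feature columns
              'learning_rate': 0.005,
              'num_classes': int(y_train.max()) + 1}  # labels are 0-indexed after load_data

    model = Model(config)
    with tf.Session() as sess:
        sess.run(model.init_op)
        for step in range(1000):
            X_batch, y_batch = sample_batch(X_train, y_train, config['batch_size'])
            cost, _ = sess.run([model.cost, model.train_op],
                               feed_dict={model.input: X_batch,
                                          model.labels: y_batch,
                                          model.keep_prob: 0.8})
            if step % 100 == 0:
                print('Step %d: cost %.3f' % (step, cost))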