# Instantiate a model
model = Model(config)

"""Session time"""
sess = tf.Session()  # Depending on your use, do not forget to close the session
writer = tf.summary.FileWriter("/home/kyle/dvlp/ml/LSTM/log_tb", sess.graph)  # writer for Tensorboard
sess.run(model.init_op)

step = 0
# Moving average of the training cost, seeded at the cost of a uniform
# prediction over num_classes (the 1e-9 guards the log against zero).
cost_train_ma = -np.log(1 / float(num_classes) + 1e-9)
acc_train_ma = 0.0
try:
    for i in range(max_iterations):
        X_batch, y_batch = sample_batch(X_train, y_train, batch_size)
        # Next line does the actual training
        cost_train, acc_train, _ = sess.run(
            [model.cost, model.accuracy, model.train_op],
            feed_dict={model.input: X_batch,
                       model.labels: y_batch,
                       model.keep_prob: dropout})
        cost_train_ma = cost_train_ma * 0.99 + cost_train * 0.01
        acc_train_ma = acc_train_ma * 0.99 + acc_train * 0.01
        if i % 100 == 0:
            # Evaluate training performance
            perf_collect[0, step] = cost_train
            # BUG FIX: the original also wrote acc_train into perf_collect[1, step]
            # here, but that value was unconditionally overwritten by cost_val a few
            # lines below in the same block -- the dead store has been removed.
            # Evaluate validation performance (dropout disabled via keep_prob=1.0).
            X_batch, y_batch = sample_batch(X_val, y_val, batch_size)
            cost_val, summ, acc_val = sess.run(
                [model.cost, model.merged, model.accuracy],
                feed_dict={model.input: X_batch,
                           model.labels: y_batch,
                           model.keep_prob: 1.0})
            perf_collect[1, step] = cost_val
            perf_collect[2, step] = acc_val
            # BUG FIX: `step` was initialized to 0 but never advanced, so every
            # snapshot clobbered column 0 of perf_collect.
            step += 1
# NOTE(review): the pasted source was truncated inside the try-block; it is
# closed here with the same KeyboardInterrupt guard the other training script
# in this file uses -- confirm against the original.
except KeyboardInterrupt:
    pass
# Where model checkpoints would be written.
model_datapath = os.path.join(os.getcwd(), "lstm_model")
model_name = 'ckp'

# Tally of results across restarts: dic counts outcomes 0..100, ans collects
# per-run answers, best tracks the best score seen so far.
dic = {}
ans = []
for i in range(101):
    dic[i] = 0
best = 0

# NOTE(review): `time` is never initialized or advanced anywhere in this
# chunk; as written this compares the `time` module (or some value defined
# elsewhere) against 50. A restart counter that increments once per pass is
# almost certainly what was intended -- confirm upstream before relying on
# this loop terminating.
while time < 50:
    # Re-initialize the model variables for a fresh training run.
    sess.run(model.init_op)
    # Moving average of the training cost, seeded at the cost of a uniform
    # prediction over num_classes (the 1e-9 guards the log against zero).
    cost_train_ma = -np.log(1 / float(num_classes) + 1e-9)
    acc_train_ma = 0.0
    for i in range(max_iterations):
        X_batch, y_batch = sample_batch(X_train, y_train, batch_size)
        # Next line does the actual training
        cost_train, acc_train, _ = sess.run(
            [model.cost, model.accuracy, model.train_op],
            feed_dict={model.input: X_batch,
                       model.labels: y_batch,
                       model.keep_prob: dropout})
        cost_train_ma = cost_train_ma * 0.99 + cost_train * 0.01
        acc_train_ma = acc_train_ma * 0.99 + acc_train * 0.01
        if i % 100 == 1:
            # Evaluate validation performance
            # X_batch, y_batch = X_test, y_test
            # cost_val, summ,acc_val = sess.run([model.cost,model.merged,model.accuracy],feed_dict = {model.input: X_batch, model.labels: y_batch, model.keep_prob:1.0})
            # NOTE(review): the pasted source was truncated here, leaving only
            # commented-out evaluation code; `pass` keeps the block valid.
            pass
# Report the approximate number of passes over the N training samples.
epochs = np.floor(batch_size * max_iterations / N)
print('Train %.0f samples in approximately %d epochs' % (N, epochs))

# Instantiate a model
model = Model(config)

"""Session time"""
sess = tf.Session()  # Depending on your use, do not forget to close the session
writer = tf.summary.FileWriter(summaries_dir, sess.graph)  # writer for Tensorboard
sess.run(model.init_op)

# Exponential moving averages of the training metrics; the cost average is
# seeded at the cost of a uniform prediction over num_classes (the 1e-9
# guards the log against zero).
cost_train_ma = -np.log(1 / float(num_classes) + 1e-9)
acc_train_ma = 0.0
try:
    for i in range(max_iterations):
        X_batch, y_batch = sample_batch(X_train, y_train, batch_size)
        # Next line does the actual training
        cost_train, acc_train, _ = sess.run(
            [model.cost, model.accuracy, model.train_op],
            feed_dict={model.input: X_batch,
                       model.labels: y_batch,
                       model.keep_prob: dropout})
        cost_train_ma = 0.99 * cost_train_ma + 0.01 * cost_train
        acc_train_ma = 0.99 * acc_train_ma + 0.01 * acc_train
        if i % 100 == 1:
            # Evaluate validation performance (dropout disabled via keep_prob=1.0).
            X_batch, y_batch = sample_batch(X_val, y_val, batch_size)
            cost_val, summ, acc_val = sess.run(
                [model.cost, model.merged, model.accuracy],
                feed_dict={model.input: X_batch,
                           model.labels: y_batch,
                           model.keep_prob: 1.0})
            print('At %5.0f/%5.0f: COST %5.3f/%5.3f(%5.3f) -- Acc %5.3f/%5.3f(%5.3f)'
                  % (i, max_iterations, cost_train, cost_val, cost_train_ma,
                     acc_train, acc_val, acc_train_ma))
            # Write information to TensorBoard
            writer.add_summary(summ, i)
            writer.flush()
except KeyboardInterrupt:
    pass