test_loss_summ = graph.get_tensor_by_name("test_batch_loss:0")
train_avg_loss_summ = graph.get_tensor_by_name("train_avg_loss:0")
test_avg_loss_summ = graph.get_tensor_by_name("test_avg_loss:0")
train_gini_summ = graph.get_tensor_by_name("train_gini:0")
test_gini_summ = graph.get_tensor_by_name("test_gini:0")
train_decile_summ = graph.get_tensor_by_name("train_decile:0")
test_decile_summ = graph.get_tensor_by_name("test_decile:0")

train_count = 0
test_count = 0
for i in range(previous_count + 1, previous_count + epochs + 1):
    train_x = divide_batches_gen(trans_train_data, batch_size)
    test_x = divide_batches_gen(trans_test_data, batch_size)

    # Train Data.
    count = 0
    train_loss = 0
    for train_data, train_label in zip(train_x, train_y):
        train_count += 1
        count += 1
        _, l = sess.run([optimizer, loss], feed_dict={
            x: train_data,
            y: train_label
        })
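The batching helper used throughout these listings is defined elsewhere in the project. As a point of reference, here is a minimal sketch of what divide_batches_gen might look like, assuming it simply yields consecutive fixed-size slices of an indexable dataset; the real implementation may differ.

# Hypothetical helper, not from the original listing: lazily yields
# consecutive batches of up to batch_size rows from the input data.
def divide_batches_gen(data, batch_size):
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]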
x_lstm = graph.get_tensor_by_name('placeholders/input_lstm:0')
y = graph.get_tensor_by_name('placeholders/output:0')
z = graph.get_tensor_by_name('placeholders/z:0')
lr = graph.get_tensor_by_name('placeholders/lr:0')
kp = graph.get_tensor_by_name('placeholders/kp:0')
y_ = tf.get_collection("y_")[0]

# writer = tf.summary.FileWriter(logdir, sess.graph)
# writer.add_graph(sess.graph)

train_count = 0
test_count = 0
# Feed the configured learning rate through the lr placeholder.
sess.run(lr, feed_dict={lr: learning_rate})

train_ffn_x = divide_batches_gen(ffn_train_data, batch_size)
train_lstm_x = divide_batches(lstm_train_data, batch_size)

# Calculate decile.
train_predictions = []
for train_data_ffn, train_data_lstm in zip(train_ffn_x, train_lstm_x):
    model_prediction = sess.run(y_, feed_dict={
        x_ffn: train_data_ffn,
        x_lstm: train_data_lstm,
        kp: 1.0
    })
    train_predictions.append(model_prediction)
train_predictions = [
    item for sublist in train_predictions for item in sublist
]
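The listing only shows the per-batch predictions being collected and flattened; the decile metric itself is computed where the summaries are defined. As an illustration, here is one common formulation, assuming "decile" means the mean predicted score within each of ten equally sized, score-sorted buckets; the project's actual definition may differ.

import numpy as np

# Hypothetical decile helper, not from the original listing: sorts the
# scores in descending order, splits them into ten roughly equal buckets,
# and returns the mean score of each bucket.
def decile_means(predictions):
    preds = np.sort(np.asarray(predictions).ravel())[::-1]
    buckets = np.array_split(preds, 10)
    return [float(b.mean()) for b in buckets]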
with tf.Session() as sess:
    model_saver.restore(sess, ckpt)
    graph = tf.get_default_graph()
    # for op in tf.get_default_graph().get_operations():
    #     print(str(op.name))
    x = graph.get_tensor_by_name('placeholders/input:0')
    y = graph.get_tensor_by_name('placeholders/output:0')
    z = graph.get_tensor_by_name('placeholders/z:0')
    y_ = tf.get_collection("y_")[0]
    loss = tf.get_collection("loss")[0]

    infer_x = divide_batches_gen(inference_data, batch_size)

    count = 0
    infer_loss = 0
    infer_predictions = []
    for infer_data, infer_label in zip(infer_x, infer_y):
        count += 1
        l, model_prediction = sess.run([loss, y_], feed_dict={
            x: infer_data,
            y: infer_label
        })
        infer_loss += l
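The model_saver and ckpt objects are created before this listing. For completeness, here is one way to obtain them with the standard TF1 checkpoint API, assuming the checkpoints live in a directory referred to here as checkpoint_dir (an illustrative name, not from the original listing).

import tensorflow as tf

# Hypothetical setup for model_saver/ckpt used above: locate the most
# recent checkpoint in checkpoint_dir and import its saved meta-graph,
# which returns a Saver that can restore the variables.
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
model_saver = tf.train.import_meta_graph(ckpt + ".meta")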