def test_and_save(_global_step, epoch, lri):
    global global_accuracy
    global epoch_start
    global _MARK

    # Run the test set through the network in fixed-size batches.
    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)  # np.int was removed in NumPy 1.24
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(lri)})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    # Flag the sweep to stop once the accuracy threshold is crossed.
    if acc > 25:
        _MARK = 1
    file_.write(str(lri) + ' ' + str(lr(lri)) + ' ' + str(_global_step) +
                ' ' + str(acc) + '\n')

    correct_numbers = correct.sum()
    hours, rem = divmod(time() - epoch_start, 3600)
    minutes, seconds = divmod(rem, 60)
    mes = " accuracy: {:.2f}% ({}/{}) - time: {:0>2}:{:0>2}:{:05.2f}"
    print(mes.format(acc, correct_numbers, len(test_x), int(hours),
                     int(minutes), seconds))
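# None of these snippets define lr(...); below is a minimal sketch of a
# step-decay schedule that is call-compatible with the usages in this file
# (lr(lri), lr(epoch, 0.1), lr(epoch, args.a0, args.lr_mode)). The decay
# points and the 'step' mode name are assumptions, not from the original code.
def lr(epoch, a0=0.1, lr_mode='step'):
    """Hypothetical schedule: start at a0, decay by 10x at epochs 50 and 100."""
    if lr_mode == 'step':
        if epoch < 50:
            return a0
        if epoch < 100:
            return a0 / 10.0
        return a0 / 100.0
    return a0  # constant learning rate as a fallback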
def test_and_save(_global_step, epoch):
    global global_accuracy

    # On the first epoch, capture the activations of the first two hidden layers.
    activations = []
    if epoch == 0:
        layer_index = 1
        for layer in hlayer_activations:
            if layer_index < 3:
                layer_values = layer.eval(
                    feed_dict={x: test_x, y: test_y, learning_rate: lr(epoch)},
                    session=sess)
                n_samples = layer_values.shape[0]
                # Flatten each sample; integer division so reshape gets an int.
                layer_values = np.reshape(
                    layer_values, (n_samples, layer_values.size // n_samples))
                # np.savez_compressed('out_dir/layer%d.%.4d' % (layer_index, epoch),
                #                     a=layer_values)
                activations.append(layer_values)
            layer_index += 1

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{})"
    print(mes.format((epoch + 1), acc, correct_numbers, len(test_x)))

    # Checkpoint only when the test accuracy improves on the best so far.
    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        train_writer.add_summary(summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH, global_step=_global_step)
        mes = "This epoch achieved better accuracy: {:.2f} > {:.2f}. Saving session..."
        print(mes.format(acc, global_accuracy))
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc

    print("###########################################################################################################")
def train(lri, epoch):
    global epoch_start
    epoch_start = time()
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))  # number of batches per epoch
    i_global = 0
    print("real learning rate is: {}".format(lr(lri)))

    # Reshuffle the training set at the start of each epoch.
    new_order = np.random.permutation(len(train_x))
    train_x1 = train_x[new_order]
    train_y1 = train_y[new_order]

    for s in range(batch_size):
        # Stop early once test_and_save has flagged the accuracy threshold.
        if _MARK == 1:
            print('exit batch loop now')
            break
        batch_xs = train_x1[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
        batch_ys = train_y1[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]

        start_time = time()
        i_global, _, batch_loss, batch_acc = sess.run(
            [global_step, optimizer, loss, accuracy],
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(lri)})
        duration = time() - start_time

        msg = "Global step: {:>5} - acc: {:.4f} - loss: {:.4f} - {:.1f} sample/sec"
        print(msg.format(i_global, batch_acc, batch_loss,
                         _BATCH_SIZE / duration))

    test_and_save(i_global, epoch, lri)
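# The lri parameter and the _MARK flag imply an outer learning-rate sweep
# driving train(lri, epoch). A hypothetical driver of that shape (the sweep
# range and epoch value are assumptions, not from the original code):
_MARK = 0
for lri in range(200):
    train(lri, epoch=0)
    if _MARK == 1:
        break  # test accuracy crossed 25%; stop the sweep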
def train(epoch):
    global epoch_start
    epoch_start = time()
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))  # number of batches per epoch
    i_global = 0
    # pdb.set_trace()
    # for s in range(batch_size):
    # -1 to keep the batch size fixed (the last, smaller batch is dropped)
    for s in range(batch_size - 1):
        batch_xs = train_x[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
        batch_ys = train_y[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]

        # Write both image summaries for this batch.
        train_writer.add_summary(summ_op_2.eval(
            feed_dict={x: batch_xs, y: batch_ys, pade_x: batch_xs,
                       pade_y: batch_ys, learning_rate: lr(epoch)},
            session=sess))
        train_writer.add_summary(summ_op_1.eval(
            feed_dict={x: batch_xs, y: batch_ys, pade_x: batch_xs,
                       pade_y: batch_ys, learning_rate: lr(epoch)},
            session=sess))
        # train_writer.add_summary(summ_op_2.eval())
        # pdb.set_trace()

        start_time = time()
        i_global, _, batch_loss, batch_acc, img_summ = sess.run(
            [global_step, optimizer, loss, accuracy, summ_op_2],
            feed_dict={x: batch_xs, y: batch_ys, pade_x: batch_xs,
                       pade_y: batch_ys, learning_rate: lr(epoch)})
        # feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        duration = time() - start_time

        if s % 10 == 0:
            percentage = int(round((s / batch_size) * 100))
            bar_len = 29
            filled_len = int((bar_len * percentage) / 100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = ("Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - "
                   "loss: {:.4f} - {:.1f} sample/sec")
            print(msg.format(i_global, bar, percentage, batch_acc, batch_loss,
                             _BATCH_SIZE / duration))

    test_and_save(i_global, epoch)
def train(epoch):
    global epoch_start
    epoch_start = time()
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))  # number of batches per epoch
    i_global = 0
    for s in range(batch_size):
        batch_xs = train_x[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
        batch_ys = train_y[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]

        start_time = time()
        i_global, _, batch_loss, batch_acc = sess.run(
            [global_step, optimizer, loss, accuracy],
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        duration = time() - start_time

        if s % 10 == 0:
            percentage = int(round((s / batch_size) * 100))
            bar_len = 29
            filled_len = int((bar_len * percentage) / 100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = ("Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - "
                   "loss: {:.4f} - {:.1f} sample/sec")
            print(msg.format(i_global, bar, percentage, batch_acc, batch_loss,
                             _BATCH_SIZE / duration))

    test_and_save(i_global, epoch)
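# These train(epoch) variants are presumably driven by an outer epoch loop of
# this shape (a sketch; _EPOCH is an assumed constant for the epoch count):
for epoch in range(_EPOCH):
    print("\nEpoch: {}/{}\n".format(epoch + 1, _EPOCH))
    train(epoch)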
def train(epoch):
    global epoch_start
    epoch_start = time()
    # TODO:
    batch_size = int(50000 / _BATCH_SIZE)  # number of batches (train-set size hard-coded)
    i_global = 0
    for s in range(batch_size):
        batch_xs, batch_ys = next(gen_train)  # next() works in both Python 2 and 3

        start_time = time()
        ######################################################################
        # EE599: Running the optimization on NN for one step:
        i_global, _, batch_loss, batch_acc, y_pred_label, _logits, _softmax = sess.run(
            [global_step, optimizer, loss, accuracy, y_pred_cls, logits, softmax],
            feed_dict={x: batch_xs, y: batch_ys,
                       learning_rate: lr(epoch, args.a0, args.lr_mode)})
        ########################################################################
        duration = time() - start_time

        if s % 10 == 0:
            percentage = int(round((float(s) / batch_size) * 100))
            bar_len = 29
            filled_len = int((bar_len * percentage) / 100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = "Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - loss: {:.4f}"
            txt = msg.format(i_global, bar, percentage, batch_acc, batch_loss)
            print(txt)
            outfile.write(txt + "\n")

    test_and_save(i_global, epoch)
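# gen_train is not defined in these snippets; a sketch of a compatible
# infinite, reshuffling batch generator over the training arrays (the name
# batch_generator and its exact behavior are assumptions):
def batch_generator(xs, ys, batch_size):
    while True:
        order = np.random.permutation(len(xs))
        for s in range(len(xs) // batch_size):
            idx = order[s * batch_size:(s + 1) * batch_size]
            yield xs[idx], ys[idx]

gen_train = batch_generator(train_x, train_y, _BATCH_SIZE)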
def test_and_save(_global_step, epoch):
    global global_accuracy
    global epoch_start

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        # Skip the last, smaller batch to keep the batch size fixed.
        if j == i + _BATCH_SIZE:
            batch_xs = test_x[i:j, :]
            batch_ys = test_y[i:j, :]
            predicted_class[i:j] = sess.run(
                y_pred_cls,
                feed_dict={x: batch_xs, y: batch_ys, pade_x: batch_xs,
                           pade_y: batch_ys, learning_rate: lr(epoch)})
            # feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)}
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    hours, rem = divmod(time() - epoch_start, 3600)
    minutes, seconds = divmod(rem, 60)
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{}) - time: {:0>2}:{:0>2}:{:05.2f}"
    print(mes.format((epoch + 1), acc, correct_numbers, len(test_x),
                     int(hours), int(minutes), seconds))

    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        # Note: this builds a new image-summary op on every call, and the
        # result is never written (the add_summary below is commented out).
        pade_summary = tf.summary.image(
            'pade_output_4', pade_output)  # , step=None, max_outputs=3, description=None)
        train_writer.add_summary(summary, _global_step)
        # train_writer.add_summary(pade_summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH, global_step=_global_step)
        mes = "This epoch achieved better accuracy: {:.2f} > {:.2f}. Saving session..."
        print(mes.format(acc, global_accuracy))
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc

    print("###########################################################################################################")
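# If the pade_output images are actually meant to be logged, the usual TF 1.x
# pattern is to build the image-summary op once at graph-construction time and
# only run it per call. A sketch; the helper name and feed values are
# assumptions, not from the original code:
pade_img_summ = tf.summary.image('pade_output_4', pade_output, max_outputs=3)

def log_pade_images(_global_step, batch_xs, batch_ys):
    # Run the pre-built summary op and write the serialized result.
    img_summ = sess.run(pade_img_summ,
                        feed_dict={x: batch_xs, y: batch_ys,
                                   pade_x: batch_xs, pade_y: batch_ys})
    train_writer.add_summary(img_summ, _global_step)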
def train_step(x_batch, y_batch, epoch, keep_percent):
    feed_dict = {
        x: x_batch,
        y: y_batch,
        learning_rate: lr(epoch),
        keep_prob: keep_percent
    }
    i_global, _, train_summaries, batch_loss, batch_acc = sess.run(
        [global_step, optimizer, train_summary_op, loss, accuracy], feed_dict)
    return i_global, train_summaries, batch_loss, batch_acc
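# A sketch of how train_step might be driven inside an epoch loop (the batch
# slicing and the keep_percent=0.5 dropout value are assumptions, not from the
# original code):
num_batches = int(math.ceil(len(train_x) / _BATCH_SIZE))
for s in range(num_batches):
    x_batch = train_x[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
    y_batch = train_y[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
    i_global, train_summaries, batch_loss, batch_acc = train_step(
        x_batch, y_batch, epoch, keep_percent=0.5)
    train_writer.add_summary(train_summaries, i_global)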
def test_and_save(_global_step, epoch):
    global global_accuracy
    global epoch_start

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    hours, rem = divmod(time() - epoch_start, 3600)
    minutes, seconds = divmod(rem, 60)
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{}) - time: {:0>2}:{:0>2}:{:05.2f}"
    print(mes.format((epoch + 1), acc, correct_numbers, len(test_x),
                     int(hours), int(minutes), seconds))

    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        train_writer.add_summary(summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH_OF_CKPT, global_step=_global_step)
        # Also export the graph definition alongside the checkpoint.
        tf.train.write_graph(sess.graph_def, '.', 'minimal_graph.proto',
                             as_text=False)
        mes = "This epoch achieved better accuracy: {:.2f} > {:.2f}. Saving session..."
        print(mes.format(acc, global_accuracy))
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc

    print("###########################################################################################################")
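# Since this variant exports minimal_graph.proto, the trained variables can
# later be folded into that graph for deployment. A sketch using the TF 1.x
# graph_util API; the output node name 'y_pred_cls' is an assumption about
# how the prediction op was named:
from tensorflow.python.framework import graph_util

frozen_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph_def, ['y_pred_cls'])
tf.train.write_graph(frozen_graph_def, '.', 'frozen_graph.pb', as_text=False)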
def train(epoch):
    global epoch_start
    epoch_start = time()
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))  # number of batches per epoch
    i_global = 0
    for s in range(batch_size):
        batch_xs = train_x[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]
        batch_ys = train_y[s * _BATCH_SIZE:(s + 1) * _BATCH_SIZE]

        start_time = time()
        i_global, _, batch_loss, batch_acc = sess.run(
            [global_step, optimizer, loss, accuracy],
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        duration = time() - start_time

        if s % 10 == 0:
            percentage = int(round((s / batch_size) * 100))
            bar_len = 29
            filled_len = int((bar_len * percentage) / 100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = ("Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - "
                   "loss: {:.4f} - {:.1f} sample/sec")
            print(msg.format(i_global, bar, percentage, batch_acc, batch_loss,
                             _BATCH_SIZE / duration))

            # Note: these are training-batch metrics, although they are written
            # under "*/test" tags; the session is also checkpointed twice here.
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="Accuracy/test",
                                 simple_value=batch_acc * 100),
            ])
            train_writer.add_summary(summary, i_global)
            saver.save(sess, save_path=_SAVE_PATH, global_step=i_global)
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="Loss/test", simple_value=batch_loss),
            ])
            train_writer.add_summary(summary, i_global)
            saver.save(sess, save_path=_SAVE_PATH, global_step=i_global)

    test_and_save(i_global, epoch, batch_loss)
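# The hand-built tf.Summary protos above can equivalently come from graph-side
# summary ops merged once at construction time (a sketch; the tag names are
# assumptions, not from the original code):
acc_summ = tf.summary.scalar('Accuracy/train_batch', accuracy)
loss_summ = tf.summary.scalar('Loss/train_batch', loss)
merged_summ = tf.summary.merge([acc_summ, loss_summ])
# Inside the batch loop this replaces the two manual protos:
# summ = sess.run(merged_summ, feed_dict={x: batch_xs, y: batch_ys,
#                                         learning_rate: lr(epoch)})
# train_writer.add_summary(summ, i_global)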
def test_and_save(_global_step, epoch):
    global global_accuracy
    global epoch_start

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch, 0.1)})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    # Elapsed time is computed but not shown in this variant's message.
    hours, rem = divmod(time() - epoch_start, 3600)
    minutes, seconds = divmod(rem, 60)
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{})"
    txt = mes.format((epoch + 1), acc, correct_numbers, len(test_x))
    print(txt)
    outfile.write(txt + "\n")

    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        train_writer.add_summary(summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH + 'newModel',
                   global_step=_global_step)
        mes = "This epoch achieved better accuracy: {:.2f} > {:.2f}. Saving session..."
        txt = mes.format(acc, global_accuracy)
        print(txt)
        outfile.write(txt + "\n")
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc

    print("##########################################################################")
def test_and_save(_global_step, epoch, _global_loss):
    global global_accuracy
    global epoch_start

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    hours, rem = divmod(time() - epoch_start, 3600)
    minutes, seconds = divmod(rem, 60)
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{}) - time: {:0>2}:{:0>2}:{:05.2f}"
    print(mes.format((epoch + 1), acc, correct_numbers, len(test_x),
                     int(hours), int(minutes), seconds))

    # Unlike the other variants, this one updates global_accuracy
    # unconditionally and never checkpoints.
    mes = "This epoch achieved better accuracy: {:.2f} > {:.2f}. Saving session..."
    print(mes.format(acc, global_accuracy))
    global_accuracy = acc

    print("###########################################################################################################")
try:
    print("\nTrying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except Exception:
    print("\nFailed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())

# np.zeros() has no 'name' argument; collect the outputs in a plain array.
predicted_out = np.zeros(shape=test_y.shape, dtype=np.float32)
predicted_class = np.zeros(shape=len(test_x), dtype=int)
i = 0
while i < len(test_x):
    j = min(i + _BATCH_SIZE, len(test_x))
    batch_xs = test_x[i:j, :]
    batch_ys = test_y[i:j, :]
    predicted_class[i:j], predicted_out[i:j] = sess.run(
        [y_pred_cls, output],
        feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(1),
                   keep_prob: 1.0})
    i = j

# Hold the collected outputs in a variable so the projector can find them;
# with only a shape argument the variable would never contain the data.
vector_embeddings = tf.get_variable('predicted_out', initializer=predicted_out)
sess.run(vector_embeddings.initializer)

with open("metadata.tsv", 'w') as file_metadata:
    # enumerate() yields (index, value) tuples, which cannot be concatenated
    # to a string; write one predicted label per line instead.
    for label in predicted_class:
        file_metadata.write('{}\n'.format(label))
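# metadata.tsv is the label file the TensorBoard embedding projector expects;
# a sketch of wiring it to the 'predicted_out' variable above (TF 1.x contrib
# API; the fresh saver and the save path are assumptions, not from the
# original code):
from tensorflow.contrib.tensorboard.plugins import projector

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = vector_embeddings.name
embedding.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(train_writer, config)

# A fresh saver so the newly created variable is captured in the checkpoint
# the projector reads.
emb_saver = tf.train.Saver([vector_embeddings])
emb_saver.save(sess, save_path=_SAVE_PATH, global_step=0)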