                  FLAGS.layers)
trainer = Trainer(seq2seq, FLAGS.optim, FLAGS.eta, FLAGS.clip)
train_writer = tf.summary.FileWriter(FLAGS.outdir + "/train", sess.graph)

init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
trainer.prepare(saver)

# Track the lowest error seen so far and the epoch it occurred in,
# so we can stop early when training stalls
err_min = 100
last_improved = 0

for i in range(FLAGS.epochs):
    print('Training epoch %d' % (i + 1))
    trainer.train(ts, sess, train_writer, FLAGS.dropout, FLAGS.batchsz)
    #print_batch(trainer.best_in_batch(es, sess, FLAGS.batchsz), rlut2)
    err_rate = trainer.test(es, sess, FLAGS.batchsz)
    if err_rate < err_min:
        # New best model: save it and record when it happened
        last_improved = i
        err_min = err_rate
        print('Lowest error achieved yet -- writing model')
        seq2seq.save(sess, FLAGS.outdir, 'seq2seq')
        trainer.checkpoint(sess, FLAGS.outdir, 'seq2seq')
    if (i - last_improved) > FLAGS.patience:
        print('Stopping due to persistent failures to improve')
        break
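The FLAGS referenced throughout are TF 1.x-style command-line flags. A minimal sketch of how they might be defined follows; the names match the code above, but the defaults and help strings are illustrative assumptions:

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string('optim', 'adam', 'Optimizer to use')
flags.DEFINE_float('eta', 0.001, 'Initial learning rate')
flags.DEFINE_float('clip', 5.0, 'Gradient clipping norm')
flags.DEFINE_float('dropout', 0.5, 'Dropout probability')
flags.DEFINE_integer('batchsz', 50, 'Batch size')
flags.DEFINE_integer('epochs', 60, 'Number of training epochs')
flags.DEFINE_integer('patience', 10, 'Epochs without improvement before early stop')
flags.DEFINE_integer('layers', 1, 'Number of RNN layers')
flags.DEFINE_string('outdir', 'out', 'Directory for summaries and saved models')

The same early-stopping pattern drives the classifier trainer below, which tracks the highest validation accuracy rather than the lowest error: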
init = tf.global_variables_initializer()
sess.run(init)
model.save_using(tf.train.Saver())

# Track the best validation accuracy and the epoch it occurred in
max_acc = 0
last_improved = 0

for i in range(FLAGS.epochs):
    print('Training epoch %d' % (i + 1))
    trainer.train(ts, FLAGS.dropout, FLAGS.batchsz)
    this_acc = trainer.test(vs, FLAGS.batchsz, 'Validation')
    if this_acc > max_acc:
        # New best model on the dev set: checkpoint it
        max_acc = this_acc
        last_improved = i
        trainer.checkpoint(FLAGS.save)
        print('Highest dev acc achieved yet -- writing model')
    if (i - last_improved) > FLAGS.patience:
        print('Stopping due to persistent failures to improve')
        break

print("-----------------------------------------------------")
print('Highest dev acc %.2f' % (max_acc * 100.))
print('=====================================================')
print('Evaluating best model on test data:')
print('=====================================================')
# Restore the best checkpoint and score it once on held-out test data
trainer.recover_last_checkpoint()
this_acc = trainer.test(es)
print("-----------------------------------------------------")
print('Test acc %.2f' % (this_acc * 100.))
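The trainer's checkpoint and recover_last_checkpoint methods are not shown in these listings. A minimal sketch of what that pair might look like with tf.train.Saver, assuming the trainer holds the session and saver (the class and attribute names here are hypothetical):

import tensorflow as tf

class CheckpointingTrainer(object):
    """Hypothetical sketch of the checkpoint/recover pair used above,
    assuming the trainer holds a tf.Session and a tf.train.Saver."""

    def __init__(self, sess, saver):
        self.sess = sess
        self.saver = saver
        self.last = None

    def checkpoint(self, base):
        # tf.train.Saver.save returns the path of the checkpoint it wrote
        self.last = self.saver.save(self.sess, base)

    def recover_last_checkpoint(self):
        # Restore the weights from the most recent checkpoint written above
        self.saver.restore(self.sess, self.last)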