def test_eval(self):
    """Evaluating the same fixed dev set twice must be deterministic.

    Builds random (x, y) dev data, wraps it in a seeded batch iterator,
    runs eval() twice over identical batches, and asserts both passes
    return identical loss and accuracy.
    """
    x_dev = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE * 5, SEQUENCE_LENGTH])
    # One-hot labels over 2 classes. np.random.randint's upper bound is
    # EXCLUSIVE, so it must be 2 to sample both classes; the old bound of 1
    # produced only class-0 labels, leaving class 1 untested.
    y_dev = np.eye(2)[np.random.randint(0, 2, [BATCH_SIZE * 5])]

    def make_eval_iter():
        # Fixed seed so both eval passes iterate identical batches.
        data_iter = tfmodels.data.utils.batch_iter(
            list(zip(x_dev, y_dev)), BATCH_SIZE, 1, fill=True, seed=42)
        return map(lambda batch: zip(*batch), data_iter)

    with self.graph.as_default(), self.sess.as_default():
        cnn = self._build_classifier()
        ev = CNNClassifierEvaluator(cnn)
        # NOTE: tf.initialize_all_variables() is the pre-1.0 API kept for
        # consistency with the rest of this file; newer TF uses
        # tf.global_variables_initializer().
        self.sess.run(tf.initialize_all_variables())
        loss, acc, current_step = ev.eval(make_eval_iter())
        loss2, acc2, current_step = ev.eval(make_eval_iter())
        self.assertGreater(loss, 0)
        self.assertGreater(acc, 0)
        # Same data + same graph state => bitwise-identical metrics.
        self.assertEqual(loss, loss2)
        self.assertEqual(acc, acc2)
def test_eval(self):
    """Two eval() passes over the same seeded dev iterator give equal metrics.

    Generates random dev inputs and one-hot labels, then checks that
    evaluation is deterministic: positive loss/accuracy and identical
    results across two runs.
    """
    x_dev = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE * 5, SEQUENCE_LENGTH])
    # Bug fix: randint's high bound is exclusive — with (0, 1) every label
    # was 0. Use (0, 2) so np.eye(2)[...] actually yields both one-hot rows.
    y_dev = np.eye(2)[np.random.randint(0, 2, [BATCH_SIZE * 5])]

    def make_eval_iter():
        # seed=42 makes the batch order reproducible between the two passes.
        data_iter = tfmodels.data.utils.batch_iter(
            list(zip(x_dev, y_dev)), BATCH_SIZE, 1, fill=True, seed=42)
        return map(lambda batch: zip(*batch), data_iter)

    with self.graph.as_default(), self.sess.as_default():
        cnn = self._build_classifier()
        ev = CNNClassifierEvaluator(cnn)
        # Pre-1.0 TF initializer, matching the rest of this file.
        self.sess.run(tf.initialize_all_variables())
        loss, acc, current_step = ev.eval(make_eval_iter())
        loss2, acc2, current_step = ev.eval(make_eval_iter())
        self.assertGreater(loss, 0)
        self.assertGreater(acc, 0)
        self.assertEqual(loss, loss2)
        self.assertEqual(acc, acc2)
# NOTE(review): this chunk starts mid-way through a checkpoint-restore branch;
# the enclosing `if latest_checkpoint:` (and any earlier variable init) is not
# visible here, so only comments are added — confirm structure in the full file.
print("Restoring checkpoint from {}".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)

# Initialize variables
# NOTE(review): running initialize_all_variables() AFTER saver.restore()
# overwrites every restored variable with fresh initial values, discarding the
# checkpoint just loaded. Initialization should happen before the restore —
# verify against the full script and drop or move this call.
sess.run(tf.initialize_all_variables())

# Training loop
# trainer.train_loop yields per-step (loss, accuracy, global step, wall time).
for train_loss, train_acc, current_step, time_delta in trainer.train_loop(
        train_data_iter):
    # Throughput of the step just completed.
    examples_per_second = FLAGS.batch_size / time_delta
    print("{}: step {}, loss {:g}, acc {:g} ({:g} examples/sec)".format(
        datetime.now().isoformat(), current_step, train_loss, train_acc,
        examples_per_second))
    # Evaluate dev set every FLAGS.evaluate_every steps.
    if current_step % FLAGS.evaluate_every == 0:
        dev_iter = tfmodels.data.utils.xy_iter(x_dev, y_dev, FLAGS.batch_size, 1)
        mean_loss, acc, _ = evaluator.eval(dev_iter, global_step=trainer.global_step)
        print(
            "{}: Step {}, Dev Accuracy: {:g}, Dev Mean Loss: {:g}".format(
                datetime.now().isoformat(), current_step, acc, mean_loss))
    # Checkpoint Model every FLAGS.checkpoint_every steps.
    if current_step % FLAGS.checkpoint_every == 0:
        save_path = saver.save(sess, checkpoint_file, global_step=trainer.global_step)
        print("Saved {}".format(save_path))
checkpoint_file = os.path.join(checkpoint_dir, "model.ckpt")
saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)

# Initialization, optionally load from checkpoint.
# Order matters: initialize FIRST, then restore. The original code ran a
# second initialize_all_variables() after saver.restore(), which overwrote
# every restored variable with fresh initial values and silently discarded
# the checkpoint. Restoring after init leaves restored values intact.
sess.run(tf.initialize_all_variables())
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
    print("Restoring checkpoint from {}".format(latest_checkpoint))
    saver.restore(sess, latest_checkpoint)

# Training loop: trainer.train_loop yields per-step
# (loss, accuracy, global step, wall time).
for train_loss, train_acc, current_step, time_delta in trainer.train_loop(train_data_iter):
    # Throughput of the step just completed.
    examples_per_second = FLAGS.batch_size / time_delta
    print("{}: step {}, loss {:g}, acc {:g} ({:g} examples/sec)".format(
        datetime.now().isoformat(), current_step, train_loss, train_acc,
        examples_per_second))

    # Evaluate dev set
    if current_step % FLAGS.evaluate_every == 0:
        dev_iter = tfmodels.data.utils.xy_iter(x_dev, y_dev, FLAGS.batch_size, 1)
        mean_loss, acc, _ = evaluator.eval(dev_iter, global_step=trainer.global_step)
        print("{}: Step {}, Dev Accuracy: {:g}, Dev Mean Loss: {:g}".format(
            datetime.now().isoformat(), current_step, acc, mean_loss))

    # Checkpoint Model
    if current_step % FLAGS.checkpoint_every == 0:
        save_path = saver.save(sess, checkpoint_file, global_step=trainer.global_step)
        print("Saved {}".format(save_path))