def test_trainer(self):
     """Train for one epoch over 5 synthetic batches; expect 5 steps/losses."""
     num_batches = 5
     # Random token ids plus constant one-hot labels (every example is class 1).
     x_train = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE * num_batches, SEQUENCE_LENGTH])
     y_train = np.eye(2)[np.ones(BATCH_SIZE * num_batches, dtype=np.int32)]
     train_data_iter = tfmodels.data.utils.batch_iter(
         list(zip(x_train, y_train)), BATCH_SIZE, 1, fill=True, seed=42)
     # batch_iter yields lists of (x, y) pairs; transpose each batch into (xs, ys).
     train_data_iter = map(lambda batch: zip(*batch), train_data_iter)
     losses = []
     with self.graph.as_default(), self.sess.as_default():
         cnn = self._build_classifier()
         t = CNNClassifierTrainer(cnn)
         # NOTE(review): tf.initialize_all_variables() is deprecated in favor of
         # tf.global_variables_initializer(); kept for consistency with this file.
         self.sess.run(tf.initialize_all_variables())
         train_iter = t.train_loop(train_data_iter)
         for loss, acc, current_step, time_delta in train_iter:
             # append is O(1); `losses = losses + [loss]` copied the list each step.
             losses.append(loss)
     self.assertEqual(len(losses), 5)
     self.assertEqual(current_step, 5)
# Example #2
 def test_trainer(self):
     """Train for one epoch over 5 synthetic batches; expect 5 steps/losses."""
     num_batches = 5
     # Random token ids plus constant one-hot labels (every example is class 1).
     x_train = np.random.randint(
         0, VOCABULARY_SIZE, [BATCH_SIZE * num_batches, SEQUENCE_LENGTH])
     y_train = np.eye(2)[np.ones(BATCH_SIZE * num_batches, dtype=np.int32)]
     train_data_iter = tfmodels.data.utils.batch_iter(list(
         zip(x_train, y_train)),
                                                      BATCH_SIZE,
                                                      1,
                                                      fill=True,
                                                      seed=42)
     # batch_iter yields lists of (x, y) pairs; transpose each batch into (xs, ys).
     train_data_iter = map(lambda batch: zip(*batch), train_data_iter)
     losses = []
     with self.graph.as_default(), self.sess.as_default():
         cnn = self._build_classifier()
         t = CNNClassifierTrainer(cnn)
         # NOTE(review): tf.initialize_all_variables() is deprecated in favor of
         # tf.global_variables_initializer(); kept for consistency with this file.
         self.sess.run(tf.initialize_all_variables())
         train_iter = t.train_loop(train_data_iter)
         for loss, acc, current_step, time_delta in train_iter:
             # append is O(1); `losses = losses + [loss]` copied the list each step.
             losses.append(loss)
     self.assertEqual(len(losses), 5)
     self.assertEqual(current_step, 5)
# Example #3
        os.makedirs(checkpoint_dir)
    # Checkpoint path; Saver additionally keeps a permanent snapshot every 2 hours.
    checkpoint_file = os.path.join(checkpoint_dir, "model.ckpt")
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)

    # Initialization, optionally load from checkpoint
    sess.run(tf.initialize_all_variables())
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Restoring checkpoint from {}".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)

    # Initialize variables
    # NOTE(review): this second initialize_all_variables() runs AFTER
    # saver.restore() above and would overwrite any restored weights —
    # looks like a bug; confirm and drop one of the two init calls.
    sess.run(tf.initialize_all_variables())

    # Training loop
    # train_loop yields (loss, accuracy, global step, wall-time per step).
    for train_loss, train_acc, current_step, time_delta in trainer.train_loop(
            train_data_iter):
        examples_per_second = FLAGS.batch_size / time_delta
        print("{}: step {}, loss {:g}, acc {:g} ({:g} examples/sec)".format(
            datetime.now().isoformat(), current_step, train_loss, train_acc,
            examples_per_second))

        # Evaluate dev set every FLAGS.evaluate_every steps.
        if current_step % FLAGS.evaluate_every == 0:
            dev_iter = tfmodels.data.utils.xy_iter(x_dev, y_dev,
                                                   FLAGS.batch_size, 1)
            mean_loss, acc, _ = evaluator.eval(dev_iter,
                                               global_step=trainer.global_step)
            print(
                "{}: Step {}, Dev Accuracy: {:g}, Dev Mean Loss: {:g}".format(
                    datetime.now().isoformat(), current_step, acc, mean_loss))
        os.makedirs(checkpoint_dir)
    # Checkpoint path; Saver additionally keeps a permanent snapshot every 2 hours.
    checkpoint_file = os.path.join(checkpoint_dir, "model.ckpt")
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)

    # Initialization, optionally load from checkpoint
    sess.run(tf.initialize_all_variables())
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Restoring checkpoint from {}".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)

    # Initialize variables
    # NOTE(review): this second initialize_all_variables() runs AFTER
    # saver.restore() above and would overwrite any restored weights —
    # looks like a bug; confirm and drop one of the two init calls.
    sess.run(tf.initialize_all_variables())

    # Training loop
    # train_loop yields (loss, accuracy, global step, wall-time per step).
    for train_loss, train_acc, current_step, time_delta in trainer.train_loop(train_data_iter):
        examples_per_second = FLAGS.batch_size/time_delta
        print("{}: step {}, loss {:g}, acc {:g} ({:g} examples/sec)".format(
                datetime.now().isoformat(), current_step, train_loss, train_acc, examples_per_second))

        # Evaluate dev set every FLAGS.evaluate_every steps.
        if current_step % FLAGS.evaluate_every == 0:
            dev_iter = tfmodels.data.utils.xy_iter(x_dev, y_dev, FLAGS.batch_size, 1)
            mean_loss, acc, _ = evaluator.eval(dev_iter, global_step=trainer.global_step)
            print("{}: Step {}, Dev Accuracy: {:g}, Dev Mean Loss: {:g}".format(
                datetime.now().isoformat(), current_step, acc, mean_loss))

        # Checkpoint Model every FLAGS.checkpoint_every steps.
        if current_step % FLAGS.checkpoint_every == 0:
            save_path = saver.save(sess, checkpoint_file, global_step=trainer.global_step)
            print("Saved {}".format(save_path))