# Example no. 1 (score: 0)
 def test_trainer(self):
     """Train for a fixed number of batches and verify per-step loss tracking.

     Builds random integer token inputs and one-hot labels (all class 1),
     feeds them through the trainer's train_loop, and checks that one loss
     is recorded per batch and the step counter ends at the batch count.
     """
     num_batches = 5
     # Random token ids shaped [num_examples, seq_len]; labels are one-hot
     # vectors for class 1 via the identity-matrix indexing trick.
     x_train = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE * num_batches, SEQUENCE_LENGTH])
     y_train = np.eye(2)[np.ones(BATCH_SIZE * num_batches, dtype=np.int32)]
     train_data_iter = tfmodels.data.utils.batch_iter(list(zip(x_train, y_train)), BATCH_SIZE, 1, fill=True, seed=42)
     # Transpose each batch of (x, y) pairs into (xs, ys) column tuples.
     train_data_iter = map(lambda batch: zip(*batch), train_data_iter)
     losses = []
     with self.graph.as_default(), self.sess.as_default():
         rnn = self._build_classifier()
         t = RNNClassifierTrainer(rnn)
         self.sess.run(tf.initialize_all_variables())
         train_iter = t.train_loop(train_data_iter)
         for loss, acc, current_step, time_delta in train_iter:
             # Append in place: rebuilding the list each step is O(n^2).
             losses.append(loss)
     # One loss per batch; the final reported step equals the batch count.
     self.assertEqual(len(losses), num_batches)
     self.assertEqual(current_step, num_batches)
# Example no. 2 (score: 0)
 def test_trainer(self):
     """Run the trainer over five batches and check loss/step bookkeeping."""
     n_batches = 5
     total = BATCH_SIZE * n_batches
     inputs = np.random.randint(0, VOCABULARY_SIZE,
                                [total, SEQUENCE_LENGTH])
     # One-hot labels, every example assigned class 1.
     labels = np.eye(2)[np.ones(total, dtype=np.int32)]
     batches = tfmodels.data.utils.batch_iter(
         list(zip(inputs, labels)), BATCH_SIZE, 1, fill=True, seed=42)
     # Each batch of (x, y) pairs becomes a pair of columns (xs, ys).
     batches = map(lambda b: zip(*b), batches)
     recorded_losses = []
     with self.graph.as_default(), self.sess.as_default():
         classifier = self._build_classifier()
         trainer = RNNClassifierTrainer(classifier)
         self.sess.run(tf.initialize_all_variables())
         for loss, acc, current_step, time_delta in trainer.train_loop(batches):
             recorded_losses.append(loss)
     self.assertEqual(len(recorded_losses), 5)
     self.assertEqual(current_step, 5)
# Example no. 3 (score: 0)
    }
    # Overlay CLI flag values on the base hyperparameters; flag keys
    # override any duplicates already in model_params.
    model_params.update(FLAGS.__flags)
    model = RNNClassifier.from_dict(model_params)
    model.print_params()
    # Static-shape placeholders: int token ids and one-hot labels for
    # 2 classes; batch size is fixed by the flag.
    x = tf.placeholder(tf.int32, [FLAGS.batch_size, SEQUENCE_LENGTH])
    y = tf.placeholder(tf.float32, [FLAGS.batch_size, 2])
    model.build_graph(x, y)

    # Directory for training and dev summaries, namespaced by start time
    timestamp = str(int(time.time()))
    rundir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    train_dir = os.path.join(rundir, "train")
    dev_dir = os.path.join(rundir, "dev")

    # Build the Trainer/Evaluator
    trainer = RNNClassifierTrainer(model, train_summary_dir=train_dir)
    evaluator = RNNClassifierEvaluator(model, summary_dir=dev_dir)

    # Saving/Checkpointing: retain a checkpoint at least every 2 hours
    checkpoint_dir = os.path.join(rundir, "checkpoints")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    checkpoint_file = os.path.join(checkpoint_dir, "model.ckpt")
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)

    # Initialization, optionally resume from the latest checkpoint.
    # NOTE(review): rundir is timestamped per run, so checkpoint_dir is
    # freshly created and latest_checkpoint will normally be None — confirm
    # whether resume was ever intended to fire here.
    sess.run(tf.initialize_all_variables())
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Restoring checkpoint from {}".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)
    # Base hyperparameters taken from module-level constants.
    model_params = {"sequence_length": SEQUENCE_LENGTH, "vocabulary_size": VOCABULARY_SIZE, "num_classes": 2}
    # Overlay CLI flag values; flag keys override the base entries.
    model_params.update(FLAGS.__flags)
    model = RNNClassifier.from_dict(model_params)
    model.print_params()
    # Static-shape placeholders: int token ids and one-hot labels for
    # 2 classes; batch size is fixed by the flag.
    x = tf.placeholder(tf.int32, [FLAGS.batch_size, SEQUENCE_LENGTH])
    y = tf.placeholder(tf.float32, [FLAGS.batch_size, 2])
    model.build_graph(x, y)

    # Directory for training and dev summaries, namespaced by start time
    timestamp = str(int(time.time()))
    rundir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    train_dir = os.path.join(rundir, "train")
    dev_dir = os.path.join(rundir, "dev")

    # Build the Trainer/Evaluator
    trainer = RNNClassifierTrainer(model, train_summary_dir=train_dir)
    evaluator = RNNClassifierEvaluator(model, summary_dir=dev_dir)

    # Saving/Checkpointing: retain a checkpoint at least every 2 hours
    checkpoint_dir = os.path.join(rundir, "checkpoints")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    checkpoint_file = os.path.join(checkpoint_dir, "model.ckpt")
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)

    # Initialization, optionally resume from the latest checkpoint.
    # NOTE(review): rundir is timestamped per run, so checkpoint_dir is
    # freshly created and latest_checkpoint will normally be None — confirm
    # whether resume was ever intended to fire here.
    sess.run(tf.initialize_all_variables())
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Restoring checkpoint from {}".format(latest_checkpoint))
        saver.restore(sess, latest_checkpoint)