Example 1
  def test_queues(self):
    qr = FakeQueueRunner()
    tf.train.add_queue_runner(qr)
    runner = local_trainer.Runner()
    with tf.Session():
      optimizer = tf.train.GradientDescentOptimizer(0.5)
      train_op = pt.apply_optimizer(optimizer,
                                    losses=[self.softmax_result.loss])

      runner.train_model(train_op,
                         self.softmax_result.loss,
                         100,
                         (self.input, self.target),
                         self.xor_data,
                         print_every=2)
    with tf.Session():
      # Starting another run before stopping the queues from the previous
      # session should fail with an error that mentions stop_queues.
      with self.assertRaisesRegexp(ValueError, r'.*\bstop_queues\b.*'):
        runner.train_model(train_op,
                           self.softmax_result.loss,
                           100,
                           (self.input, self.target),
                           self.xor_data,
                           print_every=2)

    runner.stop_queues()
    qr.assert_worked(self)
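FakeQueueRunner and its assert_worked check are test helpers that are not shown in this example. Below is a minimal, hypothetical sketch of what such a fake might look like, assuming the trainer starts every object registered via tf.train.add_queue_runner through its create_threads method (the call signature used by tf.train.start_queue_runners); it is not the class used in the real test.

class FakeQueueRunner(object):
  """Hypothetical stand-in for the fake queue runner used above."""

  def __init__(self):
    self.create_calls = 0

  def create_threads(self, sess, coord=None, daemon=False, start=False):
    # Record that the trainer tried to start this runner's threads; return
    # an empty list because there is no real queue to fill.
    self.create_calls += 1
    return []

  def assert_worked(self, test):
    # The test passes itself in so the fake can reuse unittest assertions.
    test.assertGreater(self.create_calls, 0)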
Example 2
  def test_evaluate_without_initialize_error(self):
    with tf.Graph().as_default():
      runner = local_trainer.Runner()
      tf.Variable(1)  # Put a variable in the graph.

      with runner.session(), self.assertRaises(ValueError):
        runner.evaluate_model(
            self.softmax_result, 1, (self.input, self.target), self.xor_data)
Example 3
  def test_evaluate_repeatedly_one_time(self):
    f = os.path.join(self.tmp_file, 'checkpoint')
    runner = local_trainer.Runner(save_path=f)
    self.restore_helper(runner)
    local_variable = tf.Variable(22, collections=[tf.GraphKeys.LOCAL_VARIABLES])
    accuracy = local_variable.assign_add(1)

    # Twenty evaluations of assign_add(1) take the local variable from 22 to 42.
    answer = runner.evaluate_repeatedly(accuracy, 20, evaluation_times=1)
    self.assertEqual([42], answer)
Example 4
    def test_run(self):
        runner = local_trainer.Runner()
        with tf.Session():
            optimizer = tf.train.GradientDescentOptimizer(0.5)
            train_op = pt.apply_optimizer(optimizer,
                                          losses=[self.softmax_result.loss])

            runner.train_model(train_op,
                               self.softmax_result.loss,
                               10, (self.input, self.target),
                               self.xor_data,
                               print_every=2)
Example 5
  def test_manual_save_restore(self):
    runner = local_trainer.Runner()
    f = os.path.join(self.tmp_file, 'manual.chkpt')

    v = tf.Variable(tf.random_normal(shape=[100], dtype=tf.float32))

    # Save it.
    with runner.session() as sess:
      runner.prepare_model(sess)  # Create variables
      value = v.eval()  # Grab the variable
      runner.saver.save(sess, f)

    with runner.session() as sess:
      # Restore the model
      runner.saver.restore(sess, f)
      new_value = v.eval()
    numpy.testing.assert_array_equal(value, new_value)
Example 6
  def test_checkpoint(self):
    f = os.path.join(self.tmp_file, 'checkpoint')
    runner = local_trainer.Runner(save_path=f)
    with tf.Session():
      optimizer = tf.train.GradientDescentOptimizer(0.1)
      train_op = pt.apply_optimizer(optimizer,
                                    losses=[self.softmax_result.loss])

      runner.train_model(train_op,
                         self.softmax_result.loss,
                         10,
                         (self.input, self.target),
                         self.xor_data,
                         print_every=2)
    assert runner._saver.last_checkpoints, 'Expected checkpoints.'
    for x in runner._saver.last_checkpoints:
      self.assertTrue(os.path.isfile(x), 'Promised file not saved: %s' % x)
      self.assertTrue(x.startswith(f), 'Name not as expected: %s' % x)
Example 7
  def test_eval(self):
    f = os.path.join(self.tmp_file, 'checkpoint')
    runner = local_trainer.Runner(save_path=f)
    with tf.Session():
      classification_accuracy = self.softmax_result.softmax.evaluate_classifier(
          self.target, phase=pt.Phase.test)

      optimizer = tf.train.GradientDescentOptimizer(0.2)
      train_op = pt.apply_optimizer(optimizer,
                                    losses=[self.softmax_result.loss])

      runner.train_model(train_op,
                         self.softmax_result.loss,
                         100,
                         (self.input, self.target),
                         self.xor_data,
                         print_every=50)
      self.assertTrue(runner._last_init)
      save_paths = list(runner._saver.last_checkpoints)

      # The accuracy should be 50% right now since the model is generated
      # deterministically.
      accuracy = runner.evaluate_model(classification_accuracy,
                                       1,
                                       (self.input, self.target),
                                       self.xor_data)
      self.assertEqual(runner._saver.last_checkpoints, save_paths,
                       'No additional paths should have been saved.')
      self.assertFalse(runner._last_init)
      self.assertEqual(accuracy, 0.5)

      # Train the model to 100% accuracy.
      runner.train_model(train_op,
                         self.softmax_result.loss,
                         2000,
                         (self.input, self.target),
                         self.xor_data,
                         print_every=1000)
      accuracy = runner.evaluate_model(classification_accuracy, 1,
                                       (self.input, self.target), self.xor_data)
      self.assertFalse(runner._last_init)

      # Make sure that the previous computation didn't impact this eval.
      self.assertEqual(accuracy, 1.0)
Example 8
  def test_not_restored(self):
    f = os.path.join(self.tmp_file, 'checkpoint')
    runner = local_trainer.Runner(save_path=f, restore=False)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      self.restore_helper(runner)
Example 9
  def test_restore(self):
    f = os.path.join(self.tmp_file, 'checkpoint')
    runner = local_trainer.Runner(save_path=f)
    self.restore_helper(runner)
    self.assertTrue(runner._last_restore)
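The restore_helper method used in Examples 3, 8, and 9 is not shown here. The sketch below is a hypothetical reconstruction, assuming it first trains and checkpoints the XOR model with a second runner that writes to the same save path, and then evaluates with the runner under test, so that a restore=True runner reloads the checkpoint (setting _last_restore) while a restore=False runner evaluates uninitialized variables and raises tf.errors.FailedPreconditionError. It reuses the fixtures and module imports (os, tf, pt, local_trainer) from the examples above and is a sketch of the expected behavior, not the actual helper.

  def restore_helper(self, runner):
    # Hypothetical sketch of the helper used by the tests above.
    saving_runner = local_trainer.Runner(
        save_path=os.path.join(self.tmp_file, 'checkpoint'))
    with tf.Session():
      optimizer = tf.train.GradientDescentOptimizer(0.5)
      train_op = pt.apply_optimizer(optimizer,
                                    losses=[self.softmax_result.loss])
      # Train briefly so a checkpoint exists at the shared save path.
      saving_runner.train_model(train_op,
                                self.softmax_result.loss,
                                10,
                                (self.input, self.target),
                                self.xor_data,
                                print_every=2)
    with tf.Session():
      # In a fresh session the variables are uninitialized, so the runner
      # under test must restore the checkpoint (or fail if restore=False).
      runner.evaluate_model(self.softmax_result.loss,
                            1,
                            (self.input, self.target),
                            self.xor_data)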