Example No. 1
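The snippets in these examples are test methods from TensorFlow Fold's plan_test.py; they run inside a test class (apparently a tf.test.TestCase subclass, given the use of self.test_session() and self.get_temp_dir()) and are not standalone. A minimal sketch of the imports they assume, with module paths taken from the tensorflow/fold repository (treat the exact paths as assumptions):

  import six
  from six.moves import xrange
  import tensorflow as tf

  from tensorflow_fold.blocks import block_compiler
  from tensorflow_fold.blocks import blocks
  from tensorflow_fold.blocks import plan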
 def test_dequeue(self):
   p = plan.TrainPlan()
   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.is_chief_trainer = True
   p.batch_size = 3
   p.batches_per_epoch = 2
   p.queue_capacity = 12
   p.num_dequeuers = 1
   p.ps_tasks = 1
   q = p._create_queue(0)
   p._setup_dequeuing([q])
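   # Pre-fill the queue with 12 loom inputs: batch_size (3) x
   # batches_per_epoch (2) x epochs (2), which also matches queue_capacity.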
   input_batch = list(p.compiler.build_loom_inputs([7])) * 3
   q_enqueue = q.enqueue_many([input_batch * 4])
   p.losses['foo'], = p.compiler.output_tensors
   p.train_op = tf.no_op()
   p.finalize_stats()
   p.logdir = self.get_temp_dir()
   p.epochs = 2
   p.print_file = six.StringIO()
   init_op = tf.global_variables_initializer()
   sv = p.create_supervisor()
   with self.test_session() as sess:
     sess.run(init_op)
     sess.run(q_enqueue)
     p.run(sv, sess)
   expected = '\n'.join(['running train',
                         'train_size: 6',
                         'epoch:    1 train[loss: 7.000e+00]',
                         'epoch:    2 train[loss: 7.000e+00]',
                         'final model saved in file: %s' % p.logdir])
   log_str = p.print_file.getvalue()
   self.assertIn(expected, log_str)
Example No. 2
 def test_enqueue(self):
   p = plan.TrainPlan()
   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.examples = [7] * 8  # two items should be ignored (8 % 3 == 2)
   p.is_chief_trainer = True
   p.batch_size = 3
   p.queue_capacity = 12
   p.num_dequeuers = 1
   p.ps_tasks = 1
   q = p._create_queue(0)
   p._setup_enqueuing([q])
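   # Running the enqueuing plan for 2 epochs pushes train_size (6) x 2 = 12
   # loom inputs onto the queue; q_size and dequeue_many(12) below check this.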
   q_size = q.size()
   q_dequeue = q.dequeue_many(12)
   p.finalize_stats()
   p.logdir = self.get_temp_dir()
   p.epochs = 2
   p.print_file = six.StringIO()
   init_op = tf.global_variables_initializer()
   sv = p.create_supervisor()
   with self.test_session() as sess:
     sess.run(init_op)
     p.run(sv, sess)
     expected = '\n'.join(['running train',
                           'train_size: 6',
                           'epoch:    1 train[loss: 0.000e+00]',
                           'epoch:    2 train[loss: 0.000e+00]',
                           'final model saved in file: %s' % p.logdir])
     log_str = p.print_file.getvalue()
     self.assertIn(expected, log_str)
     self.assertEqual(12, sess.run(q_size))
     loom_inputs = sess.run(q_dequeue)
     self.assertEqual((12,), loom_inputs.shape)
     results = sess.run(p.compiler.output_tensors[0],
                        {p.compiler.loom_input_tensor: loom_inputs})
     self.assertEqual(results.tolist(), [7] * 12)
Example No. 3
 def test_assert_runnable(self):
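   # assert_runnable() reports missing requirements in a fixed order: a loss,
   # a compiler, finalize_stats(), a logdir, a train_op, a batch_size, and
   # finally either examples or batches_per_epoch.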
   p = plan.TrainPlan()
   self.assertRaisesWithLiteralMatch(
       ValueError, 'at least one loss is required', p.assert_runnable)
   p.losses['foo'] = tf.constant(42.0)
   self.assertRaisesWithLiteralMatch(
       ValueError, 'compiler is required', p.assert_runnable)
   p.compiler = block_compiler.Compiler.create(blocks.Scalar())
   self.assertRaisesWithLiteralMatch(
       RuntimeError, 'finalize_stats() has not been called', p.assert_runnable)
   p.finalize_stats()
   self.assertRaisesWithLiteralMatch(
       ValueError, 'logdir is required', p.assert_runnable)
   p.logdir = '/tmp/'
   self.assertRaisesWithLiteralMatch(
       ValueError, 'train_op is required', p.assert_runnable)
   p.train_op = tf.no_op()
   self.assertRaisesWithLiteralMatch(
       ValueError, 'batch_size is required', p.assert_runnable)
   p.batch_size = 10
   self.assertRaisesWithLiteralMatch(
       ValueError, 'either examples or batches_per_epoch is required',
       p.assert_runnable)
   p.examples = xrange(2)
   p.assert_runnable()
   p.examples = None
   self.assertRaises(ValueError, p.assert_runnable)
   p.batches_per_epoch = 42
   p.assert_runnable()
Example No. 4
 def create_plan(self, loom_input_tensor):
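   # Shared helper: builds a TrainPlan whose model multiplies its scalar input
   # by a trainable variable 'foo' (initialized to 12), trains it with plain
   # gradient descent, and evaluates on a one-element dev set.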
   p = plan.TrainPlan()
   foo = tf.get_variable('foo', [], tf.float32, tf.constant_initializer(12))
   p.compiler = block_compiler.Compiler.create(
       blocks.Scalar() >> blocks.Function(lambda x: x * foo),
       loom_input_tensor=loom_input_tensor)
   p.losses['foo'] = p.compiler.output_tensors[0]
   p.finalize_stats()
   p.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(
       p.loss_total, global_step=p.global_step)
   p.logdir = self.get_temp_dir()
   p.dev_examples = [2]
   p.is_chief_trainer = True
   p.batch_size = 2
   p.epochs = 3
   p.print_file = six.StringIO()
   return p
Example No. 5
 def test_init_loom(self):
   p = plan.TrainPlan()
   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.batch_size = 3
   p.task = 13
   p.num_dequeuers = 7
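   # With num_dequeuers set, init_loom() requires at least one PS task and
   # worker_replicas >= num_queues + num_dequeuers, as the next two
   # assertions check.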

   self.assertRaisesWithLiteralMatch(
       ValueError, 'must have at least one PS task; 0', p.init_loom)

   p.ps_tasks = 5
   self.assertRaisesWithLiteralMatch(
       ValueError, 'worker_replicas must be at least num_queues + '
       'num_dequeuers; 0 vs. 5 + 7 = 12', p.init_loom)
   p.worker_replicas = 14

   # Would be best to actually create a queue and inspect it, but
   # tf.QueueBase doesn't currently expose these properties.
   self.assertEqual(p._create_queue(3, ctor=dict)['capacity'], 12)
   p.queue_capacity = 42
   q_dict = p._create_queue(3, ctor=dict)
   self.assertEqual(q_dict['capacity'], 42)
   self.assertEqual(q_dict['shared_name'], 'tensorflow_fold_plan_queue3')

   self.assertEqual(p.init_loom(), (True, False))

   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.task = 3
   self.assertEqual(p.init_loom(), (False, True))

   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.num_dequeuers = 0
   self.assertRaisesWithLiteralMatch(
       ValueError, 'cannot specify queue_capacity without also '
       'specifying num_dequeuers', p.init_loom)

   p.compiler = block_compiler.Compiler().compile(blocks.Scalar())
   p.queue_capacity = 0
   self.assertEqual(p.init_loom(), (True, True))
Example No. 6
 def test_finalize_stats_raises(self):
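   # Like assert_runnable(), finalize_stats() requires at least one loss to
   # have been registered first.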
   self.assertRaisesWithLiteralMatch(
       ValueError, 'at least one loss is required',
       plan.TrainPlan().finalize_stats)