Example #1
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs1')

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib2.get_variables_by_name('weights')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=200,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib2.get_variables_by_name('biases')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both the weights and the biases to get a lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=400,
                                  log_every_n_steps=10)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
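All of the snippets in this listing come from the TF 1.x slim learning tests and omit their module imports. A sketch of the imports they assume is given below; the paths are internal TensorFlow 1.x modules and may differ between versions, so treat them as an approximation rather than the original header. The debugger example additionally uses modules under tensorflow.python.debug, and the GAN example uses tensorflow.contrib.gan.

import glob
import os
import tempfile

import numpy as np

from tensorflow.core.protobuf import config_pb2
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib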
Example #2
    def testTrainWithInitFromCheckpoint(self):
        logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs1')
        logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs2')

        # First, train the model one step (make sure the error is high).
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op()
            loss = learning.train(train_op, logdir1, number_of_steps=1)
            self.assertGreater(loss, .5)

        # Next, train the model to convergence.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            train_op = self.create_train_op()
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)

        # Finally, advance the model a single step and validate that the loss is
        # still low.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            train_op = self.create_train_op()

            model_variables = variables_lib.global_variables()
            model_path = os.path.join(logdir1, 'model.ckpt-300')

            init_op = variables_lib.global_variables_initializer()
            op, init_feed_dict = variables_lib2.assign_from_checkpoint(
                model_path, model_variables)

            def InitAssignFn(sess):
                sess.run(op, init_feed_dict)

            loss = learning.train(train_op,
                                  logdir2,
                                  number_of_steps=1,
                                  init_op=init_op,
                                  init_fn=InitAssignFn)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)
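Example #2 calls a test helper `self.create_train_op()` that is not reproduced in this listing (the altered-gradients example later on uses it too). A plausible sketch of that helper, consistent with how it is called, is shown here; the layer, loss, and gradient-multiplier handling are assumptions rather than the original code.

    def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
        # Build the same tiny logistic-regression loss used by the other tests.
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=learning_rate)

        # Optionally scale every gradient by a constant factor; this is what
        # the altered-gradients example exercises via gradient_multiplier.
        gradient_multipliers = None
        if gradient_multiplier != 1.0:
            gradient_multipliers = {
                var: gradient_multiplier
                for var in variables_lib.trainable_variables()
            }

        return learning.create_train_op(
            total_loss, optimizer, gradient_multipliers=gradient_multipliers)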
Example #3
    def testTrainWithLocalVariable(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib2.local_variable(1.0)

            tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
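The examples rely on `self._inputs`, `self._labels`, and a `LogisticClassifier` model function defined elsewhere in the test file; `layers` below stands for `tf.contrib.layers` and `math_ops` for `tensorflow.python.ops.math_ops`. A plausible sketch of those pieces (the dataset shape and the single-sigmoid-unit model are assumptions, chosen to be consistent with the loss values asserted above):

    def setUp(self):
        # Assumed fixture: sixteen one-hot rows whose hot column encodes the
        # label, i.e. a tiny, linearly separable binary dataset, so a few
        # hundred SGD steps can drive the log loss close to zero.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        for i in range(16):
            j = int(2 * self._labels[i] + np.random.randint(0, 2))
            self._inputs[i, j] = 1

def LogisticClassifier(inputs):
    # Assumed model: a single fully connected sigmoid unit, i.e. plain
    # logistic regression.
    return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)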
Example #4
    def testTrainWithSessionConfig(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            session_config = config_pb2.ConfigProto(allow_soft_placement=True)
            loss = learning.train(train_op,
                                  None,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  session_config=session_config)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
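When the logdir argument is None, as here, learning.train runs the training loop without writing checkpoints or summaries; the ConfigProto is passed straight to the underlying session, and allow_soft_placement simply lets ops fall back to a supported device when their requested placement is unavailable.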
Example #5
  def testTrainWithSessionWrapper(self):
    """Test that slim.learning.train can take `session_wrapper` args.

    One of the applications of `session_wrapper` is the wrappers of TensorFlow
    Debugger (tfdbg), which intercept methods calls to `tf.Session` (e.g., run)
    to achieve debugging. `DumpingDebugWrapperSession` is used here for testing
    purpose.
    """
    dump_root = tempfile.mkdtemp()

    def dumping_wrapper(sess):  # pylint: disable=invalid-name
      return dumping_wrapper_lib.DumpingDebugWrapperSession(sess, dump_root)

    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, None, number_of_steps=1, session_wrapper=dumping_wrapper)
    self.assertIsNotNone(loss)

    run_root = glob.glob(os.path.join(dump_root, 'run_*'))[-1]
    dump = debug_data.DebugDumpDir(run_root)
    self.assertAllEqual(0,
                        dump.get_tensors('global_step', 0, 'DebugIdentity')[0])
Example #6
    def testTrainWithEpochLimit(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)
            tf_inputs_limited = input_lib.limit_epochs(tf_inputs,
                                                       num_epochs=300)
            tf_labels_limited = input_lib.limit_epochs(tf_labels,
                                                       num_epochs=300)

            tf_predictions = LogisticClassifier(tf_inputs_limited)
            loss_ops.log_loss(tf_predictions, tf_labels_limited)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op, logdir, log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
        self.assertTrue(
            os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
        self.assertTrue(
            os.path.isfile(
                '{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
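Because the whole dataset is a single constant batch, each training step consumes one epoch, so limit_epochs(..., num_epochs=300) makes the input ops raise OutOfRangeError after 300 steps. That is why no number_of_steps is passed here and why the final checkpoint is model.ckpt-300.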
Example #7
  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    number_of_steps = [300, 301, 305]

    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

        train_op = learning.create_train_op(total_loss, optimizer)

        loss = learning.train(
            train_op,
            logdir,
            number_of_steps=number_of_steps[i],
            log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
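All three runs write to the same logdir, so the second and third runs restore the latest checkpoint (steps 300 and 301) and train for only 1 and 4 additional steps. The loss therefore stays roughly where the first run left it, which is what the assertions check.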
Example #8
  def testTrainWithTrace(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % trace_step
      self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
Example #9
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        g = ops.Graph()
        with g.as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertLess(loss, .1)
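Example #9 swaps in a `BatchNormClassifier`. A plausible sketch, with the batch-norm decay chosen arbitrarily, is below. The point of the test is that `learning.create_train_op` folds the ops in the UPDATE_OPS collection (the batch-norm moving-average updates) into the returned train op, so the model converges without any extra initialization or assign logic.

def BatchNormClassifier(inputs):
    # Assumed variant: batch normalization in front of the same sigmoid unit.
    inputs = layers.batch_norm(inputs, decay=0.1)
    return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)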
Example #10
  def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10)
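Trace files are written into the logdir, so asking for trace_every_n_steps while passing None as the logdir is rejected with a ValueError.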
Example #11
  def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        learning.train(train_op, logdir, init_op=None, number_of_steps=300)
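Passing init_op=None while the graph contains variables means the model variables are never initialized, so starting the training session fails with a RuntimeError.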
Example #12
    def testTrainWithAlteredGradients(self):
        # Use the same learning rate but different gradient multipliers
        # to train two models. The model with the equivalently larger
        # learning rate (i.e., learning_rate * gradient_multiplier) should
        # reach a smaller training loss.
        logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs1')
        logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs2')

        multipliers = [1., 1000.]
        number_of_steps = 10
        losses = []
        learning_rate = 0.001

        # First, train the model with the equivalently smaller learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[0])
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=number_of_steps)
            losses.append(loss)
            self.assertGreater(loss, .5)

        # Second, train the model with the equivalently larger learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[1])
            loss = learning.train(train_op,
                                  logdir2,
                                  number_of_steps=number_of_steps)
            losses.append(loss)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .5)

        # The loss of the model trained with the larger learning rate should
        # be smaller.
        self.assertGreater(losses[0], losses[1])
Example #13
    def test_supervisor_run_gan_model_train_ops_multiple_steps(self):
        step = training_util.create_global_step()
        train_ops = namedtuples.GANTrainOps(
            generator_train_op=constant_op.constant(3.0),
            discriminator_train_op=constant_op.constant(2.0),
            global_step_inc_op=step.assign_add(1))
        train_steps = namedtuples.GANTrainSteps(generator_train_steps=3,
                                                discriminator_train_steps=4)

        final_loss = slim_learning.train(
            train_op=train_ops,
            logdir='',
            global_step=step,
            number_of_steps=1,
            train_step_fn=train.get_sequential_train_steps(train_steps))
        self.assertTrue(np.isscalar(final_loss))
        self.assertEqual(17.0, final_loss)
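The expected final loss of 17.0 follows from the sequential train-step function: within the single outer step it runs the generator op 3 times and the discriminator op 4 times and accumulates their constant losses, i.e. 3 * 3.0 + 4 * 2.0 = 17.0.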
Example #14
  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
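Stripped of the test scaffolding, every example above follows the same pattern: build a total loss, wrap it with `learning.create_train_op`, and hand the result to `learning.train`. A minimal standalone sketch using the same modules (the data and the logdir path are placeholders, not taken from the tests):

with ops.Graph().as_default():
    inputs = constant_op.constant(np.random.rand(16, 4), dtype=dtypes.float32)
    labels = constant_op.constant(
        np.random.randint(0, 2, size=(16, 1)), dtype=dtypes.float32)

    predictions = LogisticClassifier(inputs)
    loss_ops.log_loss(predictions, labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    # Run the training loop, checkpointing and logging into the given logdir.
    final_loss = learning.train(
        train_op, '/tmp/slim_logs', number_of_steps=100, log_every_n_steps=10)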