Example #1
  def testTrainWithTrace(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % trace_step
      self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
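These snippets call a `LogisticClassifier` helper (the snake_case `logistic_classifier` in other examples is the same idea) that is not shown. A minimal sketch consistent with how it is used, one sigmoid probability per input row; the `layers` and `math_ops` imports are assumptions, and this is a reconstruction rather than the original definition:

def LogisticClassifier(inputs):
  # One fully connected layer squashed through a sigmoid, mapping each
  # input row to a single probability in [0, 1].
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)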
Example #2
    def testTrainWithLocalVariable(self):
        logdir = os.path.join(self.get_temp_dir(), 'train_with_local_variable')

        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib.local_variable(1.0)

            tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = training.create_train_op(total_loss, optimizer)

            loss = training.train(
                train_op,
                logdir,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
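Every example reads `self._inputs` and `self._labels` from a test fixture that is not shown. A plausible `setUp` sketch producing a small, easily memorized dataset; the exact shapes and value scheme are assumptions, chosen so a logistic model can drive the loss near zero:

    def setUp(self):
        # 16 examples, 4 features; inputs are shifted by the binary label so
        # the classes are linearly separable. Values are illustrative only.
        np.random.seed(0)
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._inputs = (np.random.randn(16, 4) * 0.3 +
                        (self._labels * 2 - 1)).astype(np.float32)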
Example #3
  def testNoneGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(
          total_loss, optimizer, global_step=None)

      global_step = variables_lib.get_or_create_global_step()

      with session_lib.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib2.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step, 0)
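The `batchnorm_classifier` helper (and its CamelCase twin `BatchNormClassifier` in other examples) is likewise not shown. A minimal sketch consistent with the moving-statistics assertions in the later examples; the decay of 0.1 is inferred from the "decay 0.1" comment in Example #22, and the imports are assumed as before:

def batchnorm_classifier(inputs):
  # Batch normalization creates the moving_mean/moving_variance variables
  # the tests fetch by name; a sigmoid unit then produces the prediction.
  inputs = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)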
Example #4
  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    number_of_steps = [300, 1, 5]
    logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')

    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

        train_op = training.create_train_op(total_loss, optimizer)

        saver = saver_lib.Saver()

        loss = training.train(
            train_op,
            logdir,
            hooks=[
                basic_session_run_hooks.StopAtStepHook(
                    num_steps=number_of_steps[i]),
                basic_session_run_hooks.CheckpointSaverHook(
                    logdir, save_steps=50, saver=saver),
            ])
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Example #5
  def testResumeTrainAchievesRoughlyTheSameLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    number_of_steps = [300, 301, 305]

    for i in range(len(number_of_steps)):
      with ops.Graph().as_default():
        random_seed.set_random_seed(i)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

        train_op = learning.create_train_op(total_loss, optimizer)

        loss = learning.train(
            train_op,
            logdir,
            number_of_steps=number_of_steps[i],
            log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Example #6
    def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=learning_rate)

        def transform_grads_fn(grads):
            if gradient_multiplier != 1.0:
                variables = variables_lib2.trainable_variables()
                gradient_multipliers = {
                    var: gradient_multiplier
                    for var in variables
                }

                with ops.name_scope('multiply_grads'):
                    return training.multiply_gradients(grads,
                                                       gradient_multipliers)
            else:
                return grads

        return training.create_train_op(total_loss,
                                        optimizer,
                                        transform_grads_fn=transform_grads_fn)
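A hypothetical use of the helper above: when `gradient_multiplier` differs from 1.0, `transform_grads_fn` rescales every gradient before the optimizer applies it, which for plain gradient descent behaves like scaling the learning rate. A sketch, not from the original test:

        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            # Gradients are multiplied by 10 inside transform_grads_fn, so a
            # single step moves the weights ten times as far as the default.
            train_op = self.create_train_op(learning_rate=0.1,
                                            gradient_multiplier=10.0)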
Example #7
    def testTrainWithEpochLimit(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)
            tf_inputs_limited = input_lib.limit_epochs(tf_inputs,
                                                       num_epochs=300)
            tf_labels_limited = input_lib.limit_epochs(tf_labels,
                                                       num_epochs=300)

            tf_predictions = LogisticClassifier(tf_inputs_limited)
            loss_ops.log_loss(tf_predictions, tf_labels_limited)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op, logdir, log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
        self.assertTrue(
            os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
        self.assertTrue(
            os.path.isfile(
                '{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
Example #8
    def testTrainWithSessionConfig(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            session_config = config_pb2.ConfigProto(allow_soft_placement=True)
            loss = learning.train(train_op,
                                  None,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  session_config=session_config)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Example #9
  def testTrainWithSessionWrapper(self):
    """Test that slim.learning.train can take `session_wrapper` args.

    One of the applications of `session_wrapper` is the wrappers of the
    TensorFlow Debugger (tfdbg), which intercept method calls to `tf.Session`
    (e.g., run) to enable debugging. `DumpingDebugWrapperSession` is used here
    for testing purposes.
    """
    dump_root = tempfile.mkdtemp()

    def dumping_wrapper(sess):  # pylint: disable=invalid-name
      return dumping_wrapper_lib.DumpingDebugWrapperSession(sess, dump_root)

    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, None, number_of_steps=1, session_wrapper=dumping_wrapper)
    self.assertIsNotNone(loss)

    run_root = glob.glob(os.path.join(dump_root, 'run_*'))[-1]
    dump = debug_data.DebugDumpDir(run_root)
    self.assertAllEqual(0,
                        dump.get_tensors('global_step', 0, 'DebugIdentity')[0])
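A `session_wrapper` can be any callable that accepts the raw `tf.Session` and returns a session-like object. A minimal duck-typed sketch; the `LoggingWrapperSession` name is hypothetical and not part of TensorFlow, and the tfdbg wrappers implement a richer interface:

class LoggingWrapperSession(object):
  """Hypothetical wrapper that logs each run() call, then delegates."""

  def __init__(self, sess):
    self._sess = sess

  def run(self, *args, **kwargs):
    print('session.run called')
    return self._sess.run(*args, **kwargs)

  def __getattr__(self, name):
    # Delegate everything else (graph, close, ...) to the wrapped session.
    return getattr(self._sess, name)

# Passed as learning.train(..., session_wrapper=LoggingWrapperSession).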
Example #10
    def testTrainWithLocalVariable(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib2.local_variable(1.0)

            tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
Example #11
    def testUseGlobalStep(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            global_step = variables_lib2.get_or_create_global_step()

            with session.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())

                for _ in range(10):
                    sess.run([train_op])
                global_step = global_step.eval()
                # After 10 updates global_step should be 10.
                self.assertAllClose(global_step, 10)
Example #12
  def ModelLoss(self):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    return loss_ops.get_total_loss()
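A hypothetical call site for `ModelLoss`, mirroring the other examples: wrap the returned total loss in a train op and take gradient steps. A sketch, not from the original test:

    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      # Running train_op in a session performs one full gradient descent
      # step on the fixture's loss.
      train_op = training.create_train_op(total_loss, optimizer)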
Example #13
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        g = ops.Graph()
        with g.as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertLess(loss, .1)
Example #14
  def testEmptyUpdateOps(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer, update_ops=[])

      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name(
          'moving_variance')[0]

      with session_lib.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib2.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()

        # Since we skip update_ops, the moving averages are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
Example #15
  def testTrainOpInCollection(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      # Make sure the training op was recorded in the proper collection
      self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
Example #16
  def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss = loss_ops.log_loss(tf_predictions, tf_labels)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      loss = training.train(
          train_op,
          checkpoint_dir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
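A hypothetical call site for the helper above: train for 300 steps into a temp directory, leaving checkpoints that a later test can restore or evaluate. The `model_dir` name is illustrative:

      checkpoint_dir = os.path.join(self.get_temp_dir(), 'model_dir')
      # After ~300 steps the model has memorized the dataset; checkpoints
      # are written to checkpoint_dir by the training session.
      self._train_model(checkpoint_dir, num_steps=300)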
Example #17
    def _train_model(self, checkpoint_dir, num_steps):
        """Trains a simple classification model.

        Note that the data has been configured such that after around 300 steps,
        the model has memorized the dataset (i.e., we can expect 100% accuracy).

        Args:
          checkpoint_dir: The directory where the checkpoint is written to.
          num_steps: The number of steps to train for.
        """
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = logistic_classifier(tf_inputs)
            loss = loss_ops.log_loss(tf_predictions, tf_labels)

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss, optimizer)

            loss = training.train(
                train_op,
                checkpoint_dir,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
Example #18
  def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10)
Example #19
  def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        learning.train(train_op, logdir, init_op=None, number_of_steps=300)
Example #20
  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers)
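For vanilla gradient descent, multiplying every gradient by k is equivalent to multiplying the learning rate by k, so the two configurations sketched below should produce identical updates. A sketch, not from the original test; graphs are built separately to avoid variable-reuse errors:

    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      # lr=1.0 with gradients scaled down to 0.1 of their value ...
      train_op_a = self.create_train_op(
          learning_rate=1.0, gradient_multiplier=0.1)

    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      # ... matches lr=0.1 with unscaled gradients.
      train_op_b = self.create_train_op(learning_rate=0.1)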
Example #21
  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          self._logdir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
      self.assertLess(loss, .1)
Example #22
    def testUseUpdateOps(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            expected_mean = np.mean(self._inputs, axis=0)
            expected_var = np.var(self._inputs, axis=0)
            expected_var = self._addBesselsCorrection(16, expected_var)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            moving_mean = variables_lib2.get_variables_by_name(
                'moving_mean')[0]
            moving_variance = variables_lib2.get_variables_by_name(
                'moving_variance')[0]

            with session.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())
                mean, variance = sess.run([moving_mean, moving_variance])
                # After initialization moving_mean == 0 and moving_variance == 1.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)

                for _ in range(10):
                    sess.run([train_op])
                mean = moving_mean.eval()
                variance = moving_variance.eval()
                # After 10 updates with decay 0.1 moving_mean == expected_mean and
                # moving_variance == expected_var.
                self.assertAllClose(mean, expected_mean)
                self.assertAllClose(variance, expected_var)
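The `_addBesselsCorrection` helper is not shown; a sketch consistent with its call site (sample size 16, corrected variance returned). `np.var` computes the biased population variance (divide by n), while the test's expected moving variance matches the unbiased estimator (divide by n - 1), hence the n / (n - 1) rescale:

    def _addBesselsCorrection(self, sample_size, expected_var):
        # Rescale the biased variance by n / (n - 1); 16 / 15 for the
        # 16-example fixture used in these tests.
        correction_factor = sample_size / (sample_size - 1.0)
        return expected_var * correction_factor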