Example #1
    def testTrainWithLocalVariable(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib.local_variable(1.0)

            tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
            losses.log_loss(tf_labels, tf_predictions)
            total_loss = losses.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(total_loss, optimizer)

            loss = training.train(
                train_op,
                None,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
                save_summaries_steps=None,
                save_checkpoint_secs=None)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
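
The logistic_classifier helper and the _inputs/_labels fixtures used throughout these examples are defined elsewhere in the test class and are not shown on this page. A minimal sketch consistent with how they are called above (the layer choice and data shapes are assumptions, not the original definitions):

import numpy as np
import tensorflow as tf

def logistic_classifier(inputs):
    # One fully-connected unit with a sigmoid output, matching the binary
    # log loss used in the examples (assumed implementation).
    return tf.layers.dense(inputs, 1, activation=tf.sigmoid)

# Toy fixture data: a handful of 4-dimensional points with binary labels.
# Shapes are assumptions; the originals are small enough to be memorized.
_inputs = np.random.rand(16, 4).astype(np.float32)
_labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)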
Example #2
    def _train_model(self, checkpoint_dir, num_steps):
        """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (e.g. we can expect %100 accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = logistic_classifier(tf_inputs)
            loss = loss_ops.log_loss(tf_predictions, tf_labels)

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss, optimizer)

            loss = training.train(
                train_op,
                checkpoint_dir,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
Example #3
    def testTrainWithAlteredGradients(self):
        # Use the same learning rate but different gradient multipliers
        # to train two models. The model with the equivalently larger
        # learning rate (i.e., learning_rate * gradient_multiplier) should
        # reach a smaller training loss.
        multipliers = [1., 1000.]
        number_of_steps = 10
        learning_rate = 0.001

        # First, train the model with the equivalently smaller learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[0])

            loss0 = training.train(train_op,
                                   None,
                                   hooks=[
                                       basic_session_run_hooks.StopAtStepHook(
                                           num_steps=number_of_steps),
                                   ],
                                   save_checkpoint_secs=None,
                                   save_summaries_steps=None)
            self.assertIsNotNone(loss0)
            self.assertGreater(loss0, .5)

        # Second, train the model with the equivalently larger learning rate.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op(learning_rate=learning_rate,
                                            gradient_multiplier=multipliers[1])

            loss1 = training.train(train_op,
                                   None,
                                   hooks=[
                                       basic_session_run_hooks.StopAtStepHook(
                                           num_steps=number_of_steps),
                                   ],
                                   save_checkpoint_secs=None,
                                   save_summaries_steps=None)
            self.assertIsNotNone(loss1)
            self.assertLess(loss1, .5)

        # The loss of the model trained with the larger learning rate should
        # be smaller.
        self.assertGreater(loss0, loss1)
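
The create_train_op helper used in this example (taking learning_rate and gradient_multiplier) is also defined elsewhere in the test class. A plausible sketch, reusing the same module aliases as the examples and assuming the transform_grads_fn hook of training.create_train_op is used to scale the gradients:

    def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
        # Build the same logistic model and scale every gradient by a constant
        # factor before it is applied (assumed implementation of the helper).
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=learning_rate)

        def transform_grads_fn(grads_and_vars):
            # Multiplying each gradient by the same constant is equivalent to
            # scaling the learning rate for this model.
            return [(grad * gradient_multiplier, var)
                    for grad, var in grads_and_vars]

        return training.create_train_op(total_loss, optimizer,
                                        transform_grads_fn=transform_grads_fn)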
Example #4
    def testResumeTrainAchievesRoughlyTheSameLoss(self):
        number_of_steps = [300, 1, 5]
        logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')

        for i in range(len(number_of_steps)):
            with ops.Graph().as_default():
                random_seed.set_random_seed(i)
                tf_inputs = constant_op.constant(self._inputs,
                                                 dtype=dtypes.float32)
                tf_labels = constant_op.constant(self._labels,
                                                 dtype=dtypes.float32)

                tf_predictions = logistic_classifier(tf_inputs)
                losses.log_loss(tf_labels, tf_predictions)
                total_loss = losses.get_total_loss()

                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=1.0)

                train_op = training.create_train_op(total_loss, optimizer)

                saver = saver_lib.Saver()

                loss = training.train(
                    train_op,
                    logdir,
                    hooks=[
                        basic_session_run_hooks.StopAtStepHook(
                            num_steps=number_of_steps[i]),
                        basic_session_run_hooks.CheckpointSaverHook(
                            logdir, save_steps=50, saver=saver),
                    ],
                    save_checkpoint_secs=None,
                    save_summaries_steps=None)
                self.assertIsNotNone(loss)
                self.assertLess(loss, .015)
Example #5
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            losses.log_loss(tf_labels, tf_predictions)
            total_loss = losses.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = training.create_train_op(total_loss, optimizer)

            loss = training.train(
                train_op,
                None,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
                save_summaries_steps=None,
                save_checkpoint_secs=None)
            self.assertLess(loss, .1)
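
The batchnorm_classifier helper is likewise not shown here. A minimal sketch consistent with its use (the layer choices are assumptions); note that training.create_train_op collects the batch-norm update ops from GraphKeys.UPDATE_OPS by default, so the moving averages are updated as part of the returned train op:

import tensorflow as tf

def batchnorm_classifier(inputs):
    # Batch-normalize the inputs, then apply a single sigmoid unit
    # (assumed implementation, inferred from how the helper is called).
    inputs = tf.layers.batch_normalization(inputs, momentum=0.1)
    return tf.layers.dense(inputs, 1, activation=tf.sigmoid)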
Example #6
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
        if gfile.Exists(logdir):  # For running on jenkins.
            gfile.DeleteRecursively(logdir)

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib.get_variables_by_name('weights')

            train_op = training.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir,
                                                                save_steps=200,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=200),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib.get_variables_by_name('biases')

            train_op = training.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir,
                                                                save_steps=300,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=300),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both the weights and the biases to get a lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = training.create_train_op(total_loss, optimizer)
            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir,
                hooks=[
                    basic_session_run_hooks.StopAtStepHook(num_steps=400),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
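
The ModelLoss helper and the variables_lib.get_variables_by_name calls above rely on 'weights' and 'biases' variables created by the model layer. A sketch of the helper, assuming it simply rebuilds the logistic model on the fixture data and returns the collected loss:

    def ModelLoss(self):
        # Rebuild the logistic model and return its total (collected) loss;
        # assumed implementation of the helper used in the example above.
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        return losses.get_total_loss()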
Example #7
    def testTrainWithInitFromCheckpoint(self):
        logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
        logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')

        if gfile.Exists(logdir1):  # For running on jenkins.
            gfile.DeleteRecursively(logdir1)
        if gfile.Exists(logdir2):  # For running on jenkins.
            gfile.DeleteRecursively(logdir2)

        # First, train the model one step (make sure the error is high).
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            train_op = self.create_train_op()
            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir1,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir1,
                                                                save_steps=1,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=1),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertGreater(loss, .5)

        # Next, train the model to convergence.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            train_op = self.create_train_op()
            saver = saver_lib.Saver()
            loss = training.train(
                train_op,
                logdir1,
                hooks=[
                    basic_session_run_hooks.CheckpointSaverHook(logdir1,
                                                                save_steps=300,
                                                                saver=saver),
                    basic_session_run_hooks.StopAtStepHook(num_steps=300),
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)

        # Finally, advance the model a single step and validate that the loss is
        # still low.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            train_op = self.create_train_op()

            model_variables = variables_lib2.global_variables()
            model_path = checkpoint_management.latest_checkpoint(logdir1)

            assign_fn = variables_lib.assign_from_checkpoint_fn(
                model_path, model_variables)

            def init_fn(_, session):
                assign_fn(session)

            loss = training.train(
                train_op,
                None,
                scaffold=monitored_session.Scaffold(init_fn=init_fn),
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
                save_checkpoint_secs=None,
                save_summaries_steps=None)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .02)
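
As a side note, the same warm-start can also be expressed without a custom init_fn by rewriting the variables' initializers in the graph. A hedged sketch using tf.train.init_from_checkpoint, as an alternative to the Scaffold approach shown above (not part of the original test); it would run inside the third graph before calling training.train:

import tensorflow as tf

# Point every variable in the current graph at the converged checkpoint so
# that MonitoredSession restores it automatically (alternative sketch only).
model_path = checkpoint_management.latest_checkpoint(logdir1)
tf.train.init_from_checkpoint(model_path, {'/': '/'})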