Example #1
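A helper that builds the model loss: it runs logistic_classifier on constant inputs, registers a log loss against the labels, and returns the combined result from losses.get_total_loss().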
    def ModelLoss(self):
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        return losses.get_total_loss()
Example #2
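A test that scales the classifier's predictions by a local variable and verifies that training.train still drives the loss below 0.015 within 300 steps.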
    def testTrainWithLocalVariable(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib.local_variable(1.0)

            tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
            losses.log_loss(tf_labels, tf_predictions)
            total_loss = losses.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(total_loss, optimizer)

            loss = training.train(
                train_op,
                None,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
                save_summaries_steps=None,
                save_checkpoint_secs=None)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
Example #3
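A helper that builds a train op and, when gradient_multiplier is not 1.0, scales every trainable variable's gradient through a transform_grads_fn passed to training.create_train_op.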
    def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        tf_predictions = logistic_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()

        optimizer = gradient_descent.GradientDescentOptimizer(
            learning_rate=learning_rate)

        def transform_grads_fn(grads):
            if gradient_multiplier != 1.0:
                variables = variables_lib2.trainable_variables()
                gradient_multipliers = {
                    var: gradient_multiplier
                    for var in variables
                }

                with ops.name_scope('multiply_grads'):
                    return training.multiply_gradients(grads,
                                                       gradient_multipliers)
            else:
                return grads

        return training.create_train_op(total_loss,
                                        optimizer,
                                        transform_grads_fn=transform_grads_fn)
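The transform_grads_fn above receives the optimizer's list of (gradient, variable) pairs and must return a list of the same shape. A minimal stand-alone sketch of the scaling it delegates to training.multiply_gradients (plain Python over hypothetical values, not the library's implementation):

    def scale_gradients(grads_and_vars, gradient_multipliers):
        # grads_and_vars: list of (gradient, variable) pairs, as produced by
        # optimizer.compute_gradients(); gradient_multipliers: dict mapping
        # variable -> scalar multiplier.
        scaled = []
        for grad, var in grads_and_vars:
            multiplier = gradient_multipliers.get(var)
            if grad is not None and multiplier is not None:
                grad = grad * multiplier  # element-wise gradient scaling
            scaled.append((grad, var))
        return scaled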
Example #4
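A test verifying that a train op created with global_step=None never increments the global step: after ten training steps it should still read 0.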
    def testGlobalStepNotIncrementedWhenSetToNone(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            loss = losses.log_loss(tf_labels, tf_predictions)
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss,
                                                optimizer,
                                                global_step=None)

            global_step = variables_lib.get_or_create_global_step()

            with self.cached_session() as session:
                # Initialize all variables
                session.run(variables_lib2.global_variables_initializer())

                for _ in range(10):
                    session.run(train_op)

                # Since the train_op doesn't use global_step, it shouldn't change.
                self.assertAllClose(global_step.eval(), 0)
Example #5
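A test verifying that passing update_ops=[] suppresses the batch-norm update ops, so moving_mean and moving_variance keep their initial values after training.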
    def testEmptyUpdateOps(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            loss = losses.log_loss(tf_labels, tf_predictions)
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss, optimizer, update_ops=[])

            moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
            moving_variance = variables_lib.get_variables_by_name(
                'moving_variance')[0]

            with self.cached_session() as session:
                # Initialize all variables
                session.run(variables_lib2.global_variables_initializer())
                mean, variance = session.run([moving_mean, moving_variance])
                # After initialization, moving_mean == 0 and moving_variance == 1.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)

                for _ in range(10):
                    session.run(train_op)

                mean = moving_mean.eval()
                variance = moving_variance.eval()

                # Since we skipped the update_ops, the moving-average variables are not updated.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)
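For context, when update_ops is left as None, create_train_op picks up the ops registered in the UPDATE_OPS collection and runs them alongside each training step; an explicit empty list opts out of that. A rough sketch of this gating (tf.compat.v1-style names assumed, not the library's actual code):

    import tensorflow.compat.v1 as tf

    def make_train_op(total_loss, optimizer, update_ops=None):
        # Fall back to the UPDATE_OPS collection (e.g. batch-norm moving-average
        # updates) only when no explicit list is given; [] means run none.
        if update_ops is None:
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(list(update_ops)):
            return optimizer.minimize(total_loss)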
Example #6
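A test that trains in three separate runs (300, 1, and 5 steps) against the same checkpoint directory, verifying that training resumed from a checkpoint ends at roughly the same low loss.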
    def testResumeTrainAchievesRoughlyTheSameLoss(self):
        number_of_steps = [300, 1, 5]
        logdir = tempfile.mkdtemp('resume_train_same_loss')

        for i in range(len(number_of_steps)):
            with ops.Graph().as_default():
                random_seed.set_random_seed(i)
                tf_inputs = constant_op.constant(self._inputs,
                                                 dtype=dtypes.float32)
                tf_labels = constant_op.constant(self._labels,
                                                 dtype=dtypes.float32)

                tf_predictions = logistic_classifier(tf_inputs)
                losses.log_loss(tf_labels, tf_predictions)
                total_loss = losses.get_total_loss()

                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=1.0)

                train_op = training.create_train_op(total_loss, optimizer)

                saver = saver_lib.Saver()

                loss = training.train(
                    train_op,
                    logdir,
                    hooks=[
                        basic_session_run_hooks.StopAtStepHook(
                            num_steps=number_of_steps[i]),
                        basic_session_run_hooks.CheckpointSaverHook(
                            logdir, save_steps=50, saver=saver),
                    ],
                    save_checkpoint_secs=None,
                    save_summaries_steps=None)
                self.assertIsNotNone(loss)
                self.assertLess(loss, .015)
Example #7
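A test that trains a batch-norm classifier with no log directory, summaries, or checkpoints and checks that the final loss drops below 0.1.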
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            losses.log_loss(tf_labels, tf_predictions)
            total_loss = losses.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = training.create_train_op(total_loss, optimizer)

            loss = training.train(
                train_op,
                None,
                hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
                save_summaries_steps=None,
                save_checkpoint_secs=None)
            self.assertLess(loss, .1)
Example #8
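A test verifying that training.create_train_op registers the returned op in the GraphKeys.TRAIN_OP collection.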
    def testTrainOpInCollection(self):
        with ops.Graph().as_default():
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = batchnorm_classifier(tf_inputs)
            loss = losses.log_loss(tf_labels, tf_predictions)
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            train_op = training.create_train_op(loss, optimizer)

            # Make sure the training op was recorded in the proper collection
            self.assertIn(train_op, ops.get_collection(ops.GraphKeys.TRAIN_OP))
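Because the op is stored in that collection, a later consumer can recover it without holding a Python reference, e.g. ops.get_collection(ops.GraphKeys.TRAIN_OP)[0] after the graph is built.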