Code Example #1
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights, biases = variables_lib2.get_variables()

            train_op = learning.create_train_op(total_loss, optimizer)
            train_weights = learning.create_train_op(
                total_loss, optimizer, variables_to_train=[weights])
            train_biases = learning.create_train_op(
                total_loss, optimizer, variables_to_train=[biases])

            with session.Session() as sess:
                # Initialize the variables.
                sess.run(variables_lib.global_variables_initializer())

                # Get the initial weights and biases values.
                weights_values, biases_values = sess.run([weights, biases])
                self.assertGreater(np.linalg.norm(weights_values), 0)
                self.assertAlmostEqual(np.linalg.norm(biases_values), 0)

                # Update weights and biases.
                loss = sess.run(train_op)
                self.assertGreater(loss, .5)
                new_weights, new_biases = sess.run([weights, biases])

                # Check that the weights and biases have been updated.
                self.assertGreater(
                    np.linalg.norm(weights_values - new_weights), 0)
                self.assertGreater(np.linalg.norm(biases_values - new_biases),
                                   0)

                weights_values, biases_values = new_weights, new_biases

                # Update only weights.
                loss = sess.run(train_weights)
                self.assertGreater(loss, .5)
                new_weights, new_biases = sess.run([weights, biases])

                # Check that the weights have been updated, but biases have not.
                self.assertGreater(
                    np.linalg.norm(weights_values - new_weights), 0)
                self.assertAlmostEqual(
                    np.linalg.norm(biases_values - new_biases), 0)
                weights_values = new_weights

                # Update only biases.
                loss = sess.run(train_biases)
                self.assertGreater(loss, .5)
                new_weights, new_biases = sess.run([weights, biases])

                # Check that the biases have been updated, but weights have not.
                self.assertAlmostEqual(
                    np.linalg.norm(weights_values - new_weights), 0)
                self.assertGreater(np.linalg.norm(biases_values - new_biases),
                                   0)
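The ModelLoss helper called above is not part of this listing. A minimal sketch of what it might look like, pieced together from how Examples #5 and #7 build their losses (the exact body is an assumption, not the real helper):

    def ModelLoss(self):
        # Build the same logistic model the later examples use and
        # return its total loss (sketch only; the real helper may differ).
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        return loss_ops.get_total_loss()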
Code Example #2
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
        logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                               'tmp_logs1')

        # First, train only the weights of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            weights = variables_lib2.get_variables_by_name('weights')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=weights)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=200,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Next, train the biases of the model.
        with ops.Graph().as_default():
            random_seed.set_random_seed(1)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)
            biases = variables_lib2.get_variables_by_name('biases')

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                variables_to_train=biases)

            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertGreater(loss, .015)
            self.assertLess(loss, .05)

        # Finally, train both weights and bias to get lower loss.
        with ops.Graph().as_default():
            random_seed.set_random_seed(2)
            total_loss = self.ModelLoss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)
            loss = learning.train(train_op,
                                  logdir1,
                                  number_of_steps=400,
                                  log_every_n_steps=10)

            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
Code Example #3
  def testEmptyUpdateOps(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer, update_ops=[])

      moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib2.get_variables_by_name(
          'moving_variance')[0]

      with tf.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # Since we skip the update_ops, the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
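BatchNormClassifier is referenced throughout but never defined in this listing. The assertions above imply four-channel moving statistics, and the comment in Example #20 mentions a decay of 0.1, so a plausible sketch (assuming the tf-slim layers module is available as layers; the real helper may differ) is:

  def BatchNormClassifier(inputs):
    # Batch-normalize the four-feature inputs with decay 0.1, then
    # apply a single sigmoid output unit (sketch only).
    inputs = layers.batch_norm(inputs, decay=0.1)
    return layers.fully_connected(inputs, 1, activation_fn=tf.sigmoid)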
Code Example #4
  def testTrainWithTrace(self):
    logdir = tempfile.mkdtemp('tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=300,
          log_every_n_steps=10,
          trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % (trace_step - 1)
      trace_filename_legacy = 'tf_trace-%d.json' % trace_step

      trace_paths = [os.path.join(logdir, f) for f in
                     (trace_filename, trace_filename_legacy)]
      # Note: with resource variables the traces are created at 0/100/200;
      # with legacy variables they are created at 1/101/201.
      self.assertTrue(any(os.path.isfile(path) for path in trace_paths),
                      trace_paths)
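LogisticClassifier is likewise not defined in this listing. Under the same assumption about the layers module, a minimal sketch:

  def LogisticClassifier(inputs):
    # A single fully connected sigmoid unit over the inputs
    # (sketch only; the real helper may differ).
    return layers.fully_connected(inputs, 1, activation_fn=tf.sigmoid)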
Code Example #5
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        g = ops.Graph()
        with g.as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertLess(loss, .1)
Code Example #6
    def testUseGlobalStep(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            global_step = variables_lib2.get_or_create_global_step()

            with tf.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())

                for _ in range(10):
                    sess.run([train_op])
                global_step = global_step.eval()
                # After 10 updates global_step should be 10.
                self.assertAllClose(global_step, 10)
Code Example #7
File: learning_test.py Project: yonghankim/tf-slim
    def testResumeTrainAchievesRoughlyTheSameLoss(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        number_of_steps = [300, 301, 305]

        for i in range(len(number_of_steps)):
            with ops.Graph().as_default():
                random_seed.set_random_seed(i)
                tf_inputs = constant_op.constant(self._inputs,
                                                 dtype=dtypes.float32)
                tf_labels = constant_op.constant(self._labels,
                                                 dtype=dtypes.float32)

                tf_predictions = LogisticClassifier(tf_inputs)
                loss_ops.log_loss(tf_predictions, tf_labels)
                total_loss = loss_ops.get_total_loss()

                optimizer = gradient_descent.GradientDescentOptimizer(
                    learning_rate=1.0)

                train_op = learning.create_train_op(total_loss, optimizer)

                loss = learning.train(train_op,
                                      logdir,
                                      number_of_steps=number_of_steps[i],
                                      log_every_n_steps=10)
                self.assertIsNotNone(loss)
                self.assertLess(loss, .015)
Code Example #8
File: learning_test.py Project: yonghankim/tf-slim
    def testNoneGlobalStep(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss,
                                                optimizer,
                                                global_step=None)

            global_step = variables_lib2.get_or_create_global_step()

            with session.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())

                for _ in range(10):
                    sess.run([train_op])
                global_step = global_step.eval()
                # Since the train_op doesn't use the global_step, it shouldn't change.
                self.assertAllClose(global_step, 0)
Code Example #9
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithLocalVariable(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            local_multiplier = variables_lib2.local_variable(1.0)

            tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10)
            self.assertIsNotNone(loss)
            self.assertLess(loss, .015)
Code Example #10
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithTrace(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            summary.scalar('total_loss', total_loss)

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  logdir,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  trace_every_n_steps=100)
        self.assertIsNotNone(loss)
        for trace_step in [1, 101, 201]:
            trace_filename = 'tf_trace-%d.json' % trace_step
            self.assertTrue(
                os.path.isfile(os.path.join(logdir, trace_filename)))
Code Example #11
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithSessionConfig(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            session_config = config_pb2.ConfigProto(allow_soft_placement=True)
            loss = learning.train(train_op,
                                  None,
                                  number_of_steps=300,
                                  log_every_n_steps=10,
                                  session_config=session_config)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Code Example #12
    def testTrainWithNonDefaultGraph(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        g = ops.Graph()
        with g.as_default():
            random_seed.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_labels, tf_predictions)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

        loss = learning.train(train_op,
                              logdir,
                              number_of_steps=300,
                              log_every_n_steps=10,
                              graph=g)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
Code Example #13
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithEpochLimit(self):
        logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
                              'tmp_logs')
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)
            tf_inputs_limited = input_lib.limit_epochs(tf_inputs,
                                                       num_epochs=300)
            tf_labels_limited = input_lib.limit_epochs(tf_labels,
                                                       num_epochs=300)

            tf_predictions = LogisticClassifier(tf_inputs_limited)
            loss_ops.log_loss(tf_predictions, tf_labels_limited)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op, logdir, log_every_n_steps=10)
        self.assertIsNotNone(loss)
        self.assertLess(loss, .015)
        self.assertTrue(
            os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
        self.assertTrue(
            os.path.isfile(
                '{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
Code Example #14
  def testRecordTrainOpInCollection(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = learning.create_train_op(total_loss, optimizer)

      # Make sure the training op was recorded in the proper collection
      self.assertIn(train_op, ops.get_collection(ops.GraphKeys.TRAIN_OP))
Code Example #15
  def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    logdir = tempfile.mkdtemp('tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        learning.train(train_op, logdir, init_op=None, number_of_steps=300)
Code Example #16
  def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10)
Code Example #17
  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_labels, tf_predictions)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers)
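This helper lets the gradient-multiplier tests build a train op whose gradients are uniformly scaled. A hypothetical call (the multiplier value here is illustrative, not taken from the listing):

    # Halve every gradient; training should still converge, only slower.
    train_op = self.create_train_op(learning_rate=1.0,
                                    gradient_multiplier=0.5)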
Code Example #18
  def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    logdir = tempfile.mkdtemp('tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_labels, tf_predictions)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
Code Example #19
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithSessionWrapper(self):
        """Test that slim.learning.train can take `session_wrapper` args.

    One of the applications of `session_wrapper` is the wrappers of TensorFlow
    Debugger (tfdbg), which intercept methods calls to `tf.compat.v1.Session`
    (e.g., run)
    to achieve debugging. `DumpingDebugWrapperSession` is used here for testing
    purpose.
    """
        dump_root = tempfile.mkdtemp()

        def dumping_wrapper(sess):  # pylint: disable=invalid-name
            return dumping_wrapper_lib.DumpingDebugWrapperSession(
                sess, dump_root)

        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            loss = learning.train(train_op,
                                  None,
                                  number_of_steps=1,
                                  session_wrapper=dumping_wrapper)
        self.assertIsNotNone(loss)

        run_root = glob.glob(os.path.join(dump_root, 'run_*'))[-1]
        dump = debug_data.DebugDumpDir(run_root)
        self.assertAllEqual(
            0,
            dump.get_tensors('global_step', 0, 'DebugIdentity')[0])
Code Example #20
File: learning_test.py Project: yonghankim/tf-slim
    def testUseUpdateOps(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            expected_mean = np.mean(self._inputs, axis=0)
            expected_var = np.var(self._inputs, axis=0)
            expected_var = self._addBesselsCorrection(16, expected_var)

            tf_predictions = BatchNormClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)

            moving_mean = variables_lib2.get_variables_by_name(
                'moving_mean')[0]
            moving_variance = variables_lib2.get_variables_by_name(
                'moving_variance')[0]

            with session.Session() as sess:
                # Initialize all variables
                sess.run(variables_lib.global_variables_initializer())
                mean, variance = sess.run([moving_mean, moving_variance])
                # After initialization moving_mean == 0 and moving_variance == 1.
                self.assertAllClose(mean, [0] * 4)
                self.assertAllClose(variance, [1] * 4)

                for _ in range(10):
                    sess.run([train_op])
                mean = moving_mean.eval()
                variance = moving_variance.eval()
                # After 10 updates with decay 0.1 moving_mean == expected_mean and
                # moving_variance == expected_var.
                self.assertAllClose(mean, expected_mean)
                self.assertAllClose(variance, expected_var)
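The _addBesselsCorrection helper in Example #20 is not shown. Bessel's correction converts a population variance over n samples into the unbiased sample estimate by scaling with n / (n - 1); a minimal sketch under that assumption:

    def _addBesselsCorrection(self, sample_size, expected_var):
        # Scale the biased (population) variance by n / (n - 1) to get
        # the unbiased sample variance (sketch only).
        correction_factor = sample_size / (sample_size - 1.0)
        return expected_var * correction_factor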
Code Example #21
File: learning_test.py Project: yonghankim/tf-slim
    def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
        with ops.Graph().as_default():
            random_seed.set_random_seed(0)
            tf_inputs = constant_op.constant(self._inputs,
                                             dtype=dtypes.float32)
            tf_labels = constant_op.constant(self._labels,
                                             dtype=dtypes.float32)

            tf_predictions = LogisticClassifier(tf_inputs)
            loss_ops.log_loss(tf_predictions, tf_labels)
            total_loss = loss_ops.get_total_loss()
            summary.scalar('total_loss', total_loss)

            optimizer = gradient_descent.GradientDescentOptimizer(
                learning_rate=1.0)

            train_op = learning.create_train_op(total_loss, optimizer)
            summary_op = summary.merge_all()

            with self.assertRaises(ValueError):
                learning.train(train_op,
                               None,
                               number_of_steps=300,
                               summary_op=summary_op)