Example #1
 def testMultiClassWithInvalidNClass(self):
   try:
     head_lib._multi_class_head(n_classes=1)
     self.fail("Softmax with no n_classes did not raise error.")
   except ValueError:
     # Expected
     pass
Example #2
  def testEval(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.EVAL,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertIsNotNone(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    metric_ops = model_fn_ops.eval_metric_ops

    # Tests eval keys.
    self.assertIn("accuracy/head1", metric_ops.keys())
    self.assertIn("accuracy/head2", metric_ops.keys())
Example #3
    def testTrain_withHeadWeights(self):
        head1 = head_lib._multi_class_head(n_classes=3,
                                           label_name="label1",
                                           head_name="head1")
        head2 = head_lib._multi_class_head(n_classes=4,
                                           label_name="label2",
                                           head_name="head2")
        head = head_lib._multi_head([head1, head2], [1, .5])
        logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
        labels = {
            "label1": constant_op.constant([1]),
            "label2": constant_op.constant([1])
        }
        features = {"weights": constant_op.constant([2.0, 10.0])}
        model_fn_ops = head.head_ops(features,
                                     labels,
                                     model_fn.ModeKeys.TRAIN,
                                     _noop_train_op,
                                     logits=logits)
        self.assertEquals(None, model_fn_ops.predictions)
        self.assertTrue(model_fn_ops.loss is not None)
        self.assertTrue(model_fn_ops.train_op is not None)
        self.assertFalse(model_fn_ops.eval_metric_ops)
        self.assertEquals(None, model_fn_ops.signature_fn)
        self.assertEquals(None, model_fn_ops.output_alternatives)

        with session.Session() as sess:
            self.assertAlmostEqual(1.531,
                                   sess.run(model_fn_ops.loss),
                                   places=3)
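The 1.531 checked above (and the 2.224 asserted in the unweighted variants below) can be reproduced without TensorFlow: each head takes its slice of the 7 logits (3 for head1, 4 for head2), computes softmax cross-entropy against its label, and the multi-head loss is the weighted sum of the per-head losses. A minimal NumPy sketch of that arithmetic:

import numpy as np

def softmax_xent(logits, label):
    # Cross-entropy of the softmax distribution at the true class index.
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return -np.log(probs[label])

logits = np.array([-0.7, 0.2, .1, .1, .1, .1, .1])
loss1 = softmax_xent(logits[:3], 1)  # head1: 3 classes
loss2 = softmax_xent(logits[3:], 1)  # head2: 4 classes
print(round(loss1 + .5 * loss2, 3))  # 1.531, head weights (1, .5)
print(round(loss1 + loss2, 3))       # 2.224, no head weights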
Example #4
 def testMultiClassWithInvalidNClass(self):
     try:
         head_lib._multi_class_head(n_classes=1)
         self.fail("Softmax with no n_classes did not raise error.")
     except ValueError:
         # Expected
         pass
Example #5
    def testEval(self):
        head1 = head_lib._multi_class_head(n_classes=3,
                                           label_name="label1",
                                           head_name="head1")
        head2 = head_lib._multi_class_head(n_classes=4,
                                           label_name="label2",
                                           head_name="head2")
        head = head_lib._multi_head([head1, head2], [1, .5])
        logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
        labels = {
            "label1": constant_op.constant([1]),
            "label2": constant_op.constant([1])
        }
        features = {"weights": constant_op.constant([2.0, 10.0])}
        model_fn_ops = head.head_ops(features,
                                     labels,
                                     model_fn.ModeKeys.EVAL,
                                     _noop_train_op,
                                     logits=logits)

        self.assertTrue(model_fn_ops.predictions)
        self.assertTrue(model_fn_ops.loss is not None)
        self.assertEquals(None, model_fn_ops.train_op)
        self.assertTrue(model_fn_ops.eval_metric_ops)
        self.assertEquals(None, model_fn_ops.signature_fn)
        self.assertEquals(None, model_fn_ops.output_alternatives)

        metric_ops = model_fn_ops.eval_metric_ops

        # Tests eval keys
        self.assertTrue("accuracy/head1" in metric_ops.keys())
        self.assertTrue("accuracy/head2" in metric_ops.keys())
Example #6
  def testTrain_withNoHeadWeights(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
Example #7
  def testEval(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.EVAL,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertIsNotNone(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    metric_ops = model_fn_ops.eval_metric_ops

    # Tests eval keys.
    self.assertIn("accuracy/head1", metric_ops.keys())
    self.assertIn("accuracy/head2", metric_ops.keys())
Example #8
  def testTrain_withNoHeadWeights(self):
    head1 = head_lib._multi_class_head(n_classes=3, label_name="label1",
                                       head_name="head1")
    head2 = head_lib._multi_class_head(n_classes=4, label_name="label2",
                                       head_name="head2")
    head = head_lib._multi_head([head1, head2])
    logits = tf.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": tf.constant([1]),
        "label2": tf.constant([1])

    }
    features = {"weights": tf.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(features, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=logits)

    self.assertEquals(None, model_fn_ops.predictions)
    self.assertTrue(model_fn_ops.loss is not None)
    self.assertTrue(model_fn_ops.train_op is not None)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.signature_fn)
    self.assertEquals(None, model_fn_ops.output_alternatives)

    with tf.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
Example #9
  def testTrain_withNoHeadWeights(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
Example #10
  def testEval(self):
    head1 = head_lib._multi_class_head(n_classes=3, label_name="label1",
                                       head_name="head1")
    head2 = head_lib._multi_class_head(n_classes=4, label_name="label2",
                                       head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = tf.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": tf.constant([1]),
        "label2": tf.constant([1])

    }
    features = {"weights": tf.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(features, labels,
                                 tf.contrib.learn.ModeKeys.EVAL,
                                 _noop_train_op, logits=logits)

    self.assertTrue(model_fn_ops.predictions)
    self.assertTrue(model_fn_ops.loss is not None)
    self.assertEquals(None, model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.signature_fn)
    self.assertEquals(None, model_fn_ops.output_alternatives)

    metric_ops = model_fn_ops.eval_metric_ops

    # Tests eval keys
    self.assertTrue("accuracy/head1" in metric_ops.keys())
    self.assertTrue("accuracy/head2" in metric_ops.keys())
Example #11
 def testInvalidHeads(self):
   named_head = head_lib._multi_class_head(
       n_classes=3, label_name="label", head_name="head1")
   unnamed_head = head_lib._multi_class_head(
       n_classes=4, label_name="label")
   with self.assertRaisesRegexp(ValueError, "must have names"):
     head_lib._multi_head((named_head, unnamed_head))
   with self.assertRaisesRegexp(ValueError, "must be SingleHead"):
     head_lib._multi_head((named_head, head_lib._multi_head((named_head,))))
Example #12
 def testInvalidHeads(self):
   named_head = head_lib._multi_class_head(
       n_classes=3, label_name="label", head_name="head1")
   unnamed_head = head_lib._multi_class_head(
       n_classes=4, label_name="label")
   with self.assertRaisesRegexp(ValueError, "must have names"):
     head_lib._multi_head((named_head, unnamed_head))
   with self.assertRaisesRegexp(ValueError, "must be SingleHead"):
     head_lib._multi_head((named_head, head_lib._multi_head((named_head,))))
Example #13
    def testInfer(self):
        head1 = head_lib._multi_class_head(n_classes=3,
                                           label_name="label1",
                                           head_name="head1")
        head2 = head_lib._multi_class_head(n_classes=4,
                                           label_name="label2",
                                           head_name="head2")
        head = head_lib._multi_head([head1, head2], [1, .5])
        logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
        labels = {
            "label1": constant_op.constant([1]),
            "label2": constant_op.constant([1])
        }
        features = {"weights": constant_op.constant([2.0, 10.0])}
        model_fn_ops = head.head_ops(features,
                                     labels,
                                     model_fn.ModeKeys.INFER,
                                     _noop_train_op,
                                     logits=logits)

        self.assertTrue(model_fn_ops.predictions)
        self.assertEquals(None, model_fn_ops.loss)
        self.assertEquals(None, model_fn_ops.train_op)
        self.assertFalse(model_fn_ops.eval_metric_ops)
        self.assertEquals(None, model_fn_ops.signature_fn)
        self.assertTrue(len(model_fn_ops.output_alternatives) == 2)

        # Tests predictions keys
        pred_keys = model_fn_ops.predictions.keys()
        self.assertTrue(
            ("head1", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
        self.assertTrue(("head1",
                         prediction_key.PredictionKey.CLASSES) in pred_keys)
        self.assertTrue(
            ("head2", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
        self.assertTrue(("head2",
                         prediction_key.PredictionKey.CLASSES) in pred_keys)

        # Tests output alternative
        out_alts = model_fn_ops.output_alternatives
        self.assertEquals(constants.ProblemType.CLASSIFICATION,
                          out_alts["head1"][0])
        self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                        out_alts["head1"][1].keys())
        self.assertTrue(prediction_key.PredictionKey.CLASSES in
                        out_alts["head1"][1].keys())

        self.assertEquals(constants.ProblemType.CLASSIFICATION,
                          out_alts["head2"][0])
        self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                        out_alts["head2"][1].keys())
        self.assertTrue(prediction_key.PredictionKey.CLASSES in
                        out_alts["head2"][1].keys())
Example #14
  def testInfer(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.INFER,
        _noop_train_op,
        logits=logits)

    self.assertTrue(model_fn_ops.predictions)
    self.assertEquals(None, model_fn_ops.loss)
    self.assertEquals(None, model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.signature_fn)
    self.assertTrue(len(model_fn_ops.output_alternatives) == 2)

    # Tests predictions keys
    pred_keys = model_fn_ops.predictions.keys()
    self.assertTrue(
        ("head1", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
    self.assertTrue(
        ("head1", prediction_key.PredictionKey.CLASSES) in pred_keys)
    self.assertTrue(
        ("head2", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
    self.assertTrue(
        ("head2", prediction_key.PredictionKey.CLASSES) in pred_keys)

    # Tests output alternative
    out_alts = model_fn_ops.output_alternatives
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head1"][0])
    self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                    out_alts["head1"][1].keys())
    self.assertTrue(
        prediction_key.PredictionKey.CLASSES in out_alts["head1"][1].keys())

    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head2"][0])
    self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                    out_alts["head2"][1].keys())
    self.assertTrue(
        prediction_key.PredictionKey.CLASSES in out_alts["head2"][1].keys())
Example #15
  def testInfer(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.INFER,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)

    # Tests predictions keys.
    self.assertItemsEqual((
        ("head1", prediction_key.PredictionKey.LOGITS),
        ("head1", prediction_key.PredictionKey.PROBABILITIES),
        ("head1", prediction_key.PredictionKey.CLASSES),
        ("head2", prediction_key.PredictionKey.LOGITS),
        ("head2", prediction_key.PredictionKey.PROBABILITIES),
        ("head2", prediction_key.PredictionKey.CLASSES),
    ), model_fn_ops.predictions.keys())

    # Tests output alternative.
    self.assertEquals({
        "head1": constants.ProblemType.CLASSIFICATION,
        "head2": constants.ProblemType.CLASSIFICATION,
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
    self.assertItemsEqual((
        prediction_key.PredictionKey.LOGITS,
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head1"][1].keys())
    self.assertItemsEqual((
        prediction_key.PredictionKey.LOGITS,
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head2"][1].keys())
Example #16
  def testInfer(self):
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.INFER,
        train_op_fn=_noop_train_op,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(2, len(model_fn_ops.output_alternatives))

    # Tests predictions keys.
    pred_keys = model_fn_ops.predictions.keys()
    self.assertIn(
        ("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(
        ("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
    self.assertIn(
        ("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
    self.assertIn(
        ("head2", prediction_key.PredictionKey.CLASSES), pred_keys)

    # Tests output alternative.
    out_alts = model_fn_ops.output_alternatives
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head1"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head1"][1].keys())
    self.assertIn(
        prediction_key.PredictionKey.CLASSES, out_alts["head1"][1].keys())

    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head2"][0])
    self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                  out_alts["head2"][1].keys())
    self.assertIn(
        prediction_key.PredictionKey.CLASSES, out_alts["head2"][1].keys())
Example #17
    def testJointLinearModel(self):
        """Tests that loss goes down with training."""
        def input_fn():
            return {
                'age':
                sparse_tensor.SparseTensor(values=['1'],
                                           indices=[[0, 0]],
                                           dense_shape=[1, 1]),
                'language':
                sparse_tensor.SparseTensor(values=['english'],
                                           indices=[[0, 0]],
                                           dense_shape=[1, 1])
            }, constant_op.constant([[1]])

        language = feature_column.sparse_column_with_hash_bucket(
            'language', 100)
        age = feature_column.sparse_column_with_hash_bucket('age', 2)

        head = head_lib._multi_class_head(n_classes=2)
        classifier = _joint_linear_estimator(head,
                                             feature_columns=[age, language])

        classifier.fit(input_fn=input_fn, steps=1000)
        loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
        classifier.fit(input_fn=input_fn, steps=2000)
        loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
        self.assertLess(loss2, loss1)
        self.assertLess(loss2, 0.01)
Example #18
 def testMultiClassWithInvalidLogits(self):
   head = head_lib._multi_class_head(n_classes=len(self._logits[0]) + 1)
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
       head.create_model_fn_ops(
           {}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
           logits=self._logits)
Example #19
 def testBinaryClassificationWithCenteredBias(self):
     head = head_lib._multi_class_head(n_classes=2,
                                       enable_centered_bias=True)
     with ops.Graph().as_default(), session.Session():
         logits = constant_op.constant(self._logits)
         labels = constant_op.constant(self._labels)
         # logloss: z:label, x:logit
         # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
         model_fn_ops = head.head_ops({},
                                      labels,
                                      model_fn.ModeKeys.TRAIN,
                                      _noop_train_op,
                                      logits=logits)
         _assert_variables(self,
                           expected_global=(
                               "centered_bias_weight:0",
                               "centered_bias_weight/Adagrad:0",
                           ),
                           expected_trainable=("centered_bias_weight:0", ))
         variables.global_variables_initializer().run()
         _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
         expected_loss = .81326175
         _assert_metrics(self, expected_loss,
                         self._expected_eval_metrics(expected_loss),
                         model_fn_ops)
Example #20
 def testBinaryClassificationWithWeights(self):
     n_classes = 2
     head = head_lib._multi_class_head(n_classes=n_classes,
                                       weight_column_name="label_weight")
     with ops.Graph().as_default(), session.Session():
         weights = ((1., ), (0., ))
         features = {"label_weight": constant_op.constant(weights)}
         logits = constant_op.constant(self._logits)
         labels = constant_op.constant(self._labels)
         # logloss: z:label, x:logit
         # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
         model_fn_ops = head.head_ops(features,
                                      labels,
                                      model_fn.ModeKeys.TRAIN,
                                      _noop_train_op,
                                      logits=logits)
         _assert_no_variables(self)
         _assert_summary_tags(self, ["loss"])
         expected_total_loss = .31326166
         _assert_metrics(
             self,
             expected_total_loss / len(weights),
             {
                 "accuracy": 1. / 1,
                 "accuracy/baseline_label_mean": 1. / 1,
                 "accuracy/threshold_0.500000_mean": 1. / 1,
                 "auc": 0. / 1,
                 "labels/actual_label_mean": 1. / 1,
                 "labels/prediction_mean": .731059,  # softmax
                 # TODO(ptucker): Is this the correct eval loss, sum not average?
                 "loss": expected_total_loss,
                 "precision/positive_threshold_0.500000_mean": 1. / 1,
                 "recall/positive_threshold_0.500000_mean": 1. / 1,
             },
             model_fn_ops)
Example #21
  def testJointLinearModel(self):
    """Tests that loss goes down with training."""

    def input_fn():
      return {
          'age':
              sparse_tensor.SparseTensor(
                  values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.sparse_column_with_hash_bucket('age', 2)

    head = head_lib._multi_class_head(n_classes=2)
    classifier = _joint_linear_estimator(head, feature_columns=[age, language])

    classifier.fit(input_fn=input_fn, steps=1000)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=2000)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)
Example #22
def get_conv_classifier():
    n_classes = 5
    feature_columns = [layers.real_valued_column("", dimension=3)]

    # learning_rate = 1.0
    # optimizer = AdagradOptimizer(learning_rate)
    #
    # learning_rate = 1.0
    # optimizer = AdadeltaOptimizer(learning_rate=learning_rate)

    # ~ 62.55%
    learning_rate = 0.01
    optimizer = AdamOptimizer(learning_rate, epsilon=0.1)

    # learning_rate = 0.05
    # optimizer = GradientDescentOptimizer(learning_rate)

    # learning_rate = 0.1
    # optimizer = RMSPropOptimizer(learning_rate, momentum=0.1)

    # learning_rate = 0.1
    # optimizer = FtrlOptimizer(learning_rate)

    return SKCompat(Estimator(
        model_fn=get_conv_model,
        params={
            'head': head_lib._multi_class_head(  # pylint: disable=protected-access
                n_classes,
                enable_centered_bias=False),
            'feature_columns': feature_columns,
            'activation_fn': tf.nn.relu,
            'learning_rate': learning_rate,
            'optimizer': optimizer
        },
        model_dir='saved_model'))
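A hypothetical usage sketch for the factory above (the data here is made up; SKCompat accepts numpy arrays directly, and real_valued_column("", dimension=3) implies 3-dimensional real-valued features):

import numpy as np

# Made-up data: 100 examples, 3 real-valued features, 5 classes.
x = np.random.rand(100, 3).astype(np.float32)
y = np.random.randint(0, 5, size=100)

classifier = get_conv_classifier()
classifier.fit(x, y, batch_size=10, steps=100)
results = classifier.score(x, y, batch_size=10, steps=1)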
Example #23
 def testBinaryClassificationWithLogitsInput(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
         logits_input=((0., 0.), (0., 0.)))
     w = ("logits/weights:0", "logits/biases:0")
     _assert_variables(
         self, expected_global=w, expected_model=w, expected_trainable=w)
     variables.global_variables_initializer().run()
     _assert_summary_tags(self, ["loss"])
     expected_loss = .69314718
     label_mean = np.mean(self._labels)
     _assert_metrics(self, expected_loss, {
         "accuracy": 1. / 2,
         "accuracy/baseline_label_mean": label_mean,
         "accuracy/threshold_0.500000_mean": 1. / 2,
         "auc": 1. / 2,
         "labels/actual_label_mean": label_mean,
         "labels/prediction_mean": .5,  # softmax
         "loss": expected_loss,
         "precision/positive_threshold_0.500000_mean": 0. / 2,
         "recall/positive_threshold_0.500000_mean": 0. / 1,
     }, model_fn_ops)
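The .69314718 expected here is ln 2: the all-zero logits_input drives the linear logits layer to output W·0 + b = 0 (biases start at zero), and sigmoid(0) = .5, so each row incurs -ln(.5) of logloss. A one-line check:

import math
print(-math.log(0.5))  # 0.6931471805599453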
Example #24
 def testBinaryClassificationWithWeights(self):
   n_classes = 2
   head = head_lib._multi_class_head(
       n_classes=n_classes, weight_column_name="label_weight")
   with tf.Graph().as_default(), tf.Session():
     weights = ((1.,), (0.,))
     features = {"label_weight": tf.constant(weights)}
     logits = tf.constant(self._logits)
     labels = tf.constant(self._labels)
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops(features, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=logits)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     expected_total_loss = .31326166
     _assert_metrics(
         self, expected_total_loss / len(weights), {
             "accuracy": 1. / 1,
             "accuracy/baseline_label_mean": 1. / 1,
             "accuracy/threshold_0.500000_mean": 1. / 1,
             "auc": 0. / 1,
             "labels/actual_label_mean": 1. / 1,
             "labels/prediction_mean": .731059,  # softmax
             # TODO(ptucker): Is this the correct eval loss, sum not average?
             "loss": expected_total_loss,
             "precision/positive_threshold_0.500000_mean": 1. / 1,
             "recall/positive_threshold_0.500000_mean": 1. / 1,
         }, model_fn_ops)
Example #25
  def testRaisesNonEmbeddingColumn(self):
    one_hot_language = tf.contrib.layers.one_hot_column(
        tf.contrib.layers.sparse_column_with_hash_bucket('language', 10))

    params = {
        'feature_columns': [one_hot_language],
        'head': head_lib._multi_class_head(2),
        'hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            one_hot_language: 0.0
        },
    }
    features = {
        'language':
            tf.SparseTensor(
                values=['en', 'fr', 'zh'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
    }
    labels = tf.constant([[0], [0], [0]], dtype=tf.int32)
    with self.assertRaisesRegexp(
        ValueError, 'can only be defined for embedding columns'):
      dnn._dnn_model_fn(features, labels,
                        tf.contrib.learn.ModeKeys.TRAIN, params)
Example #26
 def testMultiClassWithInvalidLogits(self):
   head = head_lib._multi_class_head(n_classes=len(self._logits[0]) + 1)
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
       head.create_model_fn_ops(
           {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
           logits=self._logits)
Example #27
 def testBinaryClassificationWithLogitsInput(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
         logits_input=((0., 0.), (0., 0.)))
     self._assert_output_alternatives(model_fn_ops)
     w = ("binary_logistic_head/logits/weights:0",
          "binary_logistic_head/logits/biases:0")
     _assert_variables(
         self, expected_global=w, expected_model=w, expected_trainable=w)
     variables.global_variables_initializer().run()
     _assert_summary_tags(self, ["loss"])
     expected_loss = .69314718
     label_mean = np.mean(self._labels)
     _assert_metrics(self, expected_loss, {
         "accuracy": 1. / 2,
         "accuracy/baseline_label_mean": label_mean,
         "accuracy/threshold_0.500000_mean": 1. / 2,
         "auc": 1. / 2,
         "labels/actual_label_mean": label_mean,
         "labels/prediction_mean": .5,  # softmax
         "loss": expected_loss,
         "precision/positive_threshold_0.500000_mean": 0. / 2,
         "recall/positive_threshold_0.500000_mean": 0. / 1,
     }, model_fn_ops)
Example #28
    def testLinearModel(self):
        """Tests that loss goes down with training."""
        def input_fn():
            return {
                'age':
                tf.constant([1]),
                'language':
                tf.SparseTensor(values=['english'],
                                indices=[[0, 0]],
                                shape=[1, 1])
            }, tf.constant([[1]])

        language = tf.contrib.layers.sparse_column_with_hash_bucket(
            'language', 100)
        age = tf.contrib.layers.real_valued_column('age')

        head = head_lib._multi_class_head(n_classes=2)
        classifier = LinearEstimator(head, feature_columns=[age, language])

        classifier.fit(input_fn=input_fn, steps=1000)
        loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
        classifier.fit(input_fn=input_fn, steps=2000)
        loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
        self.assertLess(loss2, loss1)
        self.assertLess(loss2, 0.01)
Example #29
 def testBinaryClassificationWithLogitsAndLogitsInput(self):
   head = head_lib._multi_class_head(n_classes=2)
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(
         ValueError, "Both logits and logits_input supplied"):
       head.create_model_fn_ops(
           {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
           logits_input=((0., 0.), (0., 0.)), logits=self._logits)
Example #30
 def testBinaryClassificationWithLogitsAndLogitsInput(self):
   head = head_lib._multi_class_head(n_classes=2)
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(
         ValueError, "Both logits and logits_input supplied"):
       head.create_model_fn_ops(
           {}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
           logits_input=((0., 0.), (0., 0.)), logits=self._logits)
Example #31
 def testBinaryClassification(self):
     head = head_lib._multi_class_head(n_classes=2)
     with tf.Graph().as_default(), tf.Session() as sess:
         logits = tf.constant([[1.0], [1.0]])
         labels = tf.constant([[1.0], [0.0]])
         # logloss: z:label, x:logit
         # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
         model_fn_ops = head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
         self.assertAlmostEqual(0.81326175, sess.run(model_fn_ops.loss), delta=1e-6)
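The 0.81326175 asserted above is the mean of the two per-row loglosses given by the formula in the comment (z = label, x = logit); a NumPy check:

import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

x = np.array([1., 1.])  # logits
z = np.array([1., 0.])  # labels
losses = z * -np.log(sigmoid(x)) + (1. - z) * -np.log(1. - sigmoid(x))
print(losses.mean())  # 0.8132616875182228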
Example #32
 def testMultiClass(self):
     head = head_lib._multi_class_head(n_classes=3)
     with tf.Graph().as_default(), tf.Session() as sess:
         logits = tf.constant([[1.0, 0.0, 0.0]])
         labels = tf.constant([2])
          # softmax cross-entropy: loss = -log(softmax(logits)[label])
         model_fn_ops = head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
         self.assertAlmostEqual(1.5514446, sess.run(model_fn_ops.loss))
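1.5514446 is the softmax cross-entropy of logits [1, 0, 0] at label 2, i.e. -log(e^0 / (e^1 + e^0 + e^0)); the 0.15514446 in the weighted variant further down is the same value scaled by the 0.1 example weight. A quick check:

import numpy as np

logits = np.array([1., 0., 0.])
probs = np.exp(logits) / np.exp(logits).sum()
print(-np.log(probs[2]))        # 1.5514447
print(.1 * -np.log(probs[2]))   # 0.15514447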
Example #33
    def testDNNModel(self):
        """Tests multi-class classification using matrix data as input."""
        cont_features = [tf.contrib.layers.real_valued_column("feature", dimension=4)]

        head = head_lib._multi_class_head(n_classes=3)
        classifier = DNNEstimator(head, feature_columns=cont_features, hidden_units=[3, 3])

        classifier.fit(input_fn=_iris_input_fn, steps=1000)
        classifier.evaluate(input_fn=_iris_input_fn, steps=100)
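_iris_input_fn is referenced but not shown in these excerpts; a hypothetical stand-in that matches the feature column ("feature", dimension=4) and the 3-class head would look like:

import numpy as np
import tensorflow as tf

def _iris_input_fn():
    # Made-up 4-feature data with labels in {0, 1, 2}; the real helper
    # presumably loads the Iris dataset.
    features = tf.constant(np.random.rand(150, 4), dtype=tf.float32)
    labels = tf.constant(np.random.randint(0, 3, size=(150, 1)))
    return {'feature': features}, labels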
Example #34
 def testMultiClassWithLogitsAndLogitsInput(self):
   n_classes = 3
   head = head_lib._multi_class_head(
       n_classes=n_classes, metric_class_ids=range(n_classes))
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(
         ValueError, "Both logits and logits_input supplied"):
       head.create_model_fn_ops(
           {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
           logits_input=((0., 0.),), logits=self._logits)
Example #35
 def testMultiClassWithLogitsAndLogitsInput(self):
   n_classes = 3
   head = head_lib._multi_class_head(
       n_classes=n_classes, metric_class_ids=range(n_classes))
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(
         ValueError, "Both logits and logits_input supplied"):
       head.create_model_fn_ops(
           {}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
           logits_input=((0., 0.),), logits=self._logits)
Example #36
 def testBinaryClassification(self):
   head = head_lib._multi_class_head(n_classes=2)
   with tf.Graph().as_default(), tf.Session() as sess:
     logits = tf.constant([[1.], [1.]])
     targets = tf.constant([[1.], [0.]])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops({}, targets,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  None, logits=logits)
     self.assertAlmostEqual(.81326163, sess.run(model_fn_ops.loss))
Example #37
 def testBinaryClassification(self):
   head = head_lib._multi_class_head(n_classes=2)
   with tf.Graph().as_default(), tf.Session() as sess:
     logits = tf.constant([[1.], [1.]])
     labels = tf.constant([[1.], [0.]])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=logits)
     self.assertAlmostEqual(.81326163, sess.run(model_fn_ops.loss))
Example #38
 def testErrorInSparseTensorLabels(self):
     head = head_lib._multi_class_head(n_classes=2)
     with tf.Graph().as_default():
         prediction = tf.constant([[1.0], [1.0], [3.0]])
         labels = tf.SparseTensor(
             indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
             values=tf.constant([0, 1, 1]),
             shape=[3, 1],
         )
         with self.assertRaisesRegexp(ValueError, "SparseTensor is not supported as labels."):
             head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=prediction)
Example #39
 def testMultiClass(self):
   head = head_lib._multi_class_head(n_classes=3)
   with tf.Graph().as_default(), tf.Session() as sess:
     logits = tf.constant([[1., 0., 0.]])
     targets = tf.constant([2])
      # softmax cross-entropy: loss = -log(softmax(logits)[label])
     model_fn_ops = head.head_ops({}, targets,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  None, logits=logits)
     self.assertAlmostEqual(1.5514446, sess.run(model_fn_ops.loss))
Example #40
 def testMultiClassWithWeight(self):
     head = head_lib._multi_class_head(n_classes=3, weight_column_name="label_weight")
     with tf.Graph().as_default(), tf.Session() as sess:
         features = {"label_weight": tf.constant([0.1])}
         logits = tf.constant([[1.0, 0.0, 0.0]])
         labels = tf.constant([2])
          # weighted softmax cross-entropy: weight * -log(softmax(logits)[label])
         model_fn_ops = head.head_ops(
             features, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=logits
         )
         self.assertAlmostEqual(0.15514446, sess.run(model_fn_ops.loss))
Example #41
 def testBinaryClassificationWithWeights(self):
     head = head_lib._multi_class_head(n_classes=2, weight_column_name="label_weight")
     with tf.Graph().as_default(), tf.Session() as sess:
         features = {"label_weight": tf.constant([[1.0], [0.0]])}
         logits = tf.constant([[1.0], [1.0]])
         labels = tf.constant([[1.0], [0.0]])
         # logloss: z:label, x:logit
         # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
         model_fn_ops = head.head_ops(
             features, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=logits
         )
         self.assertAlmostEqual(0.31326166 / 2, sess.run(model_fn_ops.loss), delta=1e-6)
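The 0.31326166 / 2 follows from the weights: the first row contributes -ln(sigmoid(1)) ≈ 0.31326166, the second row is zeroed out by its 0.0 weight, and the head divides the weighted loss sum by the batch size rather than by the weight sum. A NumPy check:

import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

x = np.array([1., 1.])  # logits
z = np.array([1., 0.])  # labels
w = np.array([1., 0.])  # per-example weights
losses = z * -np.log(sigmoid(x)) + (1. - z) * -np.log(1. - sigmoid(x))
print((w * losses).sum() / len(w))  # 0.15663084 == .31326166 / 2 (approx.)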
Example #42
 def testErrorInSparseTensorLabels(self):
   head = head_lib._multi_class_head(n_classes=2)
   with tf.Graph().as_default():
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.SparseTensor(
         indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
         values=tf.constant([0, 1, 1]),
         shape=[3, 1])
     with self.assertRaisesRegexp(
         ValueError, "SparseTensor is not supported as labels."):
       head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
                     _noop_train_op, logits=prediction)
Example #43
 def testErrorInSparseTensorTarget(self):
   head = head_lib._multi_class_head(n_classes=2)
   with tf.Graph().as_default():
     prediction = tf.constant([[1.], [1.], [3.]])
     targets = tf.SparseTensor(
         indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
         values=tf.constant([0, 1, 1]),
         shape=[3, 1])
     with self.assertRaisesRegexp(
         ValueError, "SparseTensor is not supported as a target"):
       head.head_ops({}, targets, tf.contrib.learn.ModeKeys.TRAIN, None,
                     logits=prediction)
Example #44
  def testDNNModel(self):
    """Tests multi-class classification using matrix data as input."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]

    head = head_lib._multi_class_head(n_classes=3)
    classifier = DNNEstimator(head,
                              feature_columns=cont_features,
                              hidden_units=[3, 3])

    classifier.fit(input_fn=_iris_input_fn, steps=1000)
    classifier.evaluate(input_fn=_iris_input_fn, steps=100)
Example #45
 def testBinaryClassificationInferMode(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, model_fn.ModeKeys.INFER, self._labels, _noop_train_op,
         logits=self._logits)
     self._assert_output_alternatives(model_fn_ops)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
Example #46
 def testBinaryClassificationInferMode(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, model_fn.ModeKeys.INFER, self._labels, _noop_train_op,
         logits=self._logits)
     self._assert_output_alternatives(model_fn_ops)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
Example #47
 def testBinaryClassificationWithWeights(self):
   head = head_lib._multi_class_head(
       n_classes=2, weight_column_name="label_weight")
   with tf.Graph().as_default(), tf.Session() as sess:
     features = {"label_weight": tf.constant([[1.], [0.]])}
     logits = tf.constant([[1.], [1.]])
     labels = tf.constant([[1.], [0.]])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops(features, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=logits)
     self.assertAlmostEqual(.31326166 / 2, sess.run(model_fn_ops.loss))
Example #48
 def testMultiClassWithWeight(self):
   head = head_lib._multi_class_head(
       n_classes=3, weight_column_name="label_weight")
   with tf.Graph().as_default(), tf.Session() as sess:
     features = {"label_weight": tf.constant([0.1])}
     logits = tf.constant([[1., 0., 0.]])
     targets = tf.constant([2])
      # weighted softmax cross-entropy: weight * -log(softmax(logits)[label])
     model_fn_ops = head.head_ops(features, targets,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  None, logits=logits)
     self.assertAlmostEqual(.15514446, sess.run(model_fn_ops.loss))
Example #49
 def testBinaryClassificationInferMode(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, self._labels, model_fn.ModeKeys.INFER, _noop_train_op,
         logits=self._logits)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
     self.assertEquals(1, len(model_fn_ops.output_alternatives))
     self.assertEquals(constants.ProblemType.LOGISTIC_REGRESSION,
                       model_fn_ops.output_alternatives[None][0])
Example #50
 def testBinaryClassificationWithLogits(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         {}, self._labels, model_fn.ModeKeys.TRAIN, _noop_train_op,
         logits=self._logits)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     expected_loss = .81326175
     _assert_metrics(self, expected_loss,
                     self._expected_eval_metrics(expected_loss), model_fn_ops)
Example #51
 def testBinaryClassificationInferMode_withWeightColumn(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes,
                                     weight_column_name="label_weight")
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         # This is what is being tested, features should not have weight for
         # inference.
         {}, model_fn.ModeKeys.INFER, self._labels, _noop_train_op,
         logits=self._logits)
     self._assert_output_alternatives(model_fn_ops)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
Example #52
 def testBinaryClassificationInferMode_withWeightColumn(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes,
                                     weight_column_name="label_weight")
   with ops.Graph().as_default(), session.Session():
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.create_model_fn_ops(
         # This is what is being tested, features should not have weight for
         # inference.
         {}, model_fn.ModeKeys.INFER, self._labels, _noop_train_op,
         logits=self._logits)
     self._assert_output_alternatives(model_fn_ops)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
Example #53
 def testBinaryClassificationWithLabelName(self):
   label_name = "my_label"
   head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
   with ops.Graph().as_default(), session.Session():
     logits = constant_op.constant(self._logits)
     labels = {label_name: constant_op.constant(self._labels)}
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops(
         {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     expected_loss = .81326175
     _assert_metrics(self, expected_loss,
                     self._expected_eval_metrics(expected_loss), model_fn_ops)
Example #54
 def testMultiClassEvalMode(self):
   n_classes = 3
   head = head_lib._multi_class_head(
       n_classes=n_classes, metric_class_ids=range(n_classes))
   with ops.Graph().as_default(), session.Session():
      # softmax cross-entropy: loss = -log(softmax(logits)[label])
     model_fn_ops = head.create_model_fn_ops(
         {}, self._labels, model_fn.ModeKeys.EVAL, _noop_train_op,
         logits=self._logits)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     expected_loss = 1.5514446
     _assert_metrics(self, expected_loss,
                     self._expected_eval_metrics(expected_loss), model_fn_ops)
Example #55
 def testMultiClassWithLogits(self):
   n_classes = 3
   head = head_lib._multi_class_head(
       n_classes=n_classes, metric_class_ids=range(n_classes))
   with ops.Graph().as_default(), session.Session():
      # softmax cross-entropy: loss = -log(softmax(logits)[label])
     model_fn_ops = head.create_model_fn_ops(
         {}, model_fn.ModeKeys.TRAIN, self._labels, _noop_train_op,
         logits=self._logits)
     self._assert_output_alternatives(model_fn_ops)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     expected_loss = 1.5514446
     _assert_metrics(self, expected_loss,
                     self._expected_eval_metrics(expected_loss), model_fn_ops)
Example #56
 def testBinaryClassificationWithLabelName(self):
   label_name = "my_label"
   head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
   with tf.Graph().as_default(), tf.Session():
     logits = tf.constant(self._logits)
     labels = {label_name: tf.constant(self._labels)}
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=logits)
     _assert_no_variables(self)
     expected_loss = .81326175
     _assert_metrics(
         self, expected_loss, self._expected_eval_metrics(expected_loss),
         model_fn_ops)
Example #57
 def testErrorInSparseTensorLabels(self):
   n_classes = 2
   head = head_lib._multi_class_head(n_classes=n_classes)
   with ops.Graph().as_default():
     labels = sparse_tensor.SparseTensorValue(
         indices=((0, 0), (1, 0), (2, 0)),
         values=(0, 1, 1),
         dense_shape=(3, 1))
     with self.assertRaisesRegexp(ValueError,
                                  "SparseTensor is not supported as labels."):
       head.create_model_fn_ops(
           {},
           labels,
           model_fn.ModeKeys.TRAIN,
           _noop_train_op,
           logits=((1.,), (1.,), (3.,)))