Пример #1
0
 def testMultiClassWithInvalidNClass(self):
     """multi_class_target must raise ValueError for n_classes=1.

     A one-class softmax target is degenerate, so construction is
     expected to fail.
     """
     # assertRaises replaces the manual try/self.fail/except-pass
     # pattern with the idiomatic, clearer equivalent.
     with self.assertRaises(ValueError):
         target_column_lib.multi_class_target(n_classes=1)
Пример #2
0
 def testMultiClassWithInvalidNClass(self):
   """multi_class_target with a single class must raise ValueError."""
   # Idiomatic replacement for the manual try/self.fail/except-pass
   # pattern; assertRaises also gives a better message on failure.
   with self.assertRaises(ValueError):
     target_column_lib.multi_class_target(n_classes=1)
Пример #3
0
 def _getModelFnOpsForMode(self, mode):
   """Helper for testGetRnnModelFn{Train,Eval,Infer}().

   Builds a state-saving RNN model_fn via the private
   ssre._get_rnn_model_fn and invokes it on a tiny fixed dataset.

   Args:
     mode: presumably a tf.contrib.learn ModeKeys value
       (TRAIN/EVAL/INFER) — confirm against the test callers.

   Returns:
     The ModelFnOps produced by the constructed model_fn.
   """
   # One RNN layer with 4 units.
   num_units = [4]
   seq_columns = [
       feature_column.real_valued_column(
           'inputs', dimension=1)
   ]
   # Three scalar timestep inputs with matching binary labels.
   features = {
       'inputs': constant_op.constant([1., 2., 3.]),
   }
   labels = constant_op.constant([1., 0., 1.])
   model_fn = ssre._get_rnn_model_fn(
       cell_type='basic_rnn',
       target_column=target_column_lib.multi_class_target(n_classes=2),
       optimizer='SGD',
       num_unroll=2,
       num_units=num_units,
       num_threads=1,
       queue_capacity=10,
       batch_size=1,
       # Only CLASSIFICATION yields eval metrics to test for.
       problem_type=constants.ProblemType.CLASSIFICATION,
       sequence_feature_columns=seq_columns,
       context_feature_columns=None,
       learning_rate=0.1)
   model_fn_ops = model_fn(features=features, labels=labels, mode=mode)
   return model_fn_ops
 def _getModelFnOpsForMode(self, mode):
   """Builds the state-saving RNN model_fn and returns its ops for `mode`.

   Helper for testGetRnnModelFn{Train,Eval,Infer}().
   """
   cell_sizes = [4]
   sequence_columns = [
       feature_column.real_valued_column('inputs', dimension=1)
   ]
   input_features = {
       'inputs': constant_op.constant([1., 2., 3.]),
   }
   binary_labels = constant_op.constant([1., 0., 1.])
   rnn_model_fn = ssre._get_rnn_model_fn(
       cell_type='basic_rnn',
       target_column=target_column_lib.multi_class_target(n_classes=2),
       optimizer='SGD',
       num_unroll=2,
       num_units=cell_sizes,
       num_threads=1,
       queue_capacity=10,
       batch_size=1,
       # Only CLASSIFICATION yields eval metrics to test for.
       problem_type=constants.ProblemType.CLASSIFICATION,
       sequence_feature_columns=sequence_columns,
       context_feature_columns=None,
       learning_rate=0.1)
   return rnn_model_fn(features=input_features, labels=binary_labels,
                       mode=mode)
Пример #5
0
 def testMultiClass(self):
     """Softmax logloss for one 3-class example labeled as class 2."""
     column = target_column_lib.multi_class_target(n_classes=3)
     with ops.Graph().as_default():
         with session.Session() as sess:
             logits = constant_op.constant([[1., 0., 0.]])
             labels = constant_op.constant([2])
             # logloss: z:label, x:logit
             # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
             loss_value = sess.run(column.loss(logits, labels, {}))
             self.assertAlmostEqual(1.5514446, loss_value)
Пример #6
0
 def testMultiClass(self):
   """3-class target loss on a single example with label 2."""
   column = target_column_lib.multi_class_target(n_classes=3)
   with ops.Graph().as_default(), session.Session() as sess:
     class_logits = constant_op.constant([[1., 0., 0.]])
     class_labels = constant_op.constant([2])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     loss_tensor = column.loss(class_logits, class_labels, {})
     self.assertAlmostEqual(1.5514446, sess.run(loss_tensor))
Пример #7
0
 def testBinaryClassification(self):
     """Sigmoid logloss for logits [1., 1.] against labels [1., 0.]."""
     column = target_column_lib.multi_class_target(n_classes=2)
     with ops.Graph().as_default():
         with session.Session() as sess:
             logits = constant_op.constant([[1.], [1.]])
             labels = constant_op.constant([[1.], [0.]])
             # logloss: z:label, x:logit
             # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
             loss_value = sess.run(column.loss(logits, labels, {}))
             self.assertAlmostEqual(0.81326175, loss_value, delta=1e-6)
Пример #8
0
 def testBinaryClassification(self):
   """Mean sigmoid cross-entropy over two unweighted binary examples."""
   column = target_column_lib.multi_class_target(n_classes=2)
   with ops.Graph().as_default(), session.Session() as sess:
     binary_logits = constant_op.constant([[1.], [1.]])
     binary_labels = constant_op.constant([[1.], [0.]])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     actual_loss = sess.run(column.loss(binary_logits, binary_labels, {}))
     self.assertAlmostEqual(0.81326175, actual_loss, delta=1e-6)
Пример #9
0
 def testMultiClassWithWeight(self):
   """3-class loss for a single example carrying weight 0.1."""
   column = target_column_lib.multi_class_target(
       n_classes=3, weight_column_name="label_weight")
   with ops.Graph().as_default(), session.Session() as sess:
     features = {"label_weight": constant_op.constant([0.1])}
     logits = constant_op.constant([[1., 0., 0.]])
     labels = constant_op.constant([2])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     loss_value = sess.run(column.loss(logits, labels, features))
     self.assertAlmostEqual(1.5514446, loss_value)
Пример #10
0
 def testMultiClassWithWeight(self):
     """3-class loss when a 'label_weight' feature column is configured."""
     column = target_column_lib.multi_class_target(
         n_classes=3, weight_column_name="label_weight")
     with ops.Graph().as_default():
         with session.Session() as sess:
             features = {"label_weight": constant_op.constant([0.1])}
             logits = constant_op.constant([[1., 0., 0.]])
             labels = constant_op.constant([2])
             # logloss: z:label, x:logit
             # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
             loss_tensor = column.loss(logits, labels, features)
             self.assertAlmostEqual(1.5514446, sess.run(loss_tensor))
Пример #11
0
 def testBinaryEvalMetrics(self):
   """Streaming accuracy at threshold 0.5 over three binary examples."""
   column = target_column_lib.multi_class_target(n_classes=2)
   with ops.Graph().as_default(), session.Session() as sess:
     logits = constant_op.constant([[1.], [1.], [-1.]])
     labels = constant_op.constant([[1.], [0.], [1.]])
     metric_ops = column.get_eval_ops({}, logits, labels)
     # TODO(zakaria): test all metrics
     accuracy_op, update_op = metric_ops["accuracy/threshold_0.500000_mean"]
     sess.run(variables.global_variables_initializer())
     sess.run(variables.local_variables_initializer())
     sess.run(update_op)
     # Only the first example is classified correctly.
     self.assertAlmostEqual(1.0 / 3, sess.run(accuracy_op))
Пример #12
0
 def testBinaryClassificationWithWeights(self):
   """Zero-weighting the second example leaves only the first's loss."""
   column = target_column_lib.multi_class_target(
       n_classes=2, weight_column_name="label_weight")
   with ops.Graph().as_default(), session.Session() as sess:
     features = {"label_weight": constant_op.constant([[1.], [0.]])}
     logits = constant_op.constant([[1.], [1.]])
     labels = constant_op.constant([[1.], [0.]])
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     weighted_loss = sess.run(column.loss(logits, labels, features))
     self.assertAlmostEqual(.31326166, weighted_loss, delta=1e-6)
Пример #13
0
 def testMultiClassEvalMetrics(self):
   """Streaming 'loss' eval metric equals the expected logloss."""
   column = target_column_lib.multi_class_target(n_classes=3)
   with ops.Graph().as_default(), session.Session() as sess:
     logits = constant_op.constant([[1., 0., 0.]])
     labels = constant_op.constant([2])
     metric_ops = column.get_eval_ops({}, logits, labels)
     loss_op, update_op = metric_ops["loss"]
     sess.run(variables.global_variables_initializer())
     sess.run(variables.local_variables_initializer())
     sess.run(update_op)
     # logloss: z:label, x:logit
     # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     self.assertAlmostEqual(1.5514446, sess.run(loss_op))
Пример #14
0
 def testMultiClassEvalMetrics(self):
     """The 'loss' entry of get_eval_ops reproduces the logloss value."""
     column = target_column_lib.multi_class_target(n_classes=3)
     with ops.Graph().as_default():
         with session.Session() as sess:
             logits = constant_op.constant([[1., 0., 0.]])
             labels = constant_op.constant([2])
             eval_ops = column.get_eval_ops({}, logits, labels)
             loss_metric, loss_update = eval_ops["loss"]
             sess.run(variables.global_variables_initializer())
             sess.run(variables.local_variables_initializer())
             sess.run(loss_update)
             # logloss: z:label, x:logit
             # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
             self.assertAlmostEqual(1.5514446, sess.run(loss_metric))
Пример #15
0
 def testBinaryEvalMetrics(self):
     """Streaming accuracy metric: one of three predictions is correct."""
     column = target_column_lib.multi_class_target(n_classes=2)
     with ops.Graph().as_default():
         with session.Session() as sess:
             logits = constant_op.constant([[1.], [1.], [-1.]])
             labels = constant_op.constant([[1.], [0.], [1.]])
             eval_ops = column.get_eval_ops({}, logits, labels)
             # TODO(zakaria): test all metrics
             accuracy_metric, accuracy_update = eval_ops[
                 "accuracy/threshold_0.500000_mean"]
             sess.run(variables.global_variables_initializer())
             sess.run(variables.local_variables_initializer())
             sess.run(accuracy_update)
             self.assertAlmostEqual(1.0 / 3, sess.run(accuracy_metric))
Пример #16
0
 def testBinaryClassificationWithWeights(self):
     """Binary loss with the second example weighted down to zero."""
     column = target_column_lib.multi_class_target(
         n_classes=2, weight_column_name="label_weight")
     with ops.Graph().as_default():
         with session.Session() as sess:
             features = {"label_weight": constant_op.constant([[1.], [0.]])}
             logits = constant_op.constant([[1.], [1.]])
             labels = constant_op.constant([[1.], [0.]])
             # logloss: z:label, x:logit
             # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
             loss_value = sess.run(column.loss(logits, labels, features))
             self.assertAlmostEqual(.31326166, loss_value, delta=1e-6)
 def _GetModelFnOpsForMode(self, mode):
   """Helper for testGetDynamicRnnModelFn{Train,Eval,Infer}().

   Builds a dynamic-RNN model_fn via the private
   dynamic_rnn_estimator._get_dynamic_rnn_model_fn and invokes it with
   fixture-provided features and labels.

   Args:
     mode: presumably a tf.contrib.learn ModeKeys value
       (TRAIN/EVAL/INFER) — confirm against the test callers.

   Returns:
     The ModelFnOps returned by the constructed model_fn.
   """
   model_fn = dynamic_rnn_estimator._get_dynamic_rnn_model_fn(
       self.rnn_cell,
       target_column=target_column_lib.multi_class_target(n_classes=2),
       # Only CLASSIFICATION yields eval metrics to test for.
       problem_type=dynamic_rnn_estimator.ProblemType.CLASSIFICATION,
       prediction_type=dynamic_rnn_estimator.PredictionType.MULTIPLE_VALUE,
       optimizer='SGD',
       sequence_feature_columns=self.sequence_feature_columns,
       context_feature_columns=self.context_feature_columns,
       learning_rate=0.1)
   # Per the helper's name, labels are presumably None in INFER mode.
   labels = self.GetClassificationTargetsOrNone(mode)
   model_fn_ops = model_fn(
       features=self.GetColumnsToTensors(), labels=labels, mode=mode)
   return model_fn_ops
 def _GetModelFnOpsForMode(self, mode):
   """Builds the dynamic-RNN model_fn and returns its ops for `mode`.

   Helper for testGetDynamicRnnModelFn{Train,Eval,Infer}().
   """
   classifier_model_fn = dynamic_rnn_estimator._get_dynamic_rnn_model_fn(
       self.rnn_cell,
       target_column=target_column_lib.multi_class_target(n_classes=2),
       # Only CLASSIFICATION yields eval metrics to test for.
       problem_type=dynamic_rnn_estimator.ProblemType.CLASSIFICATION,
       prediction_type=dynamic_rnn_estimator.PredictionType.MULTIPLE_VALUE,
       optimizer='SGD',
       sequence_feature_columns=self.sequence_feature_columns,
       context_feature_columns=self.context_feature_columns,
       learning_rate=0.1)
   mode_labels = self.GetClassificationTargetsOrNone(mode)
   return classifier_model_fn(
       features=self.GetColumnsToTensors(), labels=mode_labels, mode=mode)