def testInvalidHeads(self):
  """Ensures _multi_head rejects unnamed heads and nested multi-heads."""
  named_head = head_lib._multi_class_head(
      n_classes=3, label_name="label", head_name="head1")
  unnamed_head = head_lib._multi_class_head(
      n_classes=4, label_name="label")
  # Every member head must carry a head_name so losses/metrics can be scoped.
  with self.assertRaisesRegexp(ValueError, "must have names"):
    head_lib._multi_head((named_head, unnamed_head))
  # A multi-head cannot itself be a member of another multi-head.
  with self.assertRaisesRegexp(ValueError, "must be SingleHead"):
    head_lib._multi_head((named_head, head_lib._multi_head((named_head,))))
def testInvalidHeads(self):
  """Construction fails for unnamed member heads or nested multi-heads."""
  head_with_name = head_lib._multi_class_head(
      n_classes=3, label_name="label", head_name="head1")
  head_without_name = head_lib._multi_class_head(
      n_classes=4, label_name="label")
  # Member heads without a head_name are rejected.
  with self.assertRaisesRegexp(ValueError, "must have names"):
    head_lib._multi_head((head_with_name, head_without_name))
  # Nesting one multi-head inside another is rejected.
  with self.assertRaisesRegexp(ValueError, "must be SingleHead"):
    nested = head_lib._multi_head((head_with_name,))
    head_lib._multi_head((head_with_name, nested))
def testEval(self):
  """EVAL-mode head_ops: loss and per-head metrics present, no train op.

  Uses deprecated `assertEquals`/`assertTrue(x is not None)` replaced with
  the canonical `assertIsNone`/`assertIsNotNone`/`assertIn` asserts.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2], [1, .5])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = tf.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": tf.constant([1]),
      "label2": tf.constant([1])
  }
  features = {"weights": tf.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(features, labels,
                               tf.contrib.learn.ModeKeys.EVAL,
                               _noop_train_op, logits=logits)
  self.assertTrue(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  # No training happens in EVAL mode.
  self.assertIsNone(model_fn_ops.train_op)
  self.assertTrue(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertIsNone(model_fn_ops.output_alternatives)
  metric_ops = model_fn_ops.eval_metric_ops
  # Tests eval keys: metrics are namespaced by head_name.
  self.assertIn("accuracy/head1", metric_ops.keys())
  self.assertIn("accuracy/head2", metric_ops.keys())
def testTrain_withNoHeadWeights(self):
  """TRAIN-mode head_ops without head weights: per-head losses summed.

  Replaces deprecated `assertEquals` and `assertTrue(x is not None)` with
  `assertIsNone`/`assertIsNotNone`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = tf.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": tf.constant([1]),
      "label2": tf.constant([1])
  }
  features = {"weights": tf.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(features, labels,
                               tf.contrib.learn.ModeKeys.TRAIN,
                               _noop_train_op, logits=logits)
  # TRAIN mode produces loss/train_op only.
  self.assertIsNone(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  self.assertIsNotNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertIsNone(model_fn_ops.output_alternatives)
  with tf.Session() as sess:
    # Unweighted sum of the two heads' losses.
    self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
def testEval(self):
  """EVAL mode with explicit head weights: loss and metrics, no train op."""
  first_head = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  second_head = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  multi_head = head_lib._multi_head((first_head, second_head), (1, .5))
  ops = multi_head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels={
          "label1": (1,),
          "label2": (1,)
      },
      mode=model_fn.ModeKeys.EVAL,
      train_op_fn=_noop_train_op,
      # 3 logits for head1 followed by 4 logits for head2.
      logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
  self.assertIsNotNone(ops.predictions)
  self.assertIsNotNone(ops.loss)
  self.assertIsNone(ops.train_op)
  self.assertIsNotNone(ops.eval_metric_ops)
  self.assertIsNone(ops.output_alternatives)
  # Tests eval keys: each head contributes metrics under its head_name.
  self.assertIn("accuracy/head1", ops.eval_metric_ops.keys())
  self.assertIn("accuracy/head2", ops.eval_metric_ops.keys())
def testTrain_withNoHeadWeights(self):
  """TRAIN mode without head weights: the heads' losses are summed as-is."""
  first_head = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  second_head = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  multi_head = head_lib._multi_head((first_head, second_head))
  ops = multi_head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels={
          "label1": (1,),
          "label2": (1,)
      },
      mode=model_fn.ModeKeys.TRAIN,
      train_op_fn=_noop_train_op,
      # 3 logits for head1 followed by 4 logits for head2.
      logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
  # TRAIN mode yields loss/train_op only.
  self.assertIsNone(ops.predictions)
  self.assertIsNotNone(ops.loss)
  self.assertIsNotNone(ops.train_op)
  self.assertFalse(ops.eval_metric_ops)
  self.assertIsNone(ops.output_alternatives)
  with session.Session() as sess:
    # Unweighted sum of the two per-head losses.
    self.assertAlmostEqual(2.224, sess.run(ops.loss), places=3)
def testEval(self):
  """EVAL-mode head_ops: loss and per-head metrics present, no train op.

  Replaces deprecated `assertEquals` and `assertTrue(x is not None)` /
  `assertTrue(a in b)` with `assertIsNone`/`assertIsNotNone`/`assertIn`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2], [1, .5])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": constant_op.constant([1]),
      "label2": constant_op.constant([1])
  }
  features = {"weights": constant_op.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(features, labels, model_fn.ModeKeys.EVAL,
                               _noop_train_op, logits=logits)
  self.assertTrue(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  # No training happens in EVAL mode.
  self.assertIsNone(model_fn_ops.train_op)
  self.assertTrue(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertIsNone(model_fn_ops.output_alternatives)
  metric_ops = model_fn_ops.eval_metric_ops
  # Tests eval keys: metrics are namespaced by head_name.
  self.assertIn("accuracy/head1", metric_ops.keys())
  self.assertIn("accuracy/head2", metric_ops.keys())
def testTrain_withHeadWeights(self):
  """TRAIN-mode head_ops with weights (1, .5): weighted loss sum.

  Replaces deprecated `assertEquals` and `assertTrue(x is not None)` with
  `assertIsNone`/`assertIsNotNone`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2], [1, .5])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": constant_op.constant([1]),
      "label2": constant_op.constant([1])
  }
  features = {"weights": constant_op.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(features, labels, model_fn.ModeKeys.TRAIN,
                               _noop_train_op, logits=logits)
  # TRAIN mode produces loss/train_op only.
  self.assertIsNone(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  self.assertIsNotNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertIsNone(model_fn_ops.output_alternatives)
  with session.Session() as sess:
    # head2's loss is halved by its .5 weight, so total < unweighted 2.224.
    self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)
def testTrain_withNoHeadWeights(self):
  """TRAIN mode, no head weights: total loss is the plain sum over heads."""
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head((head1, head2))
  # Single example; logits hold 3 values for head1 then 4 for head2.
  combined_logits = ((-0.7, 0.2, .1, .1, .1, .1, .1),)
  model_fn_ops = head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels={
          "label1": (1,),
          "label2": (1,)
      },
      mode=model_fn.ModeKeys.TRAIN,
      train_op_fn=_noop_train_op,
      logits=combined_logits)
  self.assertIsNone(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  self.assertIsNotNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.output_alternatives)
  with session.Session() as sess:
    loss_value = sess.run(model_fn_ops.loss)
    self.assertAlmostEqual(2.224, loss_value, places=3)
def testEval(self):
  """EVAL mode with head weights (1, .5): metrics for both heads exist."""
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head((head1, head2), (1, .5))
  # Single example; logits hold 3 values for head1 then 4 for head2.
  combined_logits = ((-0.7, 0.2, .1, .1, .1, .1, .1),)
  model_fn_ops = head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels={
          "label1": (1,),
          "label2": (1,)
      },
      mode=model_fn.ModeKeys.EVAL,
      train_op_fn=_noop_train_op,
      logits=combined_logits)
  self.assertIsNotNone(model_fn_ops.predictions)
  self.assertIsNotNone(model_fn_ops.loss)
  self.assertIsNone(model_fn_ops.train_op)
  self.assertIsNotNone(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.output_alternatives)
  # Tests eval keys: one accuracy metric per head_name.
  metric_keys = model_fn_ops.eval_metric_ops.keys()
  self.assertIn("accuracy/head1", metric_keys)
  self.assertIn("accuracy/head2", metric_keys)
def testInfer(self):
  """INFER-mode head_ops: predictions and output alternatives only.

  Replaces deprecated `assertEquals`, `assertTrue(a in b)` and
  `assertTrue(len(x) == 2)` with `assertIsNone`/`assertIn`/`assertEqual`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2], [1, .5])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": constant_op.constant([1]),
      "label2": constant_op.constant([1])
  }
  features = {"weights": constant_op.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(features, labels, model_fn.ModeKeys.INFER,
                               _noop_train_op, logits=logits)
  self.assertTrue(model_fn_ops.predictions)
  # No loss, train op or metrics in INFER mode.
  self.assertIsNone(model_fn_ops.loss)
  self.assertIsNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertEqual(2, len(model_fn_ops.output_alternatives))
  # Tests predictions keys: keys are (head_name, prediction_key) pairs.
  pred_keys = model_fn_ops.predictions.keys()
  self.assertIn(("head1", prediction_key.PredictionKey.PROBABILITIES),
                pred_keys)
  self.assertIn(("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
  self.assertIn(("head2", prediction_key.PredictionKey.PROBABILITIES),
                pred_keys)
  self.assertIn(("head2", prediction_key.PredictionKey.CLASSES), pred_keys)
  # Tests output alternative: one classification alternative per head.
  out_alts = model_fn_ops.output_alternatives
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head1"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head1"][1].keys())
  self.assertIn(prediction_key.PredictionKey.CLASSES,
                out_alts["head1"][1].keys())
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head2"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head2"][1].keys())
  self.assertIn(prediction_key.PredictionKey.CLASSES,
                out_alts["head2"][1].keys())
def testInfer(self):
  """INFER-mode head_ops: predictions and output alternatives only.

  Replaces deprecated `assertEquals`, `assertTrue(a in b)` and
  `assertTrue(len(x) == 2)` with `assertIsNone`/`assertIn`/`assertEqual`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head([head1, head2], [1, .5])
  # 3 logits for head1 + 4 logits for head2, concatenated.
  logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
  labels = {
      "label1": constant_op.constant([1]),
      "label2": constant_op.constant([1])
  }
  features = {"weights": constant_op.constant([2.0, 10.0])}
  model_fn_ops = head.head_ops(
      features, labels, model_fn.ModeKeys.INFER, _noop_train_op,
      logits=logits)
  self.assertTrue(model_fn_ops.predictions)
  # No loss, train op or metrics in INFER mode.
  self.assertIsNone(model_fn_ops.loss)
  self.assertIsNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertIsNone(model_fn_ops.signature_fn)
  self.assertEqual(2, len(model_fn_ops.output_alternatives))
  # Tests predictions keys: keys are (head_name, prediction_key) pairs.
  pred_keys = model_fn_ops.predictions.keys()
  self.assertIn(
      ("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
  self.assertIn(
      ("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
  self.assertIn(
      ("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
  self.assertIn(
      ("head2", prediction_key.PredictionKey.CLASSES), pred_keys)
  # Tests output alternative: one classification alternative per head.
  out_alts = model_fn_ops.output_alternatives
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head1"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head1"][1].keys())
  self.assertIn(
      prediction_key.PredictionKey.CLASSES, out_alts["head1"][1].keys())
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head2"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head2"][1].keys())
  self.assertIn(
      prediction_key.PredictionKey.CLASSES, out_alts["head2"][1].keys())
def testInfer(self):
  """INFER mode: full prediction-key set and per-head output alternatives.

  Replaces the deprecated `assertEquals` alias with `assertEqual`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head((head1, head2), (1, .5))
  labels = {
      "label1": (1,),
      "label2": (1,)
  }
  model_fn_ops = head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels=labels,
      mode=model_fn.ModeKeys.INFER,
      train_op_fn=_noop_train_op,
      # 3 logits for head1 followed by 4 logits for head2.
      logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
  self.assertIsNotNone(model_fn_ops.predictions)
  # No loss, train op or metrics in INFER mode.
  self.assertIsNone(model_fn_ops.loss)
  self.assertIsNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  # Tests predictions keys: every (head_name, prediction_key) pair.
  self.assertItemsEqual((
      ("head1", prediction_key.PredictionKey.LOGITS),
      ("head1", prediction_key.PredictionKey.PROBABILITIES),
      ("head1", prediction_key.PredictionKey.CLASSES),
      ("head2", prediction_key.PredictionKey.LOGITS),
      ("head2", prediction_key.PredictionKey.PROBABILITIES),
      ("head2", prediction_key.PredictionKey.CLASSES),
  ), model_fn_ops.predictions.keys())
  # Tests output alternative: both heads export classification problems.
  self.assertEqual({
      "head1": constants.ProblemType.CLASSIFICATION,
      "head2": constants.ProblemType.CLASSIFICATION,
  }, {
      k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
  })
  self.assertItemsEqual((
      prediction_key.PredictionKey.LOGITS,
      prediction_key.PredictionKey.PROBABILITIES,
      prediction_key.PredictionKey.CLASSES,
  ), model_fn_ops.output_alternatives["head1"][1].keys())
  self.assertItemsEqual((
      prediction_key.PredictionKey.LOGITS,
      prediction_key.PredictionKey.PROBABILITIES,
      prediction_key.PredictionKey.CLASSES,
  ), model_fn_ops.output_alternatives["head2"][1].keys())
def testInfer(self):
  """INFER mode: prediction keys present, one output alternative per head.

  Replaces the deprecated `assertEquals` alias with `assertEqual`.
  """
  head1 = head_lib._multi_class_head(
      n_classes=3, label_name="label1", head_name="head1")
  head2 = head_lib._multi_class_head(
      n_classes=4, label_name="label2", head_name="head2")
  head = head_lib._multi_head((head1, head2), (1, .5))
  labels = {
      "label1": (1,),
      "label2": (1,)
  }
  model_fn_ops = head.create_model_fn_ops(
      features={"weights": (2.0, 10.0)},
      labels=labels,
      mode=model_fn.ModeKeys.INFER,
      train_op_fn=_noop_train_op,
      # 3 logits for head1 followed by 4 logits for head2.
      logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
  self.assertIsNotNone(model_fn_ops.predictions)
  # No loss, train op or metrics in INFER mode.
  self.assertIsNone(model_fn_ops.loss)
  self.assertIsNone(model_fn_ops.train_op)
  self.assertFalse(model_fn_ops.eval_metric_ops)
  self.assertEqual(2, len(model_fn_ops.output_alternatives))
  # Tests predictions keys: keys are (head_name, prediction_key) pairs.
  pred_keys = model_fn_ops.predictions.keys()
  self.assertIn(
      ("head1", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
  self.assertIn(
      ("head1", prediction_key.PredictionKey.CLASSES), pred_keys)
  self.assertIn(
      ("head2", prediction_key.PredictionKey.PROBABILITIES), pred_keys)
  self.assertIn(
      ("head2", prediction_key.PredictionKey.CLASSES), pred_keys)
  # Tests output alternative: classification problem type for each head.
  out_alts = model_fn_ops.output_alternatives
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head1"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head1"][1].keys())
  self.assertIn(
      prediction_key.PredictionKey.CLASSES, out_alts["head1"][1].keys())
  self.assertEqual(constants.ProblemType.CLASSIFICATION,
                   out_alts["head2"][0])
  self.assertIn(prediction_key.PredictionKey.PROBABILITIES,
                out_alts["head2"][1].keys())
  self.assertIn(
      prediction_key.PredictionKey.CLASSES, out_alts["head2"][1].keys())