Exemplo n.º 1
0
class EvaluatorTest(parameterized.TestCase, tf.test.TestCase):
    @parameterized.named_parameters(
        {
            "testcase_name": "choose_index_0",
            "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_0,
            "want_adanet_losses": [3, 6],
        }, {
            "testcase_name": "choose_index_1",
            "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [6, 3],
        }, {
            "testcase_name": "none_steps",
            "input_fn": tu.dataset_input_fn(),
            "steps": None,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [18, 9],
        }, {
            "testcase_name": "input_fn_out_of_range",
            "input_fn": tu.dataset_input_fn(),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [18, 9],
        })
    def test_adanet_losses(self, input_fn, steps, adanet_losses,
                           want_adanet_losses):
        """Checks that evaluated AdaNet losses match the expected values."""
        with self.test_session() as sess:
            evaluator = Evaluator(input_fn=input_fn, steps=steps)
            # Use a distinct name so the `adanet_losses` fixture parameter
            # is not shadowed by the evaluation result.
            got_losses = evaluator.evaluate_adanet_losses(
                sess, adanet_losses(input_fn))
            self.assertEqual(want_adanet_losses, got_losses)
Exemplo n.º 2
0
    def test_tpu_estimator_simple_lifecycle(self, use_tpu):
        """Exercises train, evaluate, predict, and export on a TPUEstimator."""
        max_steps = 300
        estimator = TPUEstimator(
            head=tu.head(),
            subnetwork_generator=SimpleGenerator(
                [_DNNBuilder("dnn", use_tpu=use_tpu)]),
            max_iteration_steps=200,
            mixture_weight_initializer=tf.zeros_initializer(),
            use_bias=True,
            model_dir=self.test_subdirectory,
            config=tf.contrib.tpu.RunConfig(master="", tf_random_seed=42),
            use_tpu=use_tpu,
            train_batch_size=64 if use_tpu else 0)

        # XOR training data.
        train_input_fn = tu.dummy_input_fn(
            [[1., 0.], [0., 0], [0., 1.], [1., 1.]],
            [[1.], [0.], [1.], [0.]])

        # Train.
        estimator.train(input_fn=train_input_fn,
                        steps=None,
                        max_steps=max_steps,
                        hooks=None)

        # Evaluate.
        eval_results = estimator.evaluate(input_fn=train_input_fn,
                                          steps=10,
                                          hooks=None)

        # Predict.
        # TODO: skip predictions on TF versions 1.11 and 1.12 since
        # some TPU hooks seem to be failing on predict.
        predictions = []
        skipped_versions = (LooseVersion("1.11.0"), LooseVersion("1.12.0"))
        if LooseVersion(tf.VERSION) not in skipped_versions:
            predictions = estimator.predict(
                input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))

        # Export SavedModel.
        def serving_input_fn():
            """Input fn for serving export, starting from serialized example."""
            serialized_example = tf.placeholder(dtype=tf.string,
                                                shape=(None),
                                                name="serialized_example")
            return tf.estimator.export.ServingInputReceiver(
                features={"x": tf.constant([[0., 0.]], name="serving_x")},
                receiver_tensors=serialized_example)

        # Prefer the newer export_saved_model API when available, falling
        # back to the deprecated export_savedmodel on older TF releases.
        export_fn = getattr(estimator, "export_saved_model", None)
        if not callable(export_fn):
            export_fn = estimator.export_savedmodel
        export_fn(export_dir_base=estimator.model_dir,
                  serving_input_receiver_fn=serving_input_fn)

        self.assertAlmostEqual(0.32416, eval_results["loss"], places=3)
        self.assertEqual(max_steps, eval_results["global_step"])
        for prediction in predictions:
            self.assertIsNotNone(prediction["predictions"])
Exemplo n.º 3
0
    def test_tpu_estimator_simple_lifecycle(self, use_tpu,
                                            subnetwork_generator, want_loss):
        """Runs the full train/eval/predict/export loop on a TPUEstimator."""
        max_steps = 30
        run_config = tf_compat.v1.estimator.tpu.RunConfig(master="",
                                                          tf_random_seed=42)
        regression_head = tf.contrib.estimator.regression_head(
            loss_reduction=tf_compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
        estimator = TPUEstimator(head=regression_head,
                                 subnetwork_generator=subnetwork_generator,
                                 max_iteration_steps=10,
                                 model_dir=self.test_subdirectory,
                                 config=run_config,
                                 use_tpu=use_tpu,
                                 train_batch_size=64 if use_tpu else 0)

        # XOR training data.
        train_input_fn = tu.dummy_input_fn(
            [[1., 0.], [0., 0], [0., 1.], [1., 1.]],
            [[1.], [0.], [1.], [0.]])

        # Train.
        estimator.train(input_fn=train_input_fn,
                        steps=None,
                        max_steps=max_steps,
                        hooks=None)

        # Evaluate.
        eval_results = estimator.evaluate(input_fn=train_input_fn,
                                          steps=1,
                                          hooks=None)

        # Predict.
        predictions = estimator.predict(
            input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))

        # Export SavedModel.
        def serving_input_fn():
            """Input fn for serving export, starting from serialized example."""
            serialized_example = tf.placeholder(dtype=tf.string,
                                                shape=(None),
                                                name="serialized_example")
            return tf.estimator.export.ServingInputReceiver(
                features={"x": tf.constant([[0., 0.]], name="serving_x")},
                receiver_tensors=serialized_example)

        # Prefer the newer export_saved_model API when available, falling
        # back to the deprecated export_savedmodel on older TF releases.
        export_fn = getattr(estimator, "export_saved_model", None)
        if not callable(export_fn):
            export_fn = estimator.export_savedmodel
        export_fn(export_dir_base=estimator.model_dir,
                  serving_input_receiver_fn=serving_input_fn)

        self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
        self.assertEqual(max_steps, eval_results["global_step"])
        self.assertEqual(2, eval_results["iteration"])
        for prediction in predictions:
            self.assertIsNotNone(prediction["predictions"])
Exemplo n.º 4
0
    def test_tpu_estimator_simple_lifecycle(self, use_tpu,
                                            subnetwork_generator, want_loss):
        """Trains, evaluates, predicts with, and exports a TPUEstimator."""
        max_steps = 30
        estimator = TPUEstimator(
            # TODO: Add test with estimator Head v2.
            head=make_regression_head(use_tpu),
            subnetwork_generator=subnetwork_generator,
            max_iteration_steps=10,
            model_dir=self.test_subdirectory,
            config=tf.compat.v1.estimator.tpu.RunConfig(master="",
                                                        tf_random_seed=42),
            use_tpu=use_tpu,
            train_batch_size=64 if use_tpu else 0)

        # XOR training data.
        train_input_fn = tu.dummy_input_fn(
            [[1., 0.], [0., 0], [0., 1.], [1., 1.]],
            [[1.], [0.], [1.], [0.]])

        # Train.
        estimator.train(input_fn=train_input_fn,
                        steps=None,
                        max_steps=max_steps,
                        hooks=None)

        # Evaluate.
        eval_results = estimator.evaluate(input_fn=train_input_fn,
                                          steps=1,
                                          hooks=None)

        # Predict.
        predictions = estimator.predict(input_fn=tu.dataset_input_fn(
            features=[0., 0.], return_dataset=True))
        # We need to iterate over all the predictions before moving on, otherwise
        # the TPU will not be shut down.
        for prediction in predictions:
            self.assertIsNotNone(prediction["predictions"])

        # Export SavedModel.
        def serving_input_fn():
            """Input fn for serving export, starting from serialized example."""
            serialized_example = tf.compat.v1.placeholder(
                dtype=tf.string, shape=(None), name="serialized_example")
            return tf.estimator.export.ServingInputReceiver(
                features={"x": tf.constant([[0., 0.]], name="serving_x")},
                receiver_tensors=serialized_example)

        estimator.export_saved_model(
            export_dir_base=estimator.model_dir,
            serving_input_receiver_fn=serving_input_fn)

        self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
        self.assertEqual(max_steps, eval_results["global_step"])
        self.assertEqual(2, eval_results["iteration"])
Exemplo n.º 5
0
  def test_tpu_estimator_summaries(self):
    """Candidate summaries are written during eval but not TPU training."""
    estimator = TPUEstimator(
        head=tu.head(),
        subnetwork_generator=SimpleGenerator([_DNNBuilder("dnn")]),
        max_iteration_steps=200,
        model_dir=self.test_subdirectory,
        config=tf.contrib.tpu.RunConfig(tf_random_seed=42))
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])

    with fake_run_on_tpu():
      estimator.train(input_fn=train_input_fn, max_steps=3)
    estimator.evaluate(input_fn=train_input_fn, steps=3)

    candidate_dir = self.test_subdirectory + "/candidate/t0_dnn"
    # Training while (fake-)running on TPU must not write candidate
    # summaries, but evaluation summaries are still expected.
    self.assertFalse(_summaries_exist(candidate_dir))
    self.assertTrue(_summaries_exist(candidate_dir + "/eval"))
Exemplo n.º 6
0
    def test_evaluate_invalid_metric(self):
        """Evaluating with a metric name that does not exist raises KeyError."""
        input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
        _, labels = input_fn()
        # Two candidate prediction tensors, each with the same metric keys.
        metrics = [{
            "mse": tf.metrics.mean_squared_error(labels, preds),
            "other_metric_1": (tf.constant(1), tf.constant(1)),
            "other_metric_2": (tf.constant(2), tf.constant(2))
        } for preds in (labels * 2, labels * 3)]

        with self.test_session() as sess:
            evaluator = Evaluator(input_fn=input_fn,
                                  metric_name="dne",
                                  steps=3)
            with self.assertRaises(KeyError):
                evaluator.evaluate(sess, ensemble_metrics=metrics)
Exemplo n.º 7
0
    def test_evaluate(self):
        """Evaluator sums the named metric over `steps` for each ensemble."""
        with context.graph_mode():
            input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
            _, labels = input_fn()
            # Two candidate prediction tensors, each with the same metric keys.
            ensemble_metrics = [{
                "mse":
                tf_compat.v1.metrics.mean_squared_error(labels, preds),
                "other_metric_1": (tf.constant(1), tf.constant(1)),
                "other_metric_2": (tf.constant(2), tf.constant(2))
            } for preds in (labels * 2, labels * 3)]

            with self.test_session() as sess:
                evaluator = Evaluator(input_fn=input_fn,
                                      metric_name="mse",
                                      steps=3)
                got = evaluator.evaluate(sess,
                                         ensemble_metrics=ensemble_metrics)
                self.assertEqual([9, 36], got)
Exemplo n.º 8
0
    def test_tpu_estimator_summaries(self, use_tpu, want_loss,
                                     want_adanet_loss, want_eval_summary_loss,
                                     want_predictions):
        """Checks summary values written by a TPUEstimator on XOR data.

        Trains a single-DNN-subnetwork AdaNet estimator, evaluates it, and
        then verifies the scalar/image summaries written under the model
        directory, the subnetwork directory, and the ensemble directory.
        """
        config = tf.contrib.tpu.RunConfig(tf_random_seed=42,
                                          save_summary_steps=100,
                                          log_step_count_steps=100)
        # Sanity-check that the RunConfig retained the step-count setting.
        assert config.log_step_count_steps

        def metric_fn(predictions):
            # Extra eval metric: the mean of the model's predictions.
            return {
                "predictions":
                tf_compat.v1.metrics.mean(predictions["predictions"])
            }

        max_steps = 100
        estimator = TPUEstimator(head=tu.head(),
                                 subnetwork_generator=SimpleGenerator(
                                     [_DNNBuilder("dnn", use_tpu=use_tpu)]),
                                 max_iteration_steps=max_steps,
                                 model_dir=self.test_subdirectory,
                                 metric_fn=metric_fn,
                                 config=config,
                                 use_tpu=use_tpu,
                                 train_batch_size=64 if use_tpu else 0)
        # XOR training data.
        xor_features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
        xor_labels = [[1.], [0.], [1.], [0.]]
        train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

        estimator.train(input_fn=train_input_fn, max_steps=max_steps)
        eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
        self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
        self.assertEqual(max_steps, eval_results["global_step"])
        self.assertEqual(0, eval_results["iteration"])

        # Summary subdirectories for the subnetwork and the ensemble.
        subnetwork_subdir = os.path.join(self.test_subdirectory,
                                         "subnetwork/t0_dnn")

        ensemble_subdir = os.path.join(
            self.test_subdirectory,
            "ensemble/t0_dnn_grow_complexity_regularized")

        # Training-time summaries written under the model directory.
        # TODO: Why is the adanet_loss written to 'loss'?
        self.assertAlmostEqual(want_adanet_loss,
                               _get_summary_value("loss",
                                                  self.test_subdirectory),
                               places=1)
        self.assertEqual(
            0.,
            _get_summary_value("iteration/adanet/iteration",
                               self.test_subdirectory))
        # Subnetwork-level scalar, image, and nested-scope summaries.
        self.assertAlmostEqual(3.,
                               _get_summary_value("scalar", subnetwork_subdir),
                               places=3)
        self.assertEqual((3, 3, 1),
                         _get_summary_value("image/image/0",
                                            subnetwork_subdir))
        self.assertAlmostEqual(5.,
                               _get_summary_value("nested/scalar",
                                                  subnetwork_subdir),
                               places=3)
        # Ensemble-level loss, regularization, and mixture-weight summaries.
        self.assertAlmostEqual(
            want_adanet_loss,
            _get_summary_value("adanet_loss/adanet/adanet_weighted_ensemble",
                               ensemble_subdir),
            places=1)
        self.assertAlmostEqual(
            0.,
            _get_summary_value(
                "complexity_regularization/adanet/adanet_weighted_ensemble",
                ensemble_subdir),
            places=1)
        self.assertAlmostEqual(1.,
                               _get_summary_value(
                                   "mixture_weight_norms/adanet/"
                                   "adanet_weighted_ensemble/subnetwork_0",
                                   ensemble_subdir),
                               places=1)

        # Eval metric summaries are always written out during eval.
        subnetwork_eval_subdir = os.path.join(subnetwork_subdir, "eval")
        self.assertAlmostEqual(want_eval_summary_loss,
                               _get_summary_value("loss",
                                                  subnetwork_eval_subdir),
                               places=1)
        self.assertAlmostEqual(want_eval_summary_loss,
                               _get_summary_value("average_loss",
                                                  subnetwork_eval_subdir),
                               places=1)
        self.assertAlmostEqual(want_predictions,
                               _get_summary_value("predictions",
                                                  subnetwork_eval_subdir),
                               places=3)

        # Architecture and loss summaries in the top-level and ensemble
        # eval directories.
        eval_subdir = os.path.join(self.test_subdirectory, "eval")
        ensemble_eval_subdir = os.path.join(ensemble_subdir, "eval")
        for subdir in [ensemble_eval_subdir, eval_subdir]:
            self.assertEqual([b"| dnn |"],
                             _get_summary_value(
                                 "architecture/adanet/ensembles/0", subdir))
            if subdir == eval_subdir:
                self.assertAlmostEqual(want_loss,
                                       _get_summary_value("loss", subdir),
                                       places=1)
            self.assertAlmostEqual(want_eval_summary_loss,
                                   _get_summary_value("average_loss", subdir),
                                   places=1)
Exemplo n.º 9
0
class ReportMaterializerTest(parameterized.TestCase, tf.test.TestCase):
  """Tests that ReportMaterializer turns subnetwork Reports (with tensor
  hparams/attributes/metrics) into MaterializedReports with concrete
  Python values."""

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name":
              "one_empty_subnetwork",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(hparams={}, attributes={}, metrics={}),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "one_subnetwork",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(
                          hparams={
                              "learning_rate": 1.e-5,
                              "optimizer": "sgd",
                              "num_layers": 0,
                              "use_side_inputs": True,
                          },
                          attributes={
                              "weight_norms": tf.constant(3.14),
                              "foo": tf.constant("bar"),
                              "parameters": tf.constant(7777),
                              "boo": tf.constant(True),
                          },
                          metrics={},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo",
                  hparams={
                      "learning_rate": 1.e-5,
                      "optimizer": "sgd",
                      "num_layers": 0,
                      "use_side_inputs": True,
                  },
                  attributes={
                      "weight_norms": 3.14,
                      "foo": "bar",
                      "parameters": 7777,
                      "boo": True,
                  },
                  metrics={},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "one_subnetwork_iteration_2",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(
                          hparams={
                              "learning_rate": 1.e-5,
                              "optimizer": "sgd",
                              "num_layers": 0,
                              "use_side_inputs": True,
                          },
                          attributes={
                              "weight_norms": tf.constant(3.14),
                              "foo": tf.constant("bar"),
                              "parameters": tf.constant(7777),
                              "boo": tf.constant(True),
                          },
                          metrics={},
                      ),
              },
          "steps":
              3,
          "iteration_number":
              2,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=2,
                  name="foo",
                  hparams={
                      "learning_rate": 1.e-5,
                      "optimizer": "sgd",
                      "num_layers": 0,
                      "use_side_inputs": True,
                  },
                  attributes={
                      "weight_norms": 3.14,
                      "foo": "bar",
                      "parameters": 7777,
                      "boo": True,
                  },
                  metrics={},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "two_subnetworks",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo1":
                      subnetwork.Report(
                          hparams={
                              "learning_rate": 1.e-5,
                              "optimizer": "sgd",
                              "num_layers": 0,
                              "use_side_inputs": True,
                          },
                          attributes={
                              "weight_norms": tf.constant(3.14),
                              "foo": tf.constant("bar"),
                              "parameters": tf.constant(7777),
                              "boo": tf.constant(True),
                          },
                          metrics={},
                      ),
                  "foo2":
                      subnetwork.Report(
                          hparams={
                              "learning_rate": 1.e-6,
                              "optimizer": "sgd",
                              "num_layers": 1,
                              "use_side_inputs": True,
                          },
                          attributes={
                              "weight_norms": tf.constant(3.1445),
                              "foo": tf.constant("baz"),
                              "parameters": tf.constant(7788),
                              "boo": tf.constant(True),
                          },
                          metrics={},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo2"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo1",
                  hparams={
                      "learning_rate": 1.e-5,
                      "optimizer": "sgd",
                      "num_layers": 0,
                      "use_side_inputs": True,
                  },
                  attributes={
                      "weight_norms": 3.14,
                      "foo": "bar",
                      "parameters": 7777,
                      "boo": True,
                  },
                  metrics={},
                  included_in_final_ensemble=False,
              ),
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo2",
                  hparams={
                      "learning_rate": 1.e-6,
                      "optimizer": "sgd",
                      "num_layers": 1,
                      "use_side_inputs": True,
                  },
                  attributes={
                      "weight_norms": 3.1445,
                      "foo": "baz",
                      "parameters": 7788,
                      "boo": True,
                  },
                  metrics={},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "two_subnetworks_zero_included",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo1":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={},
                      ),
                  "foo2":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": [],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo1",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=False,
              ),
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo2",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=False,
              ),
          ],
      }, {
          "testcase_name":
              "two_subnetworks_both_included",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo1":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={},
                      ),
                  "foo2":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo1", "foo2"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo1",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=True,
              ),
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo2",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "materialize_metrics",
          "input_fn":
              tu.dummy_input_fn([[1., 1.], [1., 1.], [1., 1.]],
                                [[1.], [2.], [3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={"moo": tf_compat.v1.metrics.mean(labels)},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo",
                  hparams={},
                  attributes={},
                  metrics={"moo": 2.},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "materialize_metrics_none_steps",
          "input_fn":
              tu.dataset_input_fn([[1., 1.], [1., 1.], [1., 1.]],
                                  [[1.], [2.], [3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={"moo": tf_compat.v1.metrics.mean(labels)},
                      ),
              },
          "steps":
              None,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo",
                  hparams={},
                  attributes={},
                  metrics={"moo": 2.},
                  included_in_final_ensemble=True,
              ),
          ],
      }, {
          "testcase_name":
              "materialize_metrics_non_tensor_op",
          "input_fn":
              tu.dummy_input_fn([[1., 2]], [[3.]]),
          "subnetwork_reports_fn":
              lambda features, labels: {
                  "foo":
                      subnetwork.Report(
                          hparams={},
                          attributes={},
                          metrics={"moo": (tf.constant(42), tf.no_op())},
                      ),
              },
          "steps":
              3,
          "included_subnetwork_names": ["foo"],
          "want_materialized_reports": [
              subnetwork.MaterializedReport(
                  iteration_number=0,
                  name="foo",
                  hparams={},
                  attributes={},
                  metrics={"moo": 42},
                  included_in_final_ensemble=True,
              ),
          ],
      })
  @test_util.run_in_graph_and_eager_modes
  def test_materialize_subnetwork_reports(self,
                                          input_fn,
                                          subnetwork_reports_fn,
                                          steps,
                                          iteration_number=0,
                                          included_subnetwork_names=None,
                                          want_materialized_reports=None):
    """Materializes reports and compares them field-by-field to the
    expected MaterializedReports (floats via assertAllClose, everything
    else via exact equality)."""
    with context.graph_mode():
      tf.constant(0.)  # dummy op so that the session graph is never empty.
      features, labels = input_fn()
      subnetwork_reports = subnetwork_reports_fn(features, labels)
      with self.test_session() as sess:
        sess.run(tf_compat.v1.initializers.local_variables())
        report_materializer = ReportMaterializer(input_fn=input_fn, steps=steps)
        materialized_reports = (
            report_materializer.materialize_subnetwork_reports(
                sess, iteration_number, subnetwork_reports,
                included_subnetwork_names))
        self.assertEqual(
            len(want_materialized_reports), len(materialized_reports))
        # Index materialized reports by name for order-independent lookup.
        materialized_reports_dict = {
            blrm.name: blrm for blrm in materialized_reports
        }
        for want_materialized_report in want_materialized_reports:
          materialized_report = (
              materialized_reports_dict[want_materialized_report.name])
          self.assertEqual(iteration_number,
                           materialized_report.iteration_number)
          # Compare hparams: float values approximately, others exactly.
          self.assertEqual(
              set(want_materialized_report.hparams.keys()),
              set(materialized_report.hparams.keys()))
          for hparam_key, want_hparam in (
              want_materialized_report.hparams.items()):
            if isinstance(want_hparam, float):
              self.assertAllClose(want_hparam,
                                  materialized_report.hparams[hparam_key])
            else:
              self.assertEqual(want_hparam,
                               materialized_report.hparams[hparam_key])

          # Compare attributes, decoding materialized values first.
          self.assertSetEqual(
              set(want_materialized_report.attributes.keys()),
              set(materialized_report.attributes.keys()))
          for attribute_key, want_attribute in (
              want_materialized_report.attributes.items()):
            if isinstance(want_attribute, float):
              self.assertAllClose(
                  want_attribute,
                  decode(materialized_report.attributes[attribute_key]))
            else:
              self.assertEqual(
                  want_attribute,
                  decode(materialized_report.attributes[attribute_key]))

          # Compare metrics, decoding materialized values first.
          self.assertSetEqual(
              set(want_materialized_report.metrics.keys()),
              set(materialized_report.metrics.keys()))
          for metric_key, want_metric in (
              want_materialized_report.metrics.items()):
            if isinstance(want_metric, float):
              self.assertAllClose(
                  want_metric, decode(materialized_report.metrics[metric_key]))
            else:
              self.assertEqual(want_metric,
                               decode(materialized_report.metrics[metric_key]))
Exemplo n.º 10
0
    def test_tpu_estimator_summaries(self, use_tpu):
        """Checks that training and eval write the expected event summaries.

        Trains a single-DNN AdaNet TPUEstimator briefly on the XOR dataset,
        then inspects the event files in the model directory and its
        candidate subdirectory.

        Args:
          use_tpu: Whether the estimator should be built with TPU support.
        """
        # log_step_count_steps=1 ensures step-rate summaries appear even
        # though training only runs for 3 steps.
        run_config = tf.contrib.tpu.RunConfig(tf_random_seed=42,
                                              save_summary_steps=2,
                                              log_step_count_steps=1)
        assert run_config.log_step_count_steps
        estimator = TPUEstimator(
            head=tu.head(),
            subnetwork_generator=SimpleGenerator(
                [_DNNBuilder("dnn", use_tpu=use_tpu)]),
            max_iteration_steps=200,
            model_dir=self.test_subdirectory,
            config=run_config,
            use_tpu=use_tpu,
            train_batch_size=64 if use_tpu else 0)

        # XOR problem: a small non-linearly-separable smoke-test dataset.
        features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
        labels = [[1.], [0.], [1.], [0.]]
        input_fn = tu.dummy_input_fn(features, labels)
        estimator.train(input_fn=input_fn, max_steps=3)
        estimator.evaluate(input_fn=input_fn, steps=3)

        ensemble_loss = .5
        eval_subdir = os.path.join(self.test_subdirectory, "eval")
        candidate_subdir = os.path.join(self.test_subdirectory,
                                        "candidate/t0_dnn")

        # (expected value, summary keyword, directory, decimal places).
        approx_expectations = [
            (ensemble_loss, "loss", self.test_subdirectory, 1),
            (ensemble_loss, "loss", eval_subdir, 1),
            (3., "scalar", candidate_subdir, 3),
            (5., "nested/scalar", candidate_subdir, 1),
            (ensemble_loss,
             "adanet_loss/adanet/adanet_weighted_ensemble",
             candidate_subdir, 1),
            (0.,
             "complexity_regularization/adanet/adanet_weighted_ensemble",
             candidate_subdir, 1),
            (1.,
             "mixture_weight_norms/adanet/"
             "adanet_weighted_ensemble/subnetwork_0",
             candidate_subdir, 1),
        ]
        for want, keyword, subdir, places in approx_expectations:
            self.assertAlmostEqual(want,
                                   _check_eventfile_for_keyword(
                                       keyword, subdir),
                                   places=places)

        self.assertIsNotNone(
            _check_eventfile_for_keyword("global_step/sec",
                                         self.test_subdirectory))
        self.assertEqual(
            0.,
            _check_eventfile_for_keyword("iteration/adanet/iteration",
                                         self.test_subdirectory))
        # Image summaries report their shape rather than a scalar value.
        self.assertEqual((3, 3, 1),
                         _check_eventfile_for_keyword("image/image/0",
                                                      candidate_subdir))
Exemplo n.º 11
0
    def test_tpu_estimator_summaries(self, use_tpu):
        """Tests that subnetwork, ensemble, and eval summaries are written.

        Trains a single-DNN AdaNet TPUEstimator for a few steps on the XOR
        dataset, then checks the event files under the model directory and
        its subnetwork/ensemble/eval subdirectories for the expected scalar,
        image, and architecture summaries.

        Args:
          use_tpu: Whether to build and run the estimator with TPU support.
        """
        # log_step_count_steps=1 so step-rate summaries exist after only
        # 3 training steps; tf_random_seed pins the run for the exact-value
        # assertions below.
        config = tf.contrib.tpu.RunConfig(tf_random_seed=42,
                                          save_summary_steps=2,
                                          log_step_count_steps=1)
        assert config.log_step_count_steps
        estimator = TPUEstimator(head=tu.head(),
                                 subnetwork_generator=SimpleGenerator(
                                     [_DNNBuilder("dnn", use_tpu=use_tpu)]),
                                 max_iteration_steps=200,
                                 model_dir=self.test_subdirectory,
                                 config=config,
                                 use_tpu=use_tpu,
                                 train_batch_size=64 if use_tpu else 0)
        # XOR problem: a small non-linearly-separable smoke-test dataset.
        xor_features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
        xor_labels = [[1.], [0.], [1.], [0.]]
        train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

        estimator.train(input_fn=train_input_fn, max_steps=3)
        estimator.evaluate(input_fn=train_input_fn, steps=3)

        subnetwork_subdir = os.path.join(self.test_subdirectory,
                                         "subnetwork/t0_dnn")

        # Expected loss given the fixed seed above — presumably deterministic
        # across platforms; verify if the seed or step counts change.
        ensemble_loss = .5
        ensemble_subdir = os.path.join(
            self.test_subdirectory, "ensemble/t0_dnn_complexity_regularized")

        self.assertAlmostEqual(ensemble_loss,
                               _get_summary_value("loss",
                                                  self.test_subdirectory),
                               places=1)
        self.assertEqual(
            0.,
            _get_summary_value("iteration/adanet/iteration",
                               self.test_subdirectory))
        self.assertAlmostEqual(3.,
                               _get_summary_value("scalar", subnetwork_subdir),
                               places=3)
        # Image summaries report their shape rather than a scalar value.
        self.assertEqual((3, 3, 1),
                         _get_summary_value("image/image/0",
                                            subnetwork_subdir))
        self.assertAlmostEqual(5.,
                               _get_summary_value("nested/scalar",
                                                  subnetwork_subdir),
                               places=3)
        self.assertAlmostEqual(
            ensemble_loss,
            _get_summary_value("adanet_loss/adanet/adanet_weighted_ensemble",
                               ensemble_subdir),
            places=1)
        self.assertAlmostEqual(
            0.,
            _get_summary_value(
                "complexity_regularization/adanet/adanet_weighted_ensemble",
                ensemble_subdir),
            places=1)
        self.assertAlmostEqual(1.,
                               _get_summary_value(
                                   "mixture_weight_norms/adanet/"
                                   "adanet_weighted_ensemble/subnetwork_0",
                                   ensemble_subdir),
                               places=1)

        # Eval metric summaries are always written out during eval.
        subnetwork_eval_subdir = os.path.join(subnetwork_subdir, "eval")
        if use_tpu:
            # TODO: Why is subnetwork eval loss 0.0 when use_tpu=False?
            self.assertAlmostEqual(ensemble_loss,
                                   _get_summary_value("loss",
                                                      subnetwork_eval_subdir),
                                   places=1)
        self.assertAlmostEqual(ensemble_loss,
                               _get_summary_value("average_loss",
                                                  subnetwork_eval_subdir),
                               places=1)

        eval_subdir = os.path.join(self.test_subdirectory, "eval")
        ensemble_eval_subdir = os.path.join(ensemble_subdir, "eval")
        for subdir in [ensemble_eval_subdir, eval_subdir]:
            # The architecture summary records which subnetworks ended up in
            # the ensemble.
            self.assertEqual([b"| dnn |"],
                             _get_summary_value(
                                 "architecture/adanet/ensembles/0", subdir))
            if subdir == eval_subdir:
                self.assertAlmostEqual(ensemble_loss,
                                       _get_summary_value("loss", subdir),
                                       places=1)
            self.assertAlmostEqual(ensemble_loss,
                                   _get_summary_value("average_loss", subdir),
                                   places=1)
Exemplo n.º 12
0
class EvaluatorTest(parameterized.TestCase, tf.test.TestCase):
    """Tests `Evaluator.evaluate` and its objective selection logic."""

    @parameterized.named_parameters(
        {
            "testcase_name": "choose_index_0",
            "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_0,
            "want_adanet_losses": [3, 6],
        }, {
            "testcase_name": "choose_index_1",
            "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [6, 3],
        }, {
            "testcase_name": "none_steps",
            "input_fn": tu.dataset_input_fn(),
            "steps": None,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [18, 9],
        }, {
            "testcase_name": "input_fn_out_of_range",
            "input_fn": tu.dataset_input_fn(),
            "steps": 3,
            "adanet_losses": _fake_adanet_losses_1,
            "want_adanet_losses": [18, 9],
        })
    def test_evaluate_no_metric_fn_falls_back_to_adanet_losses(
            self, input_fn, steps, adanet_losses, want_adanet_losses):
        """Without a metric_fn, evaluate() reports the mean AdaNet losses."""
        adanet_losses = adanet_losses(input_fn)
        metrics = [{
            "adanet_loss": tf.metrics.mean(loss)
        } for loss in adanet_losses]
        with self.test_session() as sess:
            evaluator = Evaluator(input_fn=input_fn, steps=steps)
            adanet_losses = evaluator.evaluate(sess, ensemble_metrics=metrics)
            self.assertEqual(want_adanet_losses, adanet_losses)

    # BUG FIX: the two testcase names below were swapped relative to the
    # objectives they exercise ("minimize_returns_nanargmin" was paired with
    # MAXIMIZE/nanargmax and vice versa). Each name now matches its params.
    @parameterized.named_parameters(
        {
            "testcase_name": "maximize_returns_nanargmax",
            "objective": Evaluator.Objective.MAXIMIZE,
            "expected_objective_fn": np.nanargmax,
            "metric_fn": lambda x, y: None
        }, {
            "testcase_name": "minimize_returns_nanargmin",
            "objective": Evaluator.Objective.MINIMIZE,
            "expected_objective_fn": np.nanargmin,
            "metric_fn": lambda x, y: None
        })
    def test_objective(self, objective, expected_objective_fn, metric_fn=None):
        """Each objective selects the matching numpy nan-arg function."""
        evaluator = Evaluator(input_fn=None, objective=objective)
        self.assertEqual(expected_objective_fn, evaluator.objective_fn)

    def test_objective_unsupported_objective(self):
        """An unknown objective string raises ValueError at construction."""
        with self.assertRaises(ValueError):
            Evaluator(input_fn=None, objective="non_existent_objective")

    def test_evaluate(self):
        """evaluate() returns the named metric's value for each ensemble."""
        input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
        _, labels = input_fn()
        # Two candidates predicting 2x and 3x the labels, so with label 3.
        # their MSEs accumulate to 9 and 36 over the evaluation steps.
        predictions = [labels * 2, labels * 3]
        metrics = []
        for preds in predictions:
            metrics.append({
                "mse": tf.metrics.mean_squared_error(labels, preds),
                "other_metric_1": (tf.constant(1), tf.constant(1)),
                "other_metric_2": (tf.constant(2), tf.constant(2))
            })

        with self.test_session() as sess:
            evaluator = Evaluator(input_fn=input_fn,
                                  metric_name="mse",
                                  steps=3)
            metrics = evaluator.evaluate(sess, ensemble_metrics=metrics)
            self.assertEqual([9, 36], metrics)

    def test_evaluate_invalid_metric(self):
        """Requesting a metric name absent from the dicts raises KeyError."""
        input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
        _, labels = input_fn()
        predictions = [labels * 2, labels * 3]
        metrics = []
        for preds in predictions:
            metrics.append({
                "mse": tf.metrics.mean_squared_error(labels, preds),
                "other_metric_1": (tf.constant(1), tf.constant(1)),
                "other_metric_2": (tf.constant(2), tf.constant(2))
            })

        with self.test_session() as sess:
            evaluator = Evaluator(input_fn=input_fn,
                                  metric_name="dne",
                                  steps=3)
            with self.assertRaises(KeyError):
                metrics = evaluator.evaluate(sess, ensemble_metrics=metrics)
Exemplo n.º 13
0
    def test_summaries(self):
        """Tests that summaries are written to the subnetwork and ensemble
        candidate directories during training."""

        # tf_random_seed pins the run so the loss assertions below are
        # reproducible; save_summary_steps=2 ensures summaries exist after
        # only 3 training steps.
        run_config = tf.estimator.RunConfig(tf_random_seed=42,
                                            log_step_count_steps=2,
                                            save_summary_steps=2,
                                            model_dir=self.test_subdirectory)
        subnetwork_generator = SimpleGenerator([_SimpleBuilder("dnn")])
        report_materializer = ReportMaterializer(input_fn=tu.dummy_input_fn(
            [[1., 1.]], [[0.]]),
                                                 steps=1)
        estimator = Estimator(head=regression_head.RegressionHead(
            loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
                              subnetwork_generator=subnetwork_generator,
                              report_materializer=report_materializer,
                              max_iteration_steps=10,
                              config=run_config)
        train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
        estimator.train(input_fn=train_input_fn, max_steps=3)

        # Expected loss given the fixed seed above — presumably deterministic
        # across platforms; verify if the seed or step counts change.
        ensemble_loss = 1.52950
        self.assertAlmostEqual(ensemble_loss,
                               tu.check_eventfile_for_keyword(
                                   "loss", self.test_subdirectory),
                               places=3)
        self.assertIsNotNone(
            tu.check_eventfile_for_keyword("global_step/sec",
                                           self.test_subdirectory))
        self.assertEqual(
            0.,
            tu.check_eventfile_for_keyword("iteration/adanet/iteration",
                                           self.test_subdirectory))

        subnetwork_subdir = os.path.join(self.test_subdirectory,
                                         "subnetwork/t0_dnn")
        self.assertAlmostEqual(3.,
                               tu.check_eventfile_for_keyword(
                                   "scalar", subnetwork_subdir),
                               places=3)
        # Image summaries report their shape rather than a scalar value.
        self.assertEqual(
            (3, 3, 1),
            tu.check_eventfile_for_keyword("image", subnetwork_subdir))
        self.assertAlmostEqual(5.,
                               tu.check_eventfile_for_keyword(
                                   "nested/scalar", subnetwork_subdir),
                               places=3)

        ensemble_subdir = os.path.join(
            self.test_subdirectory,
            "ensemble/t0_dnn_grow_complexity_regularized")
        self.assertAlmostEqual(
            ensemble_loss,
            tu.check_eventfile_for_keyword(
                "adanet_loss/adanet/adanet_weighted_ensemble",
                ensemble_subdir),
            places=1)
        self.assertAlmostEqual(
            0.,
            tu.check_eventfile_for_keyword(
                "complexity_regularization/adanet/adanet_weighted_ensemble",
                ensemble_subdir),
            places=3)
        self.assertAlmostEqual(1.,
                               tu.check_eventfile_for_keyword(
                                   "mixture_weight_norms/adanet/"
                                   "adanet_weighted_ensemble/subnetwork_0",
                                   ensemble_subdir),
                               places=3)