def test_iteration_metrics(self, use_tpu, mode):
  # Exercises _IterationMetrics over 10 candidate ensembles, each exporting a
  # single "ensemble_metric" mean, and checks that only the metrics of the
  # chosen best candidate are reported, and only in EVAL mode.
  best_candidate_index = 3
  candidates = []
  for i in range(10):

    # `val=i` binds the loop value as a default argument so each metric_fn
    # keeps its own i (avoids the late-binding-closure pitfall).
    def metric_fn(val=i):
      return {"ensemble_metric": tf.metrics.mean(tf.constant(val))}

    # Only name and eval_metrics matter here; the rest of the spec fields are
    # irrelevant to metric aggregation and left as None.
    spec = _EnsembleSpec(
        name="ensemble_{}".format(i),
        ensemble=None,
        architecture=None,
        subnetwork_builders=None,
        predictions=None,
        eval_metrics=(metric_fn, {}))
    candidate = _Candidate(
        ensemble_spec=spec,
        adanet_loss=tf.constant(i),
        is_training=tf.constant(False))
    candidates.append(candidate)
  metrics = _IterationMetrics(candidates, subnetwork_specs=[])
  with self.test_session() as sess:
    # TPU estimators consume an (fn, tensors) tuple; CPU/GPU estimators
    # consume a dict of metric ops.
    metrics_fn = (
        metrics.best_eval_metrics_tuple
        if use_tpu else metrics.best_eval_metric_ops)
    # metrics_fn returns None outside EVAL mode, hence the `or {}`.
    actual = _run_metrics(
        sess,
        metrics_fn(tf.constant(best_candidate_index), mode) or {})
    if mode == tf.estimator.ModeKeys.EVAL:
      # The reported value is the mean of tf.constant(best_candidate_index).
      expected = {"ensemble_metric": best_candidate_index}
    else:
      expected = {}
    self.assertEqual(actual, expected)
def _dummy_candidate():
  """Builds a placeholder `_Candidate` for use in tests."""
  spec = tu.dummy_ensemble_spec("foo")
  return _Candidate(
      ensemble_spec=spec,
      adanet_loss=1.,
      variables=[tf.Variable(1.)])
def _dummy_candidate():
  """Builds a placeholder `_Candidate` for use in tests."""
  dummy_spec = tu.dummy_ensemble_spec("foo")
  return _Candidate(
      ensemble_spec=dummy_spec,
      adanet_loss=1.,
      is_training=True)
def build_candidate(self, ensemble_spec, training, summary, rebuilding):
  """Wraps `ensemble_spec` in a `_Candidate`; other arguments are ignored."""
  # Accepted only to satisfy the builder interface.
  del training, summary, rebuilding  # Unused.
  return _Candidate(
      ensemble_spec=ensemble_spec,
      adanet_loss=ensemble_spec.adanet_loss,
      variables=[tf.Variable(1.)])
def build_candidate(self,
                    ensemble_spec,
                    training,
                    summary,
                    previous_ensemble_spec=None):
  """Wraps `ensemble_spec` in a `_Candidate`; other arguments are ignored."""
  # Accepted only to satisfy the builder interface.
  del training, summary, previous_ensemble_spec  # Unused.
  return _Candidate(
      ensemble_spec=ensemble_spec,
      adanet_loss=ensemble_spec.adanet_loss)
def create_iteration_metrics(subnetwork_metrics=None,
                             ensemble_metrics=None,
                             use_tpu=False,
                             iteration_number=1):
  """Creates an instance of the _IterationMetrics class.

  Args:
    subnetwork_metrics: List of _SubnetworkMetrics objects.
    ensemble_metrics: List of _EnsembleMetrics objects.
    use_tpu: Whether to use TPU-specific variable sharing logic.
    iteration_number: What number iteration these metrics are for.

  Returns:
    An instance of _IterationMetrics that has been populated with the input
    metrics.
  """
  ensemble_metrics = ensemble_metrics or []
  subnetwork_metrics = subnetwork_metrics or []

  # Wrap each ensemble metric in a minimal _Candidate. All spec fields other
  # than name and eval_metrics are irrelevant here and left as None.
  candidates = [
      _Candidate(
          ensemble_spec=_EnsembleSpec(
              name="ensemble_{}".format(index),
              ensemble=None,
              architecture=None,
              subnetwork_builders=None,
              predictions=None,
              step=None,
              variables=None,
              eval_metrics=ensemble_metric),
          adanet_loss=tf.constant(index),
          variables=None)
      for index, ensemble_metric in enumerate(ensemble_metrics)
  ]

  # Likewise wrap each subnetwork metric in a minimal _SubnetworkSpec.
  subnetwork_specs = [
      _SubnetworkSpec(
          name="subnetwork_{}".format(index),
          subnetwork=None,
          builder=None,
          predictions=None,
          step=None,
          loss=None,
          train_op=None,
          asset_dir=None,
          eval_metrics=subnetwork_metric,
          variables=None)
      for index, subnetwork_metric in enumerate(subnetwork_metrics)
  ]

  return _IterationMetrics(
      iteration_number,
      candidates,
      subnetwork_specs=subnetwork_specs,
      use_tpu=use_tpu)
def build_candidate(self,
                    ensemble_spec,
                    training,
                    iteration_step,
                    summary,
                    previous_ensemble_spec=None,
                    is_previous_best=False):
  """Wraps `ensemble_spec` in a `_Candidate`; most arguments are ignored.

  The candidate is flagged as training whenever the spec's name contains the
  substring "training".
  """
  # Accepted only to satisfy the builder interface.
  del training, iteration_step, summary, previous_ensemble_spec  # Unused.
  return _Candidate(
      ensemble_spec=ensemble_spec,
      adanet_loss=ensemble_spec.adanet_loss,
      is_training="training" in ensemble_spec.name,
      is_previous_best=is_previous_best)
def test_iteration_metrics(self, use_tpu, mode):
  # Verifies that _IterationMetrics reports the metrics of the selected best
  # candidate (plus the iteration number) in EVAL mode and nothing otherwise.
  # Both v1-style (value_op, update_op) metrics and Keras metric objects are
  # exercised.
  with context.graph_mode():
    self.setup_graph()
    best_candidate_index = 3
    candidates = []
    for i in range(10):

      # `val=i` binds the loop value as a default argument so each metric_fn
      # keeps its own i (avoids the late-binding-closure pitfall).
      def metric_fn(val=i):
        metric = tf.keras.metrics.Mean()
        metric.update_state(tf.constant(val))
        return {
            "ensemble_v1_metric": tf_compat.v1.metrics.mean(tf.constant(val)),
            "ensemble_keras_metric": metric
        }

      # Only name and eval_metrics matter here; the other spec fields are
      # irrelevant to metric aggregation and left as None.
      spec = _EnsembleSpec(
          name="ensemble_{}".format(i),
          ensemble=None,
          architecture=None,
          subnetwork_builders=None,
          predictions=None,
          step=None,
          eval_metrics=(metric_fn, {}))
      candidate = _Candidate(ensemble_spec=spec, adanet_loss=tf.constant(i))
      candidates.append(candidate)
    metrics = _IterationMetrics(1, candidates, subnetwork_specs=[])
    # TPU estimators consume an (fn, tensors) tuple; CPU/GPU estimators
    # consume a dict of metric ops.
    metrics_fn = (
        metrics.best_eval_metrics_tuple
        if use_tpu else metrics.best_eval_metric_ops)
    # metrics_fn returns None outside EVAL mode, hence the `or {}`.
    actual = self._run_metrics(
        metrics_fn(tf.constant(best_candidate_index), mode) or {})
    if mode == tf.estimator.ModeKeys.EVAL:
      # Both metric styles report the mean of the best candidate's constant;
      # "iteration" echoes the iteration number passed to _IterationMetrics.
      expected = {
          "ensemble_v1_metric": best_candidate_index,
          "ensemble_keras_metric": best_candidate_index,
          "iteration": 1
      }
    else:
      expected = {}
    self.assertEqual(actual, expected)
def test_new_errors(self, ensemble_spec, adanet_loss):
  """Constructing a `_Candidate` from invalid inputs raises ValueError."""
  with self.test_session(), self.assertRaises(ValueError):
    _Candidate(ensemble_spec, adanet_loss)
def test_new(self, ensemble_spec, adanet_loss):
  """A `_Candidate` stores the spec and loss it was constructed with."""
  with self.test_session():
    candidate = _Candidate(ensemble_spec, adanet_loss)
    self.assertEqual(candidate.ensemble_spec, ensemble_spec)
    self.assertEqual(candidate.adanet_loss, adanet_loss)