Example #1
    def compute_validation_metrics(self) -> workload.Response:
        metrics = self.estimator.evaluate(input_fn=self.val_spec.input_fn,
                                          steps=self.val_spec.steps,
                                          hooks=self.val_spec.hooks)

        if self.hvd_config.use:
            metrics = self.average_metrics(metrics)
            if self.is_chief:
                logging.debug(f"Averaged validation metrics: {metrics}.")

        estimator._cleanup_after_validation_step(self.estimator._model_dir,
                                                 self.hvd_config.use,
                                                 self.is_chief)

        if not self.is_chief:
            return workload.Skipped()

        return {"validation_metrics": metrics}
Example #2
    def compute_validation_metrics(self) -> workload.Response:
        metrics = self.estimator.evaluate(input_fn=self.eval_spec.input_fn,
                                          steps=self.eval_spec.steps,
                                          hooks=self.eval_spec.hooks)

        if self.hvd_config.use:
            metrics = self.average_metrics(metrics)
            if self.is_chief:
                logging.debug(f"Averaged validation metrics: {metrics}.")

        estimator._cleanup_after_validation_step(
            pathlib.Path(self.estimator._model_dir), self.is_chief)

        # Reset the per-evaluation set of allgather ops in the context.
        self.context.experimental._reset_allgather_ops()

        if not self.is_chief:
            return workload.Skipped()

        return {"validation_metrics": metrics}
Example #3
    def compute_validation_metrics(self) -> workload.Response:
        steps = self.eval_spec.steps if not self.env.test_mode else 1
        metrics = self.estimator.evaluate(input_fn=self.eval_spec.input_fn,
                                          steps=steps,
                                          hooks=self.eval_spec.hooks)

        if self.context.distributed.size > 1:
            metrics = self.average_metrics(metrics)
            if self.is_chief:
                logging.debug(f"Averaged validation metrics: {metrics}.")

        estimator._cleanup_after_validation_step(
            pathlib.Path(self.estimator._model_dir), self.is_chief)

        # Reset the per-evaluation set of allgather ops in the context.
        self.context._reset_allgather_ops()

        if not self.is_chief:
            return {}

        return {"validation_metrics": metrics}