Example #1
    def _default_iteration_print(self, engine: Engine) -> None:
        """
        Execute iteration log operation based on Ignite `engine.state.output` data.
        Print the values from `self.output_transform(engine.state.output)`.
        Since `engine.state.output` is a decollated list and we replicated the loss value for every item
        of the decollated list, the default behavior is to print the loss from `output[0]`.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.

        """
        loss = self.output_transform(engine.state.output)
        if loss is None:
            return  # no printing if the output is empty

        out_str = ""
        if isinstance(loss, dict):  # print dictionary items
            for name in sorted(loss):
                value = loss[name]
                if not is_scalar(value):
                    warnings.warn(
                        "ignoring non-scalar output in StatsHandler,"
                        " make sure `output_transform(engine.state.output)` returns"
                        " a scalar or dictionary of key and scalar pairs to avoid this warning."
                        " {}:{}".format(name, type(value))
                    )
                    continue  # not printing multi dimensional output
                out_str += self.key_var_format.format(name, value.item() if isinstance(value, torch.Tensor) else value)
        elif is_scalar(loss):  # not printing multi dimensional output
            out_str += self.key_var_format.format(
                self.tag_name, loss.item() if isinstance(loss, torch.Tensor) else loss
            )
        else:
            warnings.warn(
                "ignoring non-scalar output in StatsHandler,"
                " make sure `output_transform(engine.state.output)` returns"
                " a scalar or a dictionary of key and scalar pairs to avoid this warning."
                " {}".format(type(loss))
            )

        if not out_str:
            return  # no value to print

        num_iterations = engine.state.epoch_length
        current_iteration = engine.state.iteration
        if num_iterations is not None:
            current_iteration = (current_iteration - 1) % num_iterations + 1
        current_epoch = engine.state.epoch
        num_epochs = engine.state.max_epochs

        base_str = f"Epoch: {current_epoch}/{num_epochs}, Iter: {current_iteration}/{num_iterations} --"

        self.logger.info(" ".join([base_str, out_str]))
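Usage sketch for Example #1 (not part of the original source): a minimal way to reach `_default_iteration_print`, assuming the public `StatsHandler` class from `monai.handlers` and a plain Ignite `Engine`; the `train_step` function and the lambda passed as `output_transform` are hypothetical stand-ins.

import logging

import torch
from ignite.engine import Engine
from monai.handlers import StatsHandler

logging.basicConfig(level=logging.INFO)  # StatsHandler logs through the standard logging module

def train_step(engine, batch):
    # dummy training step: engine.state.output becomes this dict
    return {"loss": torch.tensor(0.123)}

trainer = Engine(train_step)

# output_transform selects the scalar printed each iteration;
# here it reads the "loss" key from engine.state.output
StatsHandler(tag_name="train_loss", output_transform=lambda output: output["loss"]).attach(trainer)

trainer.run([0] * 4, max_epochs=1)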
Example #2
    def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter) -> None:
        """
        Execute iteration level event write operation based on Ignite `engine.state.output` data.
        Extract the values from `self.output_transform(engine.state.output)`.
        Since `engine.state.output` is a decollated list and we replicated the loss value for every item
        of the decollated list, the default behavior is to track the loss from `output[0]`.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            writer: TensorBoard or TensorBoardX writer, passed or created in TensorBoardHandler.

        """
        loss = self.output_transform(engine.state.output)
        if loss is None:
            return  # do nothing if output is empty
        if isinstance(loss, dict):
            for name in sorted(loss):
                value = loss[name]
                if not is_scalar(value):
                    warnings.warn(
                        "ignoring non-scalar output in TensorBoardStatsHandler,"
                        " make sure `output_transform(engine.state.output)` returns"
                        " a scalar or dictionary of key and scalar pairs to avoid this warning."
                        " {}:{}".format(name, type(value)))
                    continue  # do not plot multi-dimensional output
                self._write_scalar(
                    _engine=engine,
                    writer=writer,
                    tag=name,
                    value=value.item() if isinstance(value, torch.Tensor) else value,
                    step=engine.state.iteration,
                )
        elif is_scalar(loss):  # do not plot multi-dimensional output
            self._write_scalar(
                _engine=engine,
                writer=writer,
                tag=self.tag_name,
                value=loss.item() if isinstance(loss, torch.Tensor) else loss,
                step=engine.state.iteration,
            )
        else:
            warnings.warn(
                "ignoring non-scalar output in TensorBoardStatsHandler,"
                " make sure `output_transform(engine.state.output)` returns"
                " a scalar or a dictionary of key and scalar pairs to avoid this warning."
                " {}".format(type(loss)))
        writer.flush()
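Usage sketch for the writer above (not from the original source): when `output_transform` returns a dict of scalars, each key becomes its own TensorBoard curve. This assumes the public `TensorBoardStatsHandler` from `monai.handlers` with `log_dir` and `output_transform` keywords; the two loss names are hypothetical.

import torch
from ignite.engine import Engine
from monai.handlers import TensorBoardStatsHandler

def train_step(engine, batch):
    # two scalar losses: the dict branch above writes one curve per key
    return {"seg_loss": torch.tensor(0.4), "cls_loss": torch.tensor(0.1)}

trainer = Engine(train_step)

tb_stats = TensorBoardStatsHandler(
    log_dir="./runs",                        # hypothetical output directory
    output_transform=lambda output: output,  # forward the dict of scalars unchanged
)
tb_stats.attach(trainer)

trainer.run([0] * 4, max_epochs=1)
tb_stats.close()  # close the internally created SummaryWriter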
Example #3
    def _default_epoch_print(self, engine: Engine) -> None:
        """
        Execute epoch level log operation.
        Default to print the values from Ignite `engine.state.metrics` dict and
        print the values of specified attributes of `engine.state`.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.

        """
        current_epoch = self.global_epoch_transform(engine.state.epoch)

        prints_dict = engine.state.metrics
        if prints_dict is not None and len(prints_dict) > 0:
            out_str = f"Epoch[{current_epoch}] Metrics -- "
            for name in sorted(prints_dict):
                value = prints_dict[name]
                out_str += self.key_var_format.format(name, value) if is_scalar(value) else f"{name}: {str(value)}"
            self.logger.info(out_str)

        if (
            hasattr(engine.state, "key_metric_name")
            and hasattr(engine.state, "best_metric")
            and hasattr(engine.state, "best_metric_epoch")
        ):
            out_str = f"Key metric: {engine.state.key_metric_name} "  # type: ignore
            out_str += f"best value: {engine.state.best_metric} "  # type: ignore
            out_str += f"at epoch: {engine.state.best_metric_epoch}"  # type: ignore
            self.logger.info(out_str)

        if self.state_attributes is not None and len(self.state_attributes) > 0:
            out_str = "State values: "
            for attr in self.state_attributes:
                out_str += f"{attr}: {getattr(engine.state, attr, None)} "
            self.logger.info(out_str)
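Usage sketch for the epoch printer (not from the original source): the `state_attributes` list referenced above is assumed to be a `StatsHandler` constructor argument; the `best_lr` attribute and the hard-coded metric value are hypothetical.

import logging

from ignite.engine import Engine, Events
from monai.handlers import StatsHandler

logging.basicConfig(level=logging.INFO)

evaluator = Engine(lambda engine, batch: batch)

@evaluator.on(Events.EPOCH_COMPLETED)
def fill_state(engine):
    # normally metric handlers populate engine.state.metrics; hard-coded here
    engine.state.metrics["val_mean_dice"] = 0.85
    engine.state.best_lr = 1e-4  # extra attribute reported via state_attributes

StatsHandler(
    iteration_log=False,           # only log at epoch level
    state_attributes=["best_lr"],  # printed by _default_epoch_print
).attach(evaluator)

evaluator.run([0], max_epochs=1)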
Example #4
    def _default_iteration_writer(
            self, engine: "ignite.engine.Engine",
            writer: "torch.utils.tensorboard.SummaryWriter") -> None:
        """
        Execute iteration level event write operation based on Ignite engine.state data.
        Default is to write the loss value of current iteration.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            writer: TensorBoard writer, created in TensorBoardHandler.

        """
        loss = self.output_transform(engine.state.output)
        if loss is None:
            return  # do nothing if output is empty
        if isinstance(loss, dict):
            for name in sorted(loss):
                value = loss[name]
                if not is_scalar(value):
                    warnings.warn(
                        "ignoring non-scalar output in TensorBoardStatsHandler,"
                        " make sure `output_transform(engine.state.output)` returns"
                        " a scalar or dictionary of key and scalar pairs to avoid this warning."
                        " {}:{}".format(name, type(value)))
                    continue  # do not plot multi-dimensional output
                writer.add_scalar(
                    name,
                    value.item() if torch.is_tensor(value) else value,
                    engine.state.iteration)
        elif is_scalar(loss):  # do not plot multi-dimensional output
            writer.add_scalar(self.tag_name,
                              loss.item() if torch.is_tensor(loss) else loss,
                              engine.state.iteration)
        else:
            warnings.warn(
                "ignoring non-scalar output in TensorBoardStatsHandler,"
                " make sure `output_transform(engine.state.output)` returns"
                " a scalar or a dictionary of key and scalar pairs to avoid this warning."
                " {}".format(type(loss)))
        writer.flush()
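All of these handlers gate their output on `monai.utils.is_scalar`. A quick sketch of the check's behaviour as I understand it: plain numbers and 0-dimensional tensors pass, anything multi-dimensional is skipped with a warning.

import torch
from monai.utils import is_scalar

print(is_scalar(0.5))                # True: plain Python number
print(is_scalar(torch.tensor(0.5)))  # True: 0-dim tensor; .item() is used when logging
print(is_scalar(torch.ones(2, 3)))   # False: multi-dimensional output is ignored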
Example #5
        def _compare_metrics(engine: Workflow) -> None:
            key_metric_name = engine.state.key_metric_name
            if key_metric_name is not None:
                current_val_metric = engine.state.metrics[key_metric_name]
                if not is_scalar(current_val_metric):
                    warnings.warn(
                        "key metric is not a scalar value, skip the metric comparison with the current best metric."
                        "please set other metrics as the key metric, or change the `reduction` mode to 'mean'."
                    )
                    return

                if self.metric_cmp_fn(current_val_metric, engine.state.best_metric):
                    self.logger.info(f"Got new best metric of {key_metric_name}: {current_val_metric}")
                    engine.state.best_metric = current_val_metric
                    engine.state.best_metric_epoch = engine.state.epoch
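`_compare_metrics` only decides whether to update the best metric; the comparison itself is `self.metric_cmp_fn`. A minimal sketch of a custom comparison for a lower-is-better key metric, assuming the workflow accepts a `metric_cmp_fn` argument for it:

def lower_is_better(current_metric: float, best_metric: float) -> bool:
    # return True when current_metric should replace engine.state.best_metric
    return current_metric < best_metric

print(lower_is_better(0.12, 0.15))  # True: 0.12 would be recorded as the new best
# e.g. SupervisedEvaluator(..., metric_cmp_fn=lower_is_better)  # assumed keyword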
Example #6
        def _score_func(engine: Engine):
            if isinstance(key_metric_name, str):
                metric_name = key_metric_name
            elif hasattr(engine.state, "key_metric_name"):
                metric_name = engine.state.key_metric_name  # type: ignore
            else:
                raise ValueError(
                    f"Incompatible values: save_key_metric=True and key_metric_name={key_metric_name}."
                )
            metric = engine.state.metrics[metric_name]
            if not is_scalar(metric):
                warnings.warn(
                    "key metric is not a scalar value, skip the metric comparison and don't save a model."
                    " Please use other metrics as the key metric, or change the `reduction` mode to 'mean'."
                )
                return -1
            return (-1 if key_metric_negative_sign else 1) * metric
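Usage sketch for the scoring closure above (not from the original source): `_score_func` is what lets `CheckpointSaver` keep the model with the best key metric. The keyword names below are assumed from the public `monai.handlers.CheckpointSaver` signature; the directory and metric name are hypothetical.

import torch
from monai.handlers import CheckpointSaver

net = torch.nn.Linear(4, 2)

saver = CheckpointSaver(
    save_dir="./checkpoints",         # hypothetical output directory
    save_dict={"net": net},           # objects to serialize
    save_key_metric=True,             # enables the _score_func based "best model" saving
    key_metric_name="val_mean_dice",  # if None, engine.state.key_metric_name is used instead
)
# saver.attach(evaluator)  # attach to the validation engine that computes the metric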
Example #7
    def _default_epoch_writer(self, engine: Engine, writer) -> None:
        """
        Execute epoch level event write operation.
        Default to write the values from Ignite `engine.state.metrics` dict and
        write the values of specified attributes of `engine.state`.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            writer: TensorBoard or TensorBoardX writer, passed or created in TensorBoardHandler.

        """
        current_epoch = self.global_epoch_transform(engine.state.epoch)
        summary_dict = engine.state.metrics
        for name, value in summary_dict.items():
            if is_scalar(value):
                self._write_scalar(engine, writer, name, value, current_epoch)

        if self.state_attributes is not None:
            for attr in self.state_attributes:
                self._write_scalar(engine, writer, attr,
                                   getattr(engine.state, attr, None),
                                   current_epoch)
        writer.flush()
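Usage sketch for the epoch writer (not from the original source): plot an evaluator's metrics against the trainer's epoch counter via `global_epoch_transform`, which the method above applies before writing. The keyword names are assumed from the public `TensorBoardStatsHandler` signature; the engines are stand-ins.

from ignite.engine import Engine
from monai.handlers import TensorBoardStatsHandler

trainer = Engine(lambda engine, batch: batch)    # stand-in training engine
evaluator = Engine(lambda engine, batch: batch)  # stand-in validation engine

val_tb_stats = TensorBoardStatsHandler(
    log_dir="./runs",
    iteration_log=False,                                   # only write at epoch level
    global_epoch_transform=lambda x: trainer.state.epoch,  # re-index evaluator epochs by the trainer
)
val_tb_stats.attach(evaluator)  # _default_epoch_writer then runs on the evaluator's EPOCH_COMPLETED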