Example #1
    def test_hparams_wrong_parameter(self):
        with self.assertRaises(TypeError):
            summary.hparams([], {})
        with self.assertRaises(TypeError):
            summary.hparams({}, [])
        with self.assertRaises(ValueError):
            res = summary.hparams({'pytorch': [1, 2]}, {'accuracy': 2.0})
        # metric data is used in writer.py, so the code path is different, which
        # leads to a different exception type.
        with self.assertRaises(NotImplementedError):
            with self.createSummaryWriter() as writer:
                writer.add_hparams({'pytorch': 1.0}, {'accuracy': [1, 2]})
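
A minimal usage sketch of the call shapes this test treats as valid: both arguments must be plain dicts whose values are scalars (int, float, bool, str, or 0-dim tensors); the log directory below is hypothetical.

from torch.utils.tensorboard import SummaryWriter

# Minimal sketch: scalar-valued dicts are accepted, while the list-valued
# entries exercised in the test above raise errors.
with SummaryWriter(log_dir='runs/hparams_demo') as writer:  # hypothetical logdir
    writer.add_hparams({'pytorch': 1.0}, {'accuracy': 2.0})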
Example #2
def _log_hparams(experiment,
                 hparam_dict=None,
                 metric_dict=None,
                 name=None,
                 global_step=None):
    if type(hparam_dict) is not dict or type(metric_dict) is not dict:
        raise TypeError("hparam_dict and metric_dict should be dictionary.")

    # todo is it possible to use the default file_writer here?
    with SummaryWriter(log_dir=os.path.join(
            experiment.file_writer.get_logdir(), name)) as w_hp:
        if global_step == 0:
            exp, ssi, sei = hparams(hparam_dict, metric_dict)
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)

        if global_step > 0:
            for k, v in metric_dict.items():
                # These values need to be added to the same SummaryWriter object
                # as the hparams: either log the hparams with this writer, or log
                # the per-epoch values with the same SummaryWriter object that
                # wrote the hparams.
                if isinstance(v, dict):
                    w_hp.add_scalars(k, v, global_step=global_step)
                    logger.warning(
                        "Logging multiple scalars with a dict will not work for "
                        "hparams and metrics, because add_scalars generates new "
                        "filewriters but everything that should be shown in hparams "
                        "needs to be written with the same filewriter.")
                else:
                    w_hp.add_scalar(k, v, global_step=global_step)
Example #3
    def add_hparams(self,
                    hparam_dict,
                    metric_dict,
                    hparam_domain_discrete=None,
                    run_name=None):

        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError(
                'hparam_dict and metric_dict should be dictionary.')

        exp, ssi, sei = hparams(hparam_dict, metric_dict,
                                hparam_domain_discrete)

        if not run_name:
            logdir = self._get_file_writer().get_logdir()
        else:
            logdir = os.path.join(self._get_file_writer().get_logdir(),
                                  run_name)

        with SummaryWriter(log_dir=logdir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v)
Example #4
    def log_hyperparams(self,
                        params: Union[Dict[str, Any], Namespace],
                        metrics: Optional[Dict[str, Any]] = None) -> None:
        params = self._convert_params(params)

        # store params to output
        if OMEGACONF_AVAILABLE and isinstance(params, Container):
            self.hparams = OmegaConf.merge(self.hparams, params)
        else:
            self.hparams.update(params)

        # format params into a form suitable for tensorboard
        params = self._flatten_dict(params)
        params = self._sanitize_params(params)

        if metrics is None:
            if self._default_hp_metric:
                metrics = {"hp_metric": -1}
        elif not isinstance(metrics, dict):
            metrics = {"hp_metric": metrics}

        if metrics:
            self.log_metrics(metrics, 0)
            exp, ssi, sei = hparams(params, metrics)
            writer = self.experiment._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)
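
A hedged usage sketch of how a logger method like this is typically invoked, assuming PyTorch Lightning's TensorBoardLogger; the directory names and values are hypothetical.

from pytorch_lightning.loggers import TensorBoardLogger

# Log hyperparameters together with an initial metric value so the HParams
# tab can link them to scalar updates written later in the same run.
logger = TensorBoardLogger(save_dir='logs', name='demo')  # hypothetical paths
logger.log_hyperparams({'lr': 1e-3, 'dropout': 0.1},
                       metrics={'hp_metric': 0.0})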
Example #5
def add_hparams(self, hparam_dict, metric_dict, global_step=None):
    """Add a set of hyperparameters to be compared in TensorBoard.
    Args:
        hparam_dict (dictionary): Each key-value pair in the dictionary is the
            name of the hyper parameter and its corresponding value.
        metric_dict (dictionary): Each key-value pair in the dictionary is the
            name of the metric and its corresponding value. Note that the key used
            here should be unique in the tensorboard record. Otherwise the value
            you added by `add_scalar` will be displayed in the hparam plugin. In most
            cases, this is unwanted.

        p.s. The value in the dictionary can be `int`, `float`, `bool`, `str`, or a
        0-dim tensor.
    Examples::
        from torch.utils.tensorboard import SummaryWriter
        with SummaryWriter() as w:
            for i in range(5):
                w.add_hparams({'lr': 0.1*i, 'bsize': i},
                              {'hparam/accuracy': 10*i, 'hparam/loss': 10*i})
    Expected result:
    .. image:: _static/img/tensorboard/add_hparam.png
        :scale: 50 %
    """
    from torch.utils.tensorboard.summary import hparams
    if type(hparam_dict) is not dict or type(metric_dict) is not dict:
        raise TypeError('hparam_dict and metric_dict should be dictionary.')
    exp, ssi, sei = hparams(hparam_dict, metric_dict)

    self.file_writer.add_summary(exp, global_step)
    self.file_writer.add_summary(ssi, global_step)
    self.file_writer.add_summary(sei, global_step)
    for k, v in metric_dict.items():
        self.add_scalar(k, v, global_step)
Example #6
    def log_hyperparams(self,
                        params: Union[Dict[str, Any], Namespace],
                        metrics: Optional[Dict[str, Any]] = None) -> None:
        params = self._convert_params(params)
        wandb_experiment, tensorboard_experiment = self.experiment

        # store params to output
        if OMEGACONF_AVAILABLE and isinstance(params, Container):
            self.hparams = OmegaConf.merge(self.hparams, params)
        else:
            self.hparams.update(params)
        params = self._flatten_dict(params)
        params = self._sanitize_callable_params(params)
        if metrics is None:
            if self._default_hp_metric:
                metrics = {"hp_metric": -1}
        elif not isinstance(metrics, dict):
            metrics = {"hp_metric": metrics}

        # TensorBoard
        if metrics:
            metrics = self._add_prefix(metrics)
            for k, v in metrics.items():
                if isinstance(v, torch.Tensor):
                    v = v.item()
                tensorboard_experiment.add_scalar(k, v, 0)
            exp, ssi, sei = hparams(params, metrics)
            writer = tensorboard_experiment._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)

        # Wandb
        wandb_experiment.config.update(params, allow_val_change=True)
Example #7
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace],
                        metrics: Optional[Dict[str, Any]] = None) -> None:
        params = self._convert_params(params)

        # store params to output
        self.hparams.update(params)

        # format params into a form suitable for tensorboard
        params = self._flatten_dict(params)
        params = self._sanitize_params(params)

        if parse_version(torch.__version__) < parse_version("1.3.0"):
            warn(
                f"Hyperparameter logging is not available for Torch version {torch.__version__}."
                " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
                " hyperparameter logging."
            )
        else:
            from torch.utils.tensorboard.summary import hparams

            if metrics is None:
                metrics = {}
            exp, ssi, sei = hparams(params, metrics)
            writer = self.experiment._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)

            if metrics:
                # necessary for hparam comparison with metrics
                self.log_metrics(metrics)
Example #8
    def _add_hparams2(self, hparam_dict: dict, metric_dict: dict):
        exp, ssi, sei = hparams(hparam_dict, metric_dict)
        self.tf_summary_writer.file_writer.add_summary(exp)
        self.tf_summary_writer.file_writer.add_summary(ssi)
        self.tf_summary_writer.file_writer.add_summary(sei)
        for k, v in metric_dict.items():
            self.tf_summary_writer.add_scalar(k, v)
Example #9
    def _add_hparams(self, hparam_dict: dict, metric_dict: dict):
        exp, ssi, sei = hparams(hparam_dict, metric_dict)
        with SummaryWriter(log_dir=self.log_dir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v)
Example #10
    def log_hyperparams_metrics(self, params: dict, metrics: dict) -> None:
        params = self._convert_params(params)
        exp, ssi, sei = hparams(params, metrics)
        writer = self.experiment._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)
        # some alternative should be added
        self.tags.update(params)
Example #11
    def add_hparams(self, hparam_dict: dict, metric_dict: dict, **kwargs):
        torch._C._log_api_usage_once('tensorboard.logging.add_hparams')
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError('hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = summary.hparams(hparam_dict, metric_dict)
        self.file_writer.add_summary(exp)
        self.file_writer.add_summary(ssi)
        self.file_writer.add_summary(sei)
        for k, v in metric_dict.items():
            self.add_scalar(k, v)
Example #12
def log_hyperparams(writer, args):
    from torch.utils.tensorboard.summary import hparams
    vars_args = {
        k: v if isinstance(v, str) else repr(v)
        for k, v in vars(args).items()
    }
    exp, ssi, sei = hparams(vars_args, {})
    writer.file_writer.add_summary(exp)
    writer.file_writer.add_summary(ssi)
    writer.file_writer.add_summary(sei)
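
A hedged usage sketch of how this helper might be called, with hypothetical argparse flags and log directory; non-primitive values such as the list of layer sizes are stored via repr(), since the HParams plugin expects simple scalar or string values.

import argparse
from torch.utils.tensorboard import SummaryWriter

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01)            # hypothetical flag
parser.add_argument('--layers', type=int, nargs='+', default=[64, 64])
args = parser.parse_args([])

with SummaryWriter(log_dir='runs/args_demo') as writer:          # hypothetical logdir
    log_hyperparams(writer, args)  # the helper defined above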
Example #13
    def log_hyperparams_metrics(self, params: Union[Dict[str, Any],
                                                    argparse.Namespace],
                                metrics: Dict[str, Any]) -> None:
        params = self._flatten_dict(params)
        params = self._sanitize_params(params)

        exp, ssi, sei = hparams(params, metrics)
        writer = self.experiment._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)

        self.hparams.update(params)
Example #14
    def log_hyperparams_metrics(self, params: Dict[str, Any], metrics: Dict[str, Any] = None):
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        sanitized_params = self._sanitize_params(params)
        if metrics is None:
            metrics = {}
        exp, ssi, sei = hparams(sanitized_params, metrics)
        writer = self.experiment._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)

        # some alternative should be added
        self.tags.update(sanitized_params)
Example #15
    def _write_hparams(self, hparam_dict, metric_dict, name, hparam_domain_discrete):
        # adapted from
        # https://github.com/lanpa/tensorboardX/blob/master/tensorboardX/writer.py#L336-L376
        exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)
        w_hp = SummaryWriter(log_dir=os.path.join(self._file_writer.log_dir, name))
        w_hp.file_writer.add_summary(exp)
        w_hp.file_writer.add_summary(ssi)
        w_hp.file_writer.add_summary(sei)
        for k, values in metric_dict.items():
            global_step = 0
            for v in values:
                w_hp.add_scalar(k, v, global_step)
                global_step += 1
        w_hp.close()
Example #16
    def add_hparams(self, hparam_dict, metric_dict, epoch):
        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError("hparam_dict and metric_dict should be dictionary.")
        exp, ssi, sei = hparams(hparam_dict, metric_dict)

        logdir = self._get_file_writer().get_logdir()

        with SummaryWriter(log_dir=logdir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v, epoch)
Example #17
    def add_hparams(self, hparam_dict, metric_dict):
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError(
                'hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict)

        logdir = self._get_file_writer().get_logdir()

        with SummaryWriter(log_dir=logdir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v)
Example #18
def log_hparams(params):
    writer = logger.get_tf_summary_writer()
    params = flatten_dict(params)
    filtered_params = dict()
    for key, value in params.items():
        if type(value) in [int, float, str, bool]:  # , torch.Tensor]:
            filtered_params[key] = value
    hparam_dict = filtered_params
    metric_dict = {'Return/Average': float('nan')}
    exp, ssi, sei = hparams(hparam_dict, metric_dict)
    writer.file_writer.add_summary(exp)
    writer.file_writer.add_summary(ssi)
    writer.file_writer.add_summary(sei)
    for k, v in metric_dict.items():
        writer.add_scalar(k, v)
Example #19
    def add_hparams(
        self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None
    ):
        """Add a set of hyperparameters to be compared in TensorBoard.

        Args:
            hparam_dict (dict): Each key-value pair in the dictionary is the
              name of the hyper parameter and its corresponding value.
              The type of the value can be one of `bool`, `string`, `float`,
              `int`, or `None`.
            metric_dict (dict): Each key-value pair in the dictionary is the
              name of the metric and its corresponding value. Note that the key used
              here should be unique in the tensorboard record. Otherwise the value
              you added by ``add_scalar`` will be displayed in the hparam plugin. In most
              cases, this is unwanted.
            hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that
              contains names of the hyperparameters and all discrete values they can hold
            run_name (str): Name of the run, to be included as part of the logdir.
              If unspecified, will use current timestamp.

        Examples::

            from torch.utils.tensorboard import SummaryWriter
            with SummaryWriter() as w:
                for i in range(5):
                    w.add_hparams({'lr': 0.1*i, 'bsize': i},
                                  {'hparam/accuracy': 10*i, 'hparam/loss': 10*i})

        Expected result:

        .. image:: _static/img/tensorboard/add_hparam.png
           :scale: 50 %

        """

        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError('hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)

        if not run_name:
            run_name = str(time.time())
        logdir = os.path.join(self._get_file_writer().get_logdir(), run_name)
        with SummaryWriter(log_dir=logdir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v)
Example #20
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
        """
        Log hyperparameters in form of a Dict or Namespace object to tensorboard

        :param params: Dict or Namespace object. Contains training parameters
        """
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        sanitized_params = self._sanitize_params(params)

        from torch.utils.tensorboard.summary import hparams
        exp, ssi, sei = hparams(sanitized_params, {})
        writer = self._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)
Example #21
    def add_hparams(self, hparam_dict, metric_dict):
        """Alteration to the offical SummaryWriter from PyTorch, which creates
        a new tensorboard event file with the hyperparameters and adds additional
        scalars to the scalar-tab with the registered metric value.
        
        This is unfortunate behavior, and the below merely adds the hyperparameters
        to the existing eventfile.
        """
        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError('hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict)

        self._get_file_writer().add_summary(exp)
        self._get_file_writer().add_summary(ssi)
        self._get_file_writer().add_summary(sei)
Example #22
    def test_hparams_smoke(self):
        hp = {'lr': 0.1, 'bsize': 4}
        mt = {'accuracy': 0.1, 'loss': 10}
        summary.hparams(hp, mt)  # only a smoke test, because protobuf in python2/3 serializes dictionaries differently.

        hp = {'use_magic': True, 'init_string': "42"}
        mt = {'accuracy': 0.1, 'loss': 10}
        summary.hparams(hp, mt)

        mt = {'accuracy': torch.zeros(1), 'loss': torch.zeros(1)}
        summary.hparams(hp, mt)
Example #23
    def log_hyperparams_metrics(self, params: dict, metrics: dict) -> None:
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        sanitized_params = self._sanitize_params(params)
        if metrics is None:
            metrics = {}
        exp, ssi, sei = hparams(sanitized_params, metrics)
        writer = self.experiment._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)

        # some alternative should be added
        try:
            self.tags.update(sanitized_params)
        except Exception:
            self.tags = sanitized_params
Example #24
    def log_hyperparams(self, params: Union[Dict[str, Any],
                                            Namespace]) -> None:
        params = self._convert_params(params)

        if parse_version(torch.__version__) < parse_version("1.3.0"):
            warn(
                f"Hyperparameter logging is not available for Torch version {torch.__version__}."
                " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
                " hyperparameter logging.")
        else:
            from torch.utils.tensorboard.summary import hparams
            exp, ssi, sei = hparams(params, {})
            writer = self.experiment._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)
        # some alternative should be added
        self.tags.update(params)
Example #25
    def add_hparams(
        self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None
    ):
        """
        Log the given hyperparameters to the same event file that is currently open.

        :param hparam_dict:            The static hyperparameters to simply log to the 'hparams' table.
        :param metric_dict:            The metrics and dynamic hyper parameters to link with the plots.
        :param hparam_domain_discrete: Not used in this SummaryWriter.
        :param run_name:               Not used in this SummaryWriter.
        """
        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError("hparam_dict and metric_dict should be dictionary.")
        exp, ssi, sei = hparams(hparam_dict, metric_dict)
        self._get_file_writer().add_summary(exp)
        self._get_file_writer().add_summary(ssi)
        self._get_file_writer().add_summary(sei)
Example #26
    def add_hparams(self,
                    hparam_dict,
                    metric_dict,
                    hparam_domain_discrete=None,
                    run_name=None):
        assert run_name is None  # Disabled feature. Run name init by summary writer ctor

        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError(
                'hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict,
                                hparam_domain_discrete)

        # run_name argument is discarded and the writer itself is used (no extra writer instantiation)
        self.file_writer.add_summary(exp)
        self.file_writer.add_summary(ssi)
        self.file_writer.add_summary(sei)
        for k, v in metric_dict.items():
            self.add_scalar(k, v)
Example #27
    def log_hyperparams(self, params, metrics=None):
        # store params to output
        self.hparams.update(params)

        # format params into a form suitable for tensorboard
        params = self._flatten_dict(params)
        params = self._sanitize_params(params)

        if metrics is None:
            if self._default_hp_metric:
                metrics = {"hp_metric": -1}
        elif not isinstance(metrics, dict):
            metrics = {"hp_metric": metrics}

        if metrics:
            exp, ssi, sei = hparams(params, metrics)
            writer = self.experiment._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)
Example #28
    def test_hparams_domain_discrete(self):
        hp = {"lr": 0.1, "bool_var": True, "string_var": "hi"}
        mt = {"accuracy": 0.1}
        hp_domain = {"lr": [0.1], "bool_var": [True], "string_var": ["hi"]}

        # hparam_domain_discrete keys need to be a subset of hparam_dict keys
        with self.assertRaises(TypeError):
            summary.hparams(hp, mt, hparam_domain_discrete={"wrong_key": []})

        # hparam_domain_discrete values need to be the same type as hparam_dict values
        with self.assertRaises(TypeError):
            summary.hparams(hp, mt, hparam_domain_discrete={"lr": [True]})

        # only a smoke test, because protobuf map serialization is nondeterministic.
        summary.hparams(hp, mt, hparam_domain_discrete=hp_domain)
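
A minimal sketch of passing the same discrete domains through SummaryWriter.add_hparams (values and log directory are hypothetical; hparam_domain_discrete is the parameter shown in Example #19).

from torch.utils.tensorboard import SummaryWriter

# Declaring the domains lets the HParams plugin offer the full set of allowed
# values per hyperparameter, not only the values logged so far.
with SummaryWriter(log_dir='runs/domain_demo') as w:  # hypothetical logdir
    w.add_hparams(
        {'lr': 0.1, 'bool_var': True, 'string_var': 'hi'},
        {'accuracy': 0.1},
        hparam_domain_discrete={'lr': [0.01, 0.1],
                                'bool_var': [True, False],
                                'string_var': ['hi', 'bye']},
    )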
Example #29
    def tensorboard_log(self,
                        metrics_results,
                        data_split: str,
                        epoch: int,
                        step: int,
                        log_hparam: bool = False):
        metrics_results['epoch'] = epoch
        for i, param_group in enumerate(self.optim.param_groups):
            metrics_results[f'lr_param_group_{i}'] = param_group['lr']
        logs = {}
        for key, metric in metrics_results.items():
            metric_name = f'{key}/{data_split}'
            logs[metric_name] = metric
            self.writer.add_scalar(metric_name, metric, step)

        if log_hparam:  # write hyperparameters
            exp, ssi, sei = hparams(flatten_dict(self.hparams),
                                    flatten_dict(logs))
            self.writer.file_writer.add_summary(exp)
            self.writer.file_writer.add_summary(ssi)
            self.writer.file_writer.add_summary(sei)
Example #30
    def add_hparams(self, hparam_dict, metric_dict):
        """Add a set of hyperparameters to be compared in TensorBoard.

        Args:
            hparam_dict (dict): Each key-value pair in the dictionary is the
              name of the hyper parameter and its corresponding value.
            metric_dict (dict): Each key-value pair in the dictionary is the
              name of the metric and its corresponding value. Note that the key used
              here should be unique in the tensorboard record. Otherwise the value
              you added by ``add_scalar`` will be displayed in the hparam plugin. In most
              cases, this is unwanted.

        Examples::

            from torch.utils.tensorboard import SummaryWriter
            with SummaryWriter() as w:
                for i in range(5):
                    w.add_hparams({'lr': 0.1*i, 'bsize': i},
                                  {'hparam/accuracy': 10*i, 'hparam/loss': 10*i})
        """
        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError('hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict)

        # ---- Previously, add_hparams() added a subfolder inside each run ----
        # logdir = os.path.join(
        #     self._get_file_writer().get_logdir(),
        #     str(time.time())
        # )

        # ---- Now, it does not add any subfolder ----
        logdir = self._get_file_writer().get_logdir()

        with SummaryWriter(log_dir=logdir) as w_hp:
            w_hp.file_writer.add_summary(exp)
            w_hp.file_writer.add_summary(ssi)
            w_hp.file_writer.add_summary(sei)
            for k, v in metric_dict.items():
                w_hp.add_scalar(k, v)
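
Most of the examples above share one pattern: call hparams() to build the three protobuf summaries (experiment, session start info, session end info) and write them with the same file writer that records the metrics, so TensorBoard associates the hyperparameters with that run instead of creating a new subfolder. A minimal sketch of that pattern, with hypothetical values and log directory:

from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams

writer = SummaryWriter(log_dir='runs/same_run_demo')  # hypothetical logdir
hparam_dict = {'lr': 0.01, 'batch_size': 32}          # hypothetical values
metric_dict = {'hparam/accuracy': 0.9}

# hparams() returns three Summary protobufs; writing them through the
# writer's existing file_writer keeps everything in one event file / run.
exp, ssi, sei = hparams(hparam_dict, metric_dict)
writer.file_writer.add_summary(exp)
writer.file_writer.add_summary(ssi)
writer.file_writer.add_summary(sei)
for k, v in metric_dict.items():
    writer.add_scalar(k, v)
writer.close()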