Code example #1
File: alchemy.py  Project: rhololkeolke/catalyst-rl
    def __init__(
        self,
        metric_names: List[str] = None,
        log_on_batch_end: bool = True,
        log_on_epoch_end: bool = True,
        **logging_params,
    ):
        """
        Args:
            metric_names (List[str]): list of metric names to log;
                if None, logs everything
            log_on_batch_end (bool): logs per-batch metrics if set to True
            log_on_epoch_end (bool): logs per-epoch metrics if set to True
        """
        super().__init__(
            order=CallbackOrder.Logging,
            node=CallbackNode.Master,
            type=CallbackType.Experiment,
        )
        self.metrics_to_log = metric_names
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if not (self.log_on_batch_end or self.log_on_epoch_end):
            raise ValueError("You have to log something!")

        # a suffix is only needed when both per-batch and per-epoch logging
        # are enabled; otherwise the single stream can use the bare name
        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

        self.logger = Logger(**logging_params)
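The suffix logic above is effectively an exclusive-or: a suffix is only needed when both per-batch and per-epoch logging are active, so the two streams do not collide under one metric name. A minimal sketch of the resulting names (``loss`` and ``train`` are placeholder metric and loader names; the both-``False`` case is rejected by the constructor above):

def resolve_suffixes(log_on_batch_end: bool, log_on_epoch_end: bool):
    # equivalent to the compound condition above: XOR of the two flags
    if log_on_batch_end != log_on_epoch_end:
        return "", ""
    return "_batch", "_epoch"

for flags in [(True, False), (False, True), (True, True)]:
    batch_suffix, epoch_suffix = resolve_suffixes(*flags)
    print(flags, f"loss/train{batch_suffix}", f"loss/train{epoch_suffix}")
# (True, False) loss/train       loss/train
# (False, True) loss/train       loss/train
# (True, True)  loss/train_batch loss/train_epoch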
Code example #2
    def _pre_experiment_hook(self, experiment: Experiment):
        monitoring_params = experiment.monitoring_params

        log_on_batch_end: bool = monitoring_params.pop("log_on_batch_end",
                                                       False)
        log_on_epoch_end: bool = monitoring_params.pop("log_on_epoch_end",
                                                       True)

        self._init(
            log_on_batch_end=log_on_batch_end,
            log_on_epoch_end=log_on_epoch_end,
        )
        self.logger = Logger(**monitoring_params)
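Note the use of ``pop`` rather than a plain lookup here: the two flags are removed from ``monitoring_params`` before the remainder is forwarded as ``Logger(**monitoring_params)``, so the Logger receives only its own keyword arguments (token, project, and so on). A small standalone sketch of the same pattern with placeholder values:

monitoring_params = {
    "token": "...",
    "project": "default",
    "log_on_batch_end": True,  # consumed by the runner, not the Logger
}
log_on_batch_end = monitoring_params.pop("log_on_batch_end", False)
log_on_epoch_end = monitoring_params.pop("log_on_epoch_end", True)
# only Logger-specific kwargs remain to be forwarded
assert monitoring_params == {"token": "...", "project": "default"}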
Code example #3
class AlchemyRunner(Runner):
    """Runner wrapper with Alchemy integration hooks.
    Read about Alchemy here https://alchemy.host

    Example:
        .. code-block:: python

            from catalyst.dl import SupervisedAlchemyRunner

            runner = SupervisedAlchemyRunner()

            runner.train(
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                loaders=loaders,
                logdir=logdir,
                num_epochs=num_epochs,
                verbose=True,
                monitoring_params={
                    "token": "...", # your Alchemy token
                    "project": "your_project_name",
                    "experiment": "your_experiment_name",
                    "group": "your_experiment_group_name"
                }
            )

    Powered by Catalyst.Ecosystem.
    """
    def _init(
        self,
        log_on_batch_end: bool = False,
        log_on_epoch_end: bool = True,
    ):
        """@TODO: Docs. Contribution is welcome."""
        super()._init()
        the_warning = DeprecatedWarning(
            self.__class__.__name__,
            deprecated_in="20.03",
            removed_in="20.04",
            details="Use AlchemyLogger instead.",
        )
        warnings.warn(the_warning, category=DeprecationWarning, stacklevel=2)
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

    def _log_metrics(self, metrics: Dict, mode: str, suffix: str = ""):
        for key, value in metrics.items():
            metric_name = f"{key}/{mode}{suffix}"
            self.logger.log_scalar(metric_name, value)

    def _pre_experiment_hook(self, experiment: Experiment):
        monitoring_params = experiment.monitoring_params

        log_on_batch_end: bool = monitoring_params.pop("log_on_batch_end",
                                                       False)
        log_on_epoch_end: bool = monitoring_params.pop("log_on_epoch_end",
                                                       True)

        self._init(
            log_on_batch_end=log_on_batch_end,
            log_on_epoch_end=log_on_epoch_end,
        )
        self.logger = Logger(**monitoring_params)

    def _post_experiment_hook(self, experiment: Experiment):
        self.logger.close()

    def _run_batch(self, batch):
        super()._run_batch(batch=batch)
        if self.log_on_batch_end and not self.state.is_distributed_worker:
            mode = self.state.loader_name
            metrics = self.state.batch_metrics
            self._log_metrics(metrics=metrics,
                              mode=mode,
                              suffix=self.batch_log_suffix)

    def _run_epoch(self, stage: str, epoch: int):
        super()._run_epoch(stage=stage, epoch=epoch)
        if self.log_on_epoch_end and not self.state.is_distributed_worker:
            mode_metrics = utils.split_dict_to_subdicts(
                dct=self.state.epoch_metrics,
                prefixes=list(self.state.loaders.keys()),
                extra_key="_base",
            )
            for mode, metrics in mode_metrics.items():
                self._log_metrics(metrics=metrics,
                                  mode=mode,
                                  suffix=self.epoch_log_suffix)

    def run_experiment(self, experiment: Experiment):
        """Starts experiment.

        Args:
            experiment (Experiment): experiment class
        """
        self._pre_experiment_hook(experiment=experiment)
        super().run_experiment(experiment=experiment)
        self._post_experiment_hook(experiment=experiment)
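Note the ``not self.state.is_distributed_worker`` guard in ``_run_batch`` and ``_run_epoch``: under distributed training only the master process sends metrics, which keeps each point from being logged once per worker.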
Code example #4
class AlchemyRunner(Runner):
    """
    Runner wrapper with alchemy integration hooks.
    """
    def _init(
        self,
        log_on_batch_end: bool = False,
        log_on_epoch_end: bool = True,
    ):
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

    def _log_metrics(self, metrics: Dict, mode: str, suffix: str = ""):
        for key, value in metrics.items():
            metric_name = f"{key}/{mode}{suffix}"
            self.logger.log_scalar(metric_name, value)

    def _pre_experiment_hook(self, experiment: Experiment):
        monitoring_params = experiment.monitoring_params

        log_on_batch_end: bool = \
            monitoring_params.pop("log_on_batch_end", False)
        log_on_epoch_end: bool = \
            monitoring_params.pop("log_on_epoch_end", True)

        self._init(
            log_on_batch_end=log_on_batch_end,
            log_on_epoch_end=log_on_epoch_end,
        )
        self.logger = Logger(**monitoring_params)

    def _post_experiment_hook(self, experiment: Experiment):
        self.logger.close()

    def _run_batch(self, batch):
        super()._run_batch(batch=batch)
        if self.log_on_batch_end:
            mode = self.state.loader_name
            metrics = self.state.metrics.batch_values
            self._log_metrics(metrics=metrics,
                              mode=mode,
                              suffix=self.batch_log_suffix)

    def _run_epoch(self, loaders):
        super()._run_epoch(loaders=loaders)
        if self.log_on_epoch_end:
            for mode, metrics in self.state.metrics.epoch_values.items():
                self._log_metrics(metrics=metrics,
                                  mode=mode,
                                  suffix=self.epoch_log_suffix)

    def run_experiment(self, experiment: Experiment, check: bool = False):
        self._pre_experiment_hook(experiment=experiment)
        super().run_experiment(experiment=experiment, check=check)
        self._post_experiment_hook(experiment=experiment)
Code example #5
File: alchemy_logger.py  Project: alyaxey/catalyst
class AlchemyLogger(Callback):
    """Logger callback, translates ``runner.*_metrics`` to Alchemy.
    Read about Alchemy here https://alchemy.host

    Example:
        .. code-block:: python

            from catalyst.dl import SupervisedRunner, AlchemyLogger

            runner = SupervisedRunner()

            runner.train(
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                loaders=loaders,
                logdir=logdir,
                num_epochs=num_epochs,
                verbose=True,
                callbacks={
                    "logger": AlchemyLogger(
                        token="...", # your Alchemy token
                        project="your_project_name",
                        experiment="your_experiment_name",
                        group="your_experiment_group_name",
                    )
                }
            )

    Powered by Catalyst.Ecosystem.
    """
    def __init__(
        self,
        metric_names: List[str] = None,
        log_on_batch_end: bool = True,
        log_on_epoch_end: bool = True,
        **logging_params,
    ):
        """
        Args:
            metric_names (List[str]): list of metric names to log;
                if None, logs everything
            log_on_batch_end (bool): logs per-batch metrics if set to True
            log_on_epoch_end (bool): logs per-epoch metrics if set to True
        """
        super().__init__(
            order=CallbackOrder.logging,
            node=CallbackNode.master,
            scope=CallbackScope.experiment,
        )
        self.metrics_to_log = metric_names
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if not (self.log_on_batch_end or self.log_on_epoch_end):
            raise ValueError("You have to log something!")

        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

        self.logger = Logger(**logging_params)

    def __del__(self):
        """@TODO: Docs. Contribution is welcome."""
        self.logger.close()

    def _log_metrics(self,
                     metrics: Dict[str, float],
                     step: int,
                     mode: str,
                     suffix=""):
        if self.metrics_to_log is None:
            metrics_to_log = sorted(metrics.keys())
        else:
            metrics_to_log = self.metrics_to_log

        for name in metrics_to_log:
            if name in metrics:
                metric_name = f"{name}/{mode}{suffix}"
                metric_value = metrics[name]
                self.logger.log_scalar(
                    name=metric_name,
                    value=metric_value,
                    step=step,
                )

    def on_batch_end(self, runner: IRunner):
        """Translate batch metrics to Alchemy."""
        if self.log_on_batch_end:
            mode = runner.loader_name
            metrics = runner.batch_metrics
            self._log_metrics(
                metrics=metrics,
                step=runner.global_sample_step,
                mode=mode,
                suffix=self.batch_log_suffix,
            )

    def on_loader_end(self, runner: IRunner):
        """Translate loader metrics to Alchemy."""
        if self.log_on_epoch_end:
            mode = runner.loader_name
            metrics = runner.loader_metrics
            self._log_metrics(
                metrics=metrics,
                step=runner.global_epoch,
                mode=mode,
                suffix=self.epoch_log_suffix,
            )

    def on_epoch_end(self, runner: IRunner):
        """Translate epoch metrics to Alchemy."""
        extra_mode = "_base"
        splitted_epoch_metrics = utils.split_dict_to_subdicts(
            dct=runner.epoch_metrics,
            prefixes=list(runner.loaders.keys()),
            extra_key=extra_mode,
        )

        if self.log_on_epoch_end:
            self._log_metrics(
                metrics=splitted_epoch_metrics[extra_mode],
                step=runner.global_epoch,
                mode=extra_mode,
                suffix=self.epoch_log_suffix,
            )
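``utils.split_dict_to_subdicts`` is a Catalyst helper that groups a flat metrics dict by loader-name prefix, collecting any keys that match no prefix under ``extra_key``; here it isolates the non-loader metrics under ``_base``. A hedged approximation of its behavior (the underscore-separated key format is an assumption; Catalyst's real implementation may differ in details):

from typing import Dict, List

def split_by_prefix(dct: Dict[str, float],
                    prefixes: List[str],
                    extra_key: str) -> Dict[str, Dict[str, float]]:
    # illustrative stand-in for catalyst.utils.split_dict_to_subdicts
    out: Dict[str, Dict[str, float]] = {prefix: {} for prefix in prefixes}
    out[extra_key] = {}
    for key, value in dct.items():
        for prefix in prefixes:
            if key.startswith(f"{prefix}_"):
                out[prefix][key[len(prefix) + 1:]] = value
                break
        else:
            out[extra_key][key] = value
    return out

metrics = {"train_loss": 0.5, "valid_loss": 0.7, "lr": 1e-3}
print(split_by_prefix(metrics, ["train", "valid"], "_base"))
# {'train': {'loss': 0.5}, 'valid': {'loss': 0.7}, '_base': {'lr': 0.001}}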
Code example #6
class AlchemyRunner(Runner):
    """
    Runner wrapper with Alchemy integration hooks.
    Read about Alchemy here https://alchemy.host
    Powered by Catalyst.Ecosystem

    Example:

        .. code-block:: python

            from catalyst.dl import SupervisedAlchemyRunner

            runner = SupervisedAlchemyRunner()

            runner.train(
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                loaders=loaders,
                logdir=logdir,
                num_epochs=num_epochs,
                verbose=True,
                monitoring_params={
                    "token": "...", # your Alchemy token
                    "experiment": "your_experiment_name",
                    "group": "your_experiment_group_name"
                }
            )
    """
    def _init(
        self,
        log_on_batch_end: bool = False,
        log_on_epoch_end: bool = True,
    ):
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

    def _log_metrics(self, metrics: Dict, mode: str, suffix: str = ""):
        for key, value in metrics.items():
            metric_name = f"{key}/{mode}{suffix}"
            self.logger.log_scalar(metric_name, value)

    def _pre_experiment_hook(self, experiment: Experiment):
        monitoring_params = experiment.monitoring_params

        log_on_batch_end: bool = \
            monitoring_params.pop("log_on_batch_end", False)
        log_on_epoch_end: bool = \
            monitoring_params.pop("log_on_epoch_end", True)

        self._init(
            log_on_batch_end=log_on_batch_end,
            log_on_epoch_end=log_on_epoch_end,
        )
        self.logger = Logger(**monitoring_params)

    def _post_experiment_hook(self, experiment: Experiment):
        self.logger.close()

    def _run_batch(self, batch):
        super()._run_batch(batch=batch)
        if self.log_on_batch_end:
            mode = self.state.loader_name
            metrics = self.state.metric_manager.batch_values
            self._log_metrics(metrics=metrics,
                              mode=mode,
                              suffix=self.batch_log_suffix)

    def _run_epoch(self, stage: str, epoch: int):
        super()._run_epoch(stage=stage, epoch=epoch)
        if self.log_on_epoch_end:
            for mode, metrics in \
                    self.state.metric_manager.epoch_values.items():
                self._log_metrics(metrics=metrics,
                                  mode=mode,
                                  suffix=self.epoch_log_suffix)

    def run_experiment(self, experiment: Experiment, check: bool = False):
        """Starts experiment

        Args:
            experiment (Experiment): experiment class
            check (bool): if ``True`` takes only 3 steps
        """
        self._pre_experiment_hook(experiment=experiment)
        super().run_experiment(experiment=experiment, check=check)
        self._post_experiment_hook(experiment=experiment)
Code example #7
File: example.py  Project: balakhonoff/alchemy
import random

from alchemy import Logger

# insert your personal token here
token = "..."
project = "default"

for gid in range(1):
    group = f"group_{gid}"
    for eid in range(2):
        experiment = f"experiment_{eid}"
        logger = Logger(
            token=token,
            experiment=experiment,
            group=group,
            project=project,
        )
        for mid in range(4):
            metric = f"metric_{mid}"
            # let's sample some random data
            n = 300
            x = random.randint(-10, 10)
            for _ in range(n):
                logger.log_scalar(metric, x)
                x += random.randint(-1, 1)
        logger.close()
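This standalone script doubles as a smoke test for a token and project setup: it creates one group containing two experiments and, for each, logs four random-walk metrics of 300 points apiece, closing the logger after every experiment.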
Code example #8
class AlchemyLogger(Callback):
    """
    Logger callback, translates ``state.*_metrics`` to Alchemy
    Read about Alchemy here https://alchemy.host
    Powered by Catalyst.Ecosystem

    Example:

        .. code-block:: python

            from catalyst.dl import SupervisedRunner, AlchemyLogger

            runner = SupervisedRunner()

            runner.train(
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                loaders=loaders,
                logdir=logdir,
                num_epochs=num_epochs,
                verbose=True,
                callbacks={
                    "logger": AlchemyLogger(
                        "token": "...", # your Alchemy token
                        "project": "your_project_name",
                        "experiment": "your_experiment_name",
                        "group": "your_experiment_group_name",
                    )
                }
            )
    """
    def __init__(
        self,
        metric_names: List[str] = None,
        log_on_batch_end: bool = True,
        log_on_epoch_end: bool = True,
        **logging_params,
    ):
        """
        Args:
            metric_names (List[str]): list of metric names to log;
                if None, logs everything
            log_on_batch_end (bool): logs per-batch metrics if set to True
            log_on_epoch_end (bool): logs per-epoch metrics if set to True
        """
        super().__init__(
            order=CallbackOrder.Logging,
            node=CallbackNode.Master,
            type=CallbackType.Experiment,
        )
        self.metrics_to_log = metric_names
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if not (self.log_on_batch_end or self.log_on_epoch_end):
            raise ValueError("You have to log something!")

        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

        self.logger = Logger(**logging_params)

    def __del__(self):
        self.logger.close()

    def _log_metrics(self,
                     metrics: Dict[str, float],
                     step: int,
                     mode: str,
                     suffix=""):
        if self.metrics_to_log is None:
            metrics_to_log = sorted(metrics.keys())
        else:
            metrics_to_log = self.metrics_to_log

        for name in metrics_to_log:
            if name in metrics:
                metric_name = f"{name}/{mode}{suffix}"
                metric_value = metrics[name]
                self.logger.log_scalar(metric_name, metric_value)

    def on_batch_end(self, state: _State):
        """Translate batch metrics to Alchemy"""
        if state.logdir is None:
            return

        if self.log_on_batch_end:
            mode = state.loader_name
            metrics_ = state.batch_metrics
            self._log_metrics(
                metrics=metrics_,
                step=state.global_step,
                mode=mode,
                suffix=self.batch_log_suffix,
            )

    def on_loader_end(self, state: _State):
        """Translate epoch metrics to Alchemy"""
        if state.logdir is None:
            return

        if self.log_on_epoch_end:
            mode = state.loader_name
            metrics_ = state.loader_metrics
            self._log_metrics(
                metrics=metrics_,
                step=state.global_epoch,
                mode=mode,
                suffix=self.epoch_log_suffix,
            )