Code example #1
File: metric.py  Project: py361/ignite
from abc import ABCMeta, abstractmethod

import torch

from ignite._six import with_metaclass
from ignite.engine import Events


class Metric(with_metaclass(ABCMeta, object)):
    """
    Base class for all Metrics.

    Args:
        output_transform (callable, optional): a callable that is used to transform the
            :class:`ignite.engine.Engine`'s `process_function`'s output into the
            form expected by the metric. This can be useful if, for example, you have a multi-output model and
            you want to compute the metric with respect to one of the outputs.

    """
    def __init__(self, output_transform=lambda x: x):
        self._output_transform = output_transform
        self.reset()

    @abstractmethod
    def reset(self):
        """
        Resets the metric to its initial state.

        This is called at the start of each epoch.
        """
        pass

    @abstractmethod
    def update(self, output):
        """
        Updates the metric's state using the passed batch output.

        This is called once for each batch.

        Args:
            output: the output from the engine's `process_function`.
        """
        pass

    @abstractmethod
    def compute(self):
        """
        Computes the metric based on its accumulated state.

        This is called at the end of each epoch.

        Returns:
            Any: the actual quantity of interest

        Raises:
            NotComputableError: raised when the metric cannot be computed
        """
        pass

    def started(self, engine):
        self.reset()

    @torch.no_grad()
    def iteration_completed(self, engine):
        output = self._output_transform(engine.state.output)
        self.update(output)

    def completed(self, engine, name):
        engine.state.metrics[name] = self.compute()

    def attach(self, engine, name):
        engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
        if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):
            engine.add_event_handler(Events.EPOCH_STARTED, self.started)
        if not engine.has_event_handler(self.iteration_completed,
                                        Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED,
                                     self.iteration_completed)

    def __add__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x + y, self, other)

    def __radd__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x + y, other, self)

    def __sub__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x - y, self, other)

    def __rsub__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x - y, other, self)

    def __mul__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x * y, self, other)

    def __rmul__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x * y, other, self)

    def __pow__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x**y, self, other)

    def __rpow__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x**y, other, self)

    def __mod__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x % y, self, other)

    def __div__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__div__(y), self, other)

    def __rdiv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__div__(y), other, self)

    def __truediv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)

    def __rtruediv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)

    def __floordiv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x // y, self, other)
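A minimal usage sketch (not part of the snippet above): a concrete metric only has to implement reset, update, and compute, and attach wires it to the engine events shown above. The class name MeanError and the assumption that the engine output is a (y_pred, y) pair are illustrative, not from the project.

import torch

from ignite.exceptions import NotComputableError
from ignite.metrics import Metric


class MeanError(Metric):
    """Hypothetical metric: mean absolute error accumulated over an epoch."""

    def reset(self):
        # Called via `started` at EPOCH_STARTED.
        self._sum = 0.0
        self._num_examples = 0

    def update(self, output):
        # Called via `iteration_completed` at ITERATION_COMPLETED, after `output_transform`.
        y_pred, y = output
        self._sum += torch.abs(y_pred - y).sum().item()
        self._num_examples += y.shape[0]

    def compute(self):
        # Called via `completed` at EPOCH_COMPLETED.
        if self._num_examples == 0:
            raise NotComputableError("MeanError needs at least one example before it can be computed.")
        return self._sum / self._num_examples


# MeanError().attach(evaluator, "mean_error") registers the handlers above;
# the result then appears in evaluator.state.metrics["mean_error"] after each epoch.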
Code example #2
File: metric.py  Project: snie2012/hacky-ignite
import numbers
import warnings
from abc import ABCMeta, abstractmethod

import torch
import torch.distributed as dist

from ignite._six import with_metaclass
from ignite.engine import Events


class Metric(with_metaclass(ABCMeta, object)):
    """
    Base class for all Metrics.

    Args:
        output_transform (callable, optional): a callable that is used to transform the
            :class:`~ignite.engine.Engine`'s `process_function`'s output into the
            form expected by the metric. This can be useful if, for example, you have a multi-output model and
            you want to compute the metric with respect to one of the outputs.
        device (str or torch.device, optional): device specification in case of distributed computation usage.
            In most cases it can be defined as "cuda:local_rank", or simply "cuda"
            if `torch.cuda.set_device(local_rank)` has already been called. By default, if a distributed
            process group is initialized and available, device is set to `cuda`.

    """

    def __init__(self, output_transform=lambda x: x, device=None):
        self._output_transform = output_transform

        # Check device if distributed is initialized:
        if dist.is_available() and dist.is_initialized():

            # check if reset and update methods are decorated. Compute may not be decorated
            if not (hasattr(self.reset, "_decorated") and hasattr(self.update, "_decorated")):
                warnings.warn("{} class does not support distributed setting. Computed result is not collected "
                              "across all computing devices".format(self.__class__.__name__),
                              RuntimeWarning)
            if device is None:
                device = "cuda"
            device = torch.device(device)
        self._device = device
        self._is_reduced = False
        self.reset()

    @abstractmethod
    def reset(self):
        """
        Resets the metric to its initial state.

        This is called at the start of each epoch.
        """
        pass

    @abstractmethod
    def update(self, output):
        """
        Updates the metric's state using the passed batch output.

        This is called once for each batch.

        Args:
            output: the output from the engine's `process_function`.
        """
        pass

    @abstractmethod
    def compute(self):
        """
        Computes the metric based on its accumulated state.

        This is called at the end of each epoch.

        Returns:
            Any: the actual quantity of interest.

        Raises:
            NotComputableError: raised when the metric cannot be computed.
        """
        pass

    def _sync_all_reduce(self, tensor):
        if not (dist.is_available() and dist.is_initialized()):
            # Nothing to reduce
            return tensor

        tensor_to_number = False
        if isinstance(tensor, numbers.Number):
            tensor = torch.tensor(tensor, device=self._device)
            tensor_to_number = True

        if isinstance(tensor, torch.Tensor):
            # check if the tensor is at specified device
            if tensor.device != self._device:
                tensor = tensor.to(self._device)
        else:
            raise TypeError("Unhandled input type {}".format(type(tensor)))

        # synchronize and reduce
        dist.barrier()
        dist.all_reduce(tensor)

        if tensor_to_number:
            return tensor.item()
        return tensor

    def started(self, engine):
        self.reset()

    @torch.no_grad()
    def iteration_completed(self, engine):
        output = self._output_transform(engine.state.output)
        self.update(output)

    def completed(self, engine, name):
        result = self.compute()
        if torch.is_tensor(result) and len(result.shape) == 0:
            result = result.item()
        engine.state.metrics[name] = result

    def attach(self, engine, name):
        engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
        if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):
            engine.add_event_handler(Events.EPOCH_STARTED, self.started)
        if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)

    def __add__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x + y, self, other)

    def __radd__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x + y, other, self)

    def __sub__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x - y, self, other)

    def __rsub__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x - y, other, self)

    def __mul__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x * y, self, other)

    def __rmul__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x * y, other, self)

    def __pow__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x ** y, self, other)

    def __rpow__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x ** y, other, self)

    def __mod__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x % y, self, other)

    def __div__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__div__(y), self, other)

    def __rdiv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__div__(y), other, self)

    def __truediv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)

    def __rtruediv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)

    def __floordiv__(self, other):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x, y: x // y, self, other)

    def __getattr__(self, attr):
        from ignite.metrics import MetricsLambda

        def fn(x, *args, **kwargs):
            return getattr(x, attr)(*args, **kwargs)

        def wrapper(*args, **kwargs):
            return MetricsLambda(fn, self, *args, **kwargs)
        return wrapper

    def __getitem__(self, index):
        from ignite.metrics import MetricsLambda
        return MetricsLambda(lambda x: x[index], self)
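A sketch of how a subclass can use the _sync_all_reduce helper above so that each process's partial sums are combined before compute returns; the class name and the (y_pred, y) output format are assumptions. As the constructor above shows, a RuntimeWarning is emitted when reset/update do not carry the _decorated flag set by decorators defined elsewhere in that file; those decorators are omitted here for brevity.

import torch

from ignite.exceptions import NotComputableError


class DistributedMeanError(Metric):
    """Hypothetical distributed-aware metric built on the Metric class above."""

    def reset(self):
        self._sum = 0.0
        self._num_examples = 0

    def update(self, output):
        y_pred, y = output
        self._sum += torch.abs(y_pred - y).sum().item()
        self._num_examples += y.shape[0]

    def compute(self):
        # Each process holds only its own partial sums; _sync_all_reduce (defined above)
        # all-reduces them when torch.distributed is initialized and is a no-op otherwise.
        total = self._sync_all_reduce(self._sum)
        num_examples = self._sync_all_reduce(self._num_examples)
        if num_examples == 0:
            raise NotComputableError("No examples were seen during this epoch.")
        return total / num_examples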
Code example #3
File: param_scheduler.py  Project: ztttttttt/ignite
from abc import ABCMeta, abstractmethod

from ignite._six import with_metaclass


class ParamScheduler(with_metaclass(ABCMeta, object)):
    """An abstract class for updating an optimizer's parameter value during
    training.

    Args:
        optimizer (`torch.optim.Optimizer` or dict): the optimizer or parameters group to use.
        param_name (str): name of optimizer's parameter to update.
        save_history (bool, optional): whether to log the parameter values to
            `engine.state.param_history`, (default=False).


    Note:
        Parameter scheduler works independently of the internal state of the attached optimizer.
        More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
        sets defined absolute values.

    """
    def __init__(self, optimizer, param_name, save_history=False):

        if isinstance(optimizer, dict):
            self.optimizer_param_groups = [optimizer]
        else:
            self.optimizer_param_groups = optimizer.param_groups
        self.param_name = param_name
        self.save_history = save_history
        self.event_index = 0

    def __call__(self, engine, name=None):

        value = self.get_param()

        for param_group in self.optimizer_param_groups:
            param_group[self.param_name] = value

        if name is None:
            name = self.param_name

        if self.save_history:
            if not hasattr(engine.state, 'param_history'):
                setattr(engine.state, 'param_history', {})
            engine.state.param_history.setdefault(name, [])
            values = [
                pg[self.param_name] for pg in self.optimizer_param_groups
            ]
            engine.state.param_history[name].append(values)

        self.event_index += 1

    @abstractmethod
    def get_param(self):
        """Method to get current optimizer's parameter value
        """
        pass

    @classmethod
    def simulate_values(cls, num_events, **scheduler_kwargs):
        """Method to simulate scheduled values during num_events events.

        Args:
            num_events (int): number of events during the simulation.
            **scheduler_kwargs : parameter scheduler configuration kwargs.

        Returns:
            list of pairs: [event_index, value]

        Examples:

        .. code-block:: python

            lr_values = np.array(LinearCyclicalScheduler.simulate_values(num_events=50, param_name='lr',
                                                                         start_value=1e-1, end_value=1e-3,
                                                                         cycle_size=10))

            plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
            plt.xlabel("events")
            plt.ylabel("values")
            plt.legend()

        """
        keys_to_remove = ['optimizer', 'save_history']
        for key in keys_to_remove:
            if key in scheduler_kwargs:
                del scheduler_kwargs[key]
        values = []
        scheduler = cls(optimizer={}, save_history=False, **scheduler_kwargs)
        for i in range(num_events):
            scheduler(engine=None)
            values.append(
                [i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
        return values
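An illustrative concrete scheduler (the name and decay rule are assumptions, not part of the project): only get_param has to be provided, and event_index, incremented by __call__ above on every event, determines the returned value.

class LinearDecayScheduler(ParamScheduler):
    """Hypothetical scheduler: linear decay from start_value to end_value over `duration` events."""

    def __init__(self, optimizer, param_name, start_value, end_value, duration, save_history=False):
        super(LinearDecayScheduler, self).__init__(optimizer, param_name, save_history=save_history)
        self.start_value = start_value
        self.end_value = end_value
        self.duration = duration

    def get_param(self):
        fraction = min(self.event_index / float(self.duration), 1.0)
        return self.start_value + fraction * (self.end_value - self.start_value)


# Typical wiring (trainer and optimizer assumed to exist):
# scheduler = LinearDecayScheduler(optimizer, "lr", start_value=1e-1, end_value=1e-3, duration=1000)
# trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)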
Code example #4
class BaseHandler(with_metaclass(ABCMeta, object)):
    @abstractmethod
    def __call__(self, *args, **kwargs):
        pass
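BaseHandler only fixes the callable interface expected by the engine's event system; a trivial sketch of a concrete subclass (the printing behaviour is an assumption):

class PrintIterationHandler(BaseHandler):
    """Hypothetical handler: prints the engine's current iteration whenever it is fired."""

    def __call__(self, engine, *args, **kwargs):
        print("iteration:", engine.state.iteration)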
Code example #5
from abc import ABCMeta, abstractmethod

from ignite._six import with_metaclass


class ParamScheduler(with_metaclass(ABCMeta, object)):
    """An abstract class for updating an optimizer's parameter value during
    training.

    Args:
        optimizer (`torch.optim.Optimizer` or dict): the optimizer or parameters group to use.
        param_name (str): name of optimizer's parameter to update.
        save_history (bool, optional): whether to log the parameter values to
            `engine.state.param_history`, (default=False).


    Note:
        Parameter scheduler works independently of the internal state of the attached optimizer.
        More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
        sets defined absolute values.

    """

    def __init__(self, optimizer, param_name, save_history=False):

        if isinstance(optimizer, dict):
            self.optimizer_param_groups = [optimizer]
        else:
            self.optimizer_param_groups = optimizer.param_groups
        self.param_name = param_name
        self.save_history = save_history
        self.event_index = 0

    def __call__(self, engine, name=None):

        value = self.get_param()

        for param_group in self.optimizer_param_groups:
            param_group[self.param_name] = value

        if name is None:
            name = self.param_name

        if self.save_history:
            if not hasattr(engine.state, 'param_history'):
                setattr(engine.state, 'param_history', {})
            engine.state.param_history.setdefault(name, [])
            values = [pg[self.param_name] for pg in self.optimizer_param_groups]
            engine.state.param_history[name].append(values)

        self.event_index += 1

    @abstractmethod
    def get_param(self):
        """Method to get current optimizer's parameter value
        """
        pass

    @classmethod
    def simulate_values(cls, num_events, **scheduler_kwargs):
        """Method to simulate scheduled values during `num_events` events.

        Args:
            num_events (int): number of events during the simulation.
            **scheduler_kwargs : parameter scheduler configuration kwargs.

        Returns:
            list of pairs: [event_index, value]

        Examples:

        .. code-block:: python

            lr_values = np.array(LinearCyclicalScheduler.simulate_values(num_events=50, param_name='lr',
                                                                         start_value=1e-1, end_value=1e-3,
                                                                         cycle_size=10))

            plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
            plt.xlabel("events")
            plt.ylabel("values")
            plt.legend()

        """
        keys_to_remove = ['optimizer', 'save_history']
        for key in keys_to_remove:
            if key in scheduler_kwargs:
                del scheduler_kwargs[key]
        values = []
        scheduler = cls(optimizer={}, save_history=False, **scheduler_kwargs)
        for i in range(num_events):
            scheduler(engine=None)
            values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
        return values

    @classmethod
    def plot_values(cls, num_events, **scheduler_kwargs):
        """Method to plot simulated scheduled values during `num_events` events.

        This class requires `matplotlib package <https://matplotlib.org/>`_ to be installed:

        .. code-block:: bash

            pip install matplotlib

        Args:
            num_events (int): number of events during the simulation.
            **scheduler_kwargs : parameter scheduler configuration kwargs.

        Returns:
            matplotlib.lines.Line2D

        Examples:

            .. code-block:: python

                import matplotlib.pylab as plt

                plt.figure(figsize=(10, 7))
                LinearCyclicalScheduler.plot_values(num_events=50, param_name='lr',
                                                    start_value=1e-1, end_value=1e-3, cycle_size=10)
        """
        try:
            import matplotlib.pylab as plt
        except ImportError:
            raise RuntimeError("This method requires matplotlib to be installed. "
                               "Please install it with command: \n pip install matplotlib")

        values = cls.simulate_values(num_events=num_events, **scheduler_kwargs)
        label = scheduler_kwargs.get("param_name", "learning rate")
        ax = plt.plot([e for e, _ in values], [v for _, v in values], label=label)
        plt.legend()
        plt.grid(which='both')
        return ax
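A runnable usage sketch (dummy engine; the scheduler import path is an assumption that may differ across ignite versions) exercising the save_history branch of __call__ above: each event appends the per-group parameter values to engine.state.param_history under the parameter name.

import torch

from ignite.engine import Engine, Events
# Concrete schedulers live in ignite.contrib.handlers in older releases (ignite.handlers in newer ones).
from ignite.contrib.handlers import LinearCyclicalScheduler

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-1)


def train_step(engine, batch):
    return 0.0  # dummy process function, enough to drive the events


trainer = Engine(train_step)
scheduler = LinearCyclicalScheduler(optimizer, "lr", start_value=1e-1, end_value=1e-3,
                                    cycle_size=10, save_history=True)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.run([0] * 20, max_epochs=1)

# __call__ appended one entry per event; each entry is a list with one value per optimizer param group.
print(trainer.state.param_history["lr"][:3])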
Code example #6
File: param_scheduler.py  Project: rmporsch/ignite
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping
from copy import copy

from torch.optim.optimizer import Optimizer

from ignite._six import with_metaclass

# Note: _get_fake_optimizer (used by simulate_values below) is a module-level
# helper defined elsewhere in param_scheduler.py.


class ParamScheduler(with_metaclass(ABCMeta, object)):
    """An abstract class for updating an optimizer's parameter value during
    training.

    Args:
        optimizer (`torch.optim.Optimizer`): the optimizer whose parameter value will be updated.
        param_name (str): name of optimizer's parameter to update.
        save_history (bool, optional): whether to log the parameter values to
            `engine.state.param_history`, (default=False).
        param_group_index (int, optional): optimizer's parameters group to use

    Note:
        Parameter scheduler works independently of the internal state of the attached optimizer.
        More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
        sets defined absolute values.

    """
    def __init__(self,
                 optimizer,
                 param_name,
                 save_history=False,
                 param_group_index=None):

        if not isinstance(optimizer, Optimizer):
            raise TypeError(
                "Argument optimizer should be torch.optim.Optimizer")

        self.optimizer = optimizer
        self.param_group_index = param_group_index
        self.param_name = param_name
        self.save_history = save_history
        self.event_index = 0
        self._state_attrs = [
            'event_index', 'param_name', 'save_history', 'param_group_index'
        ]

    def __call__(self, engine, name=None):

        value = self.get_param()

        for param_group in self.optimizer_param_groups:
            param_group[self.param_name] = value

        if name is None:
            name = self.param_name

        if self.save_history:
            if not hasattr(engine.state, 'param_history'):
                setattr(engine.state, 'param_history', {})
            engine.state.param_history.setdefault(name, [])
            values = [
                pg[self.param_name] for pg in self.optimizer_param_groups
            ]
            engine.state.param_history[name].append(values)

        self.event_index += 1

    @property
    def optimizer_param_groups(self):
        if self.param_group_index is None:
            return self.optimizer.param_groups
        return [
            self.optimizer.param_groups[self.param_group_index],
        ]

    def state_dict(self):
        """Returns a dictionary containing a whole state of ParamScheduler.

        Returns:
            dict:
                a dictionary containing a whole state of ParamScheduler
        """
        destination = OrderedDict()
        for name in self._state_attrs:
            if hasattr(self, name):
                val = getattr(self, name)
                if hasattr(val, 'state_dict'):
                    val = val.state_dict()
                destination[name] = copy(val)
        return destination

    def load_state_dict(self, state_dict):
        """Copies parameters from :attr:`state_dict` into this ParamScheduler.

        Args:
            state_dict (dict): a dict containing parameters.
        """
        if not isinstance(state_dict, Mapping):
            raise TypeError(
                "Argument state_dict should be a dictionary, but given {}".
                format(type(state_dict)))

        for name in self._state_attrs:
            if name not in state_dict:
                raise ValueError(
                    "Required state attribute '{}' is absent in provided state_dict '{}'"
                    .format(name, state_dict.keys()))
            val = state_dict[name]
            obj = getattr(self, name)
            if isinstance(val, Mapping) and hasattr(obj, 'load_state_dict'):
                obj.load_state_dict(val)
            else:
                setattr(self, name, val)

    @abstractmethod
    def get_param(self):
        """Method to get current optimizer's parameter value
        """
        pass

    @classmethod
    def simulate_values(cls, num_events, **scheduler_kwargs):
        """Method to simulate scheduled values during `num_events` events.

        Args:
            num_events (int): number of events during the simulation.
            **scheduler_kwargs : parameter scheduler configuration kwargs.

        Returns:
            list of pairs: [event_index, value]

        Examples:

        .. code-block:: python

            lr_values = np.array(LinearCyclicalScheduler.simulate_values(num_events=50, param_name='lr',
                                                                         start_value=1e-1, end_value=1e-3,
                                                                         cycle_size=10))

            plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
            plt.xlabel("events")
            plt.ylabel("values")
            plt.legend()

        """
        keys_to_remove = ['optimizer', 'save_history']
        for key in keys_to_remove:
            if key in scheduler_kwargs:
                del scheduler_kwargs[key]
        values = []
        scheduler = cls(optimizer=_get_fake_optimizer(),
                        save_history=False,
                        **scheduler_kwargs)
        for i in range(num_events):
            scheduler(engine=None)
            values.append(
                [i, scheduler.optimizer_param_groups[0][scheduler.param_name]])
        return values

    @classmethod
    def plot_values(cls, num_events, **scheduler_kwargs):
        """Method to plot simulated scheduled values during `num_events` events.

        This class requires `matplotlib package <https://matplotlib.org/>`_ to be installed:

        .. code-block:: bash

            pip install matplotlib

        Args:
            num_events (int): number of events during the simulation.
            **scheduler_kwargs : parameter scheduler configuration kwargs.

        Returns:
            matplotlib.lines.Line2D

        Examples:

            .. code-block:: python

                import matplotlib.pylab as plt

                plt.figure(figsize=(10, 7))
                LinearCyclicalScheduler.plot_values(num_events=50, param_name='lr',
                                                    start_value=1e-1, end_value=1e-3, cycle_size=10)
        """
        try:
            import matplotlib.pylab as plt
        except ImportError:
            raise RuntimeError(
                "This method requires matplotlib to be installed. "
                "Please install it with command: \n pip install matplotlib")

        values = cls.simulate_values(num_events=num_events, **scheduler_kwargs)
        label = scheduler_kwargs.get("param_name", "learning rate")
        ax = plt.plot([e for e, _ in values], [v for _, v in values],
                      label=label)
        plt.legend()
        plt.grid(which='both')
        return ax
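A short sketch of the state_dict/load_state_dict round trip added in this version. LinearCyclicalScheduler is a concrete subclass shipped with ignite (its import path and the extra attributes it adds to _state_attrs may vary by version); the saved event_index is restored, so a reloaded scheduler resumes from the same position.

import torch

from ignite.contrib.handlers import LinearCyclicalScheduler  # path may differ by ignite version

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-1)

scheduler = LinearCyclicalScheduler(optimizer, "lr", start_value=1e-1, end_value=1e-3, cycle_size=10)
for _ in range(7):
    scheduler(engine=None)          # engine is not touched when save_history is False

saved = scheduler.state_dict()      # OrderedDict of the attributes listed in _state_attrs

restored = LinearCyclicalScheduler(optimizer, "lr", start_value=1e-1, end_value=1e-3, cycle_size=10)
restored.load_state_dict(saved)
assert restored.event_index == 7    # the restored scheduler continues from the saved position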