Example #1
def setUp(self):
    self.parent = optimizer.Hyperparameter()
    self.parent.x = 1
    self.parent.y = 2
    self.child = optimizer.Hyperparameter(self.parent)
    self.child.y = 3
    self.child.z = 4
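For context, the fixture above exercises Hyperparameter's attribute delegation: a child falls back to its parent for any attribute it does not override. A minimal sketch of the behaviour under test, using the same values as the setUp above:

from chainer import optimizer

parent = optimizer.Hyperparameter()
parent.x = 1
parent.y = 2

child = optimizer.Hyperparameter(parent)  # lookups fall back to the parent
child.y = 3
child.z = 4

print(child.x)  # 1 -- inherited from the parent
print(child.y)  # 3 -- shadowed by the child's own value
print(child.z)  # 4 -- defined only on the child
print(child.get_dict())  # {'x': 1, 'y': 3, 'z': 4} -- merged view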
Example #2
from __future__ import division
import math

import numpy

from chainer import backend
from chainer.backends import cuda
from chainer import optimizer

_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.alpha = 0.001
_default_hyperparam.beta1 = 0.9
_default_hyperparam.beta2 = 0.999
_default_hyperparam.eps = 1e-8
_default_hyperparam.eta = 1.0
_default_hyperparam.weight_decay_rate = 0
_default_hyperparam.amsgrad = False


def _learning_rate(hp, t):
    if t == 0:
        raise RuntimeError(
            'Can\'t determine the learning rate of Adam optimizer '
            'because the update steps have not been started.')
    fix1 = 1. - math.pow(hp.beta1, t)
    fix2 = 1. - math.pow(hp.beta2, t)
    return hp.alpha * math.sqrt(fix2) / fix1


class AdamRule(optimizer.UpdateRule):
    """Update rule of Adam optimization algorithm.
Example #3
File: sgd.py Project: zwcdp/chainer
from chainer.backends import intel64
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class SGDHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of vanilla stochastic gradient descent.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: SGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01


class SGDRule(optimizer.UpdateRule):
    """Update rule of vanilla stochastic gradient descent.

    See :class:`~chainer.optimizers.SGD` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.

    """
Example #4
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class MSVAGHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of M-SVAG.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float
        beta = None  # type: float
        eta = None  # type: float
        weight_decay_rate = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: MSVAGHyperparameter # NOQA
_default_hyperparam.lr = 0.1
_default_hyperparam.beta = 0.9
_default_hyperparam.eta = 1.0
_default_hyperparam.weight_decay_rate = 0


class MSVAGRule(optimizer.UpdateRule):
    """Update rule of the M-SVAG optimization algorithm.

    See: `Dissecting Adam: The Sign, Magnitude and Variance of Stochastic \
          Gradients <https://arxiv.org/abs/1705.07774>`_

    Modified for proper weight decay.

    See: `Fixing Weight Decay Regularization in Adam \
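A note on the recurring tpe.Protocol pattern in these excerpts: the protocol class exists only for PEP 544 compliant static type checkers, while at runtime Hyperparameter stays a plain dynamic attribute container; the "# type:" comment tells the checker which attributes to expect. A standalone sketch of the same idea, with hypothetical names:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing_extensions import Protocol

    class MyHyperparameter(Protocol):
        """Structural type: anything with these attributes matches."""
        lr = None    # type: float
        beta = None  # type: float


class Container(object):
    """Stands in for optimizer.Hyperparameter in this sketch."""


hp = Container()  # type: MyHyperparameter # NOQA
hp.lr = 0.1    # the checker knows lr is a float
hp.beta = 0.9  # ...and beta too; a typo like hp.betta would be flagged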
Example #5
from chainer import optimizer
from chainer import types


if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class CorrectedMomentumSGDHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of corrected momentum SGD.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float
        momentum = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: CorrectedMomentumSGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9


class CorrectedMomentumSGDRule(optimizer.UpdateRule):

    """Update rule for the corrected momentum SGD.

    See :class:`~chainer.optimizers.CorrectedMomentumSGD` for the default
    values of the hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.

    """
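What "corrected" presumably refers to (an assumption on my part; the excerpt cuts off before the update code): the velocity accumulates raw gradients and lr is applied only when the step is taken, so changing lr mid-training immediately rescales the effective step, unlike classic momentum SGD where lr is folded into the velocity. A numpy sketch of the two conventions:

import numpy as np

lr, momentum = 0.01, 0.9
grad = np.array([1.0, -2.0])

# Classic momentum SGD: lr baked into the velocity.
v = np.zeros(2)
v = momentum * v - lr * grad
step_classic = v

# Corrected variant (assumed): raw gradients in v, lr applied at the end.
v = np.zeros(2)
v = momentum * v - grad
step_corrected = lr * v

print(step_classic, step_corrected)  # identical on step one ([-0.01  0.02]);
                                     # they diverge once lr changes mid-run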
Example #6
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class RMSpropHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of RMSprop.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float
        alpha = None  # type: float
        eps = None  # type: float
        eps_inside_sqrt = None  # type: bool


_default_hyperparam = optimizer.Hyperparameter()  # type: RMSpropHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.alpha = 0.99
_default_hyperparam.eps = 1e-8
_default_hyperparam.eps_inside_sqrt = False


class RMSpropRule(optimizer.UpdateRule):
    """Update rule for RMSprop.

    See :class:`~chainer.optimizers.RMSprop` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.

    """
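A numpy sketch of the textbook RMSprop step these hyperparameters drive (standard formulation; presumably eps_inside_sqrt=True moves eps under the square root, i.e. sqrt(ms + eps)):

import numpy as np

lr, alpha, eps = 0.01, 0.99, 1e-8
grad = np.array([0.5, -1.0])
ms = np.zeros_like(grad)     # running mean of squared gradients
param = np.zeros_like(grad)

ms = alpha * ms + (1 - alpha) * grad * grad
param -= lr * grad / (np.sqrt(ms) + eps)  # eps outside the sqrt (default)
print(param)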
Example #7
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class AdaDeltaHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of Zeiler's ADADELTA.

        This is only for PEP 544 compliant static type checkers.
        """
        rho = None  # type: float
        eps = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: AdaDeltaHyperparameter # NOQA
_default_hyperparam.rho = 0.95
_default_hyperparam.eps = 1e-6


class AdaDeltaRule(optimizer.UpdateRule):
    """Update rule of Zeiler's ADADELTA.

    See :class:`~chainer.optimizers.AdaDelta` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        rho (float): Exponential decay rate of the first and second order
            moments.

    """
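A numpy sketch of Zeiler's ADADELTA step with the defaults above (rho=0.95, eps=1e-6); note the absence of a learning rate: the ratio of two running RMS estimates sets the step scale.

import numpy as np

rho, eps = 0.95, 1e-6
grad = np.array([0.5, -1.0])
msg = np.zeros_like(grad)    # running average of squared gradients
msdx = np.zeros_like(grad)   # running average of squared updates
param = np.zeros_like(grad)

msg = rho * msg + (1 - rho) * grad * grad
dx = -np.sqrt((msdx + eps) / (msg + eps)) * grad
msdx = rho * msdx + (1 - rho) * dx * dx
param += dx
print(param)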
Example #8
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class AdaGradHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of AdaGrad.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float
        eps = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: AdaGradHyperparameter # NOQA
_default_hyperparam.lr = 0.001
_default_hyperparam.eps = 1e-8


class AdaGradRule(optimizer.UpdateRule):
    """Update rule of AdaGrad.

    See :class:`~chainer.optimizers.AdaGrad` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.
        eps (float): Small value for the numerical stability.

    """
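A numpy sketch of the AdaGrad step (Duchi et al.) with the defaults above; the accumulator h only grows, so each element's effective step shrinks over time.

import numpy as np

lr, eps = 0.001, 1e-8
grad = np.array([0.5, -1.0])
h = np.zeros_like(grad)      # per-element sum of squared gradients
param = np.zeros_like(grad)

h += grad * grad
param -= lr * grad / (np.sqrt(h) + eps)
print(param)  # elements with a history of large gradients move less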
Example #9
from chainer import optimizer
from chainer import types

if types.TYPE_CHECKING:
    import typing_extensions as tpe

    class NesterovAGHyperparameter(tpe.Protocol):
        """Protocol class for hyperparameter of Nesterov's Accelerated Gradient.

        This is only for PEP 544 compliant static type checkers.
        """
        lr = None  # type: float
        momentum = None  # type: float


_default_hyperparam = optimizer.Hyperparameter()  # type: NesterovAGHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9


class NesterovAGRule(optimizer.UpdateRule):
    """Update rule for Nesterov's Accelerated Gradient.

    See :class:`~chainer.optimizers.NesterovAG` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.
        momentum (float): Exponential decay rate of the first order moment.

    """
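A numpy sketch of a common parameter-space reformulation of Nesterov's accelerated gradient (the form used by several frameworks; an assumption about the exact variant this rule implements):

import numpy as np

lr, momentum = 0.01, 0.9
grad = np.array([0.5, -1.0])
v = np.zeros_like(grad)
param = np.zeros_like(grad)

# Update the velocity, then take a lookahead-corrected step.
v_prev = v.copy()
v = momentum * v - lr * grad
param += -momentum * v_prev + (1 + momentum) * v
print(param)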