Example #1
def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                       end_learning_rate, power, beta1, beta2, eps, weight_decay, prim_name):
    """Check the type of inputs."""
    validator.check_value_type("start_learning_rate", start_learning_rate, [float], prim_name)
    validator.check_number_range("start_learning_rate rate", start_learning_rate, 0.0, float("inf"), Rel.INC_LEFT,
                                 prim_name)
    validator.check_value_type("end_learning_rate", end_learning_rate, [float], prim_name)
    validator.check_number_range("end_learning_rate", end_learning_rate, 0.0, float("inf"), Rel.INC_LEFT,
                                 prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
    validator.check_integer('warmup_steps', warmup_steps, 0, Rel.GE, prim_name)
    validator.check_value_type("beta1", beta1, [float], prim_name)
    validator.check_value_type("beta2", beta2, [float], prim_name)
    validator.check_value_type("eps", eps, [float], prim_name)
    validator.check_value_type(
        "weight_decay", weight_decay, [float], prim_name)
    validator.check_number_range(
        "beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "eps", eps, 0.0, float("inf"), Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
Example #2
def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair):
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_legal_value('learning_rate', learning_rate, None)
    validator.check_float_positive('decay_rate', decay_rate, None)
    validator.check_float_legal_value('decay_rate', decay_rate, None)
    validator.check_value_type('is_stair', is_stair, [bool], None)
Example #3
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
    """Check the type of inputs."""
    validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
    validator.check_number_range("learning_rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
    validator.check_value_type("end_learning_rate", end_learning_rate, [float], prim_name)
    validator.check_number_range("end_learning_rate", end_learning_rate, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
Example #4
def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch,
                  decay_epoch, is_stair):
    validator.check_positive_int(total_step, 'total_step')
    validator.check_positive_int(step_per_epoch, 'step_per_epoch')
    validator.check_positive_int(decay_epoch, 'decay_epoch')
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_legal_value('learning_rate', learning_rate, None)
    validator.check_float_positive('decay_rate', decay_rate, None)
    validator.check_float_legal_value('decay_rate', decay_rate, None)
    validator.check_value_type('is_stair', is_stair, [bool], None)
Example #5
    def _init_group_params(self, parameters, learning_rate, weight_decay):
        """Init learning rate or weight decay in group params."""
        origin_dynamic_lr = self.dynamic_lr
        self._parse_group_params(parameters, learning_rate)
        if self.dynamic_lr and not origin_dynamic_lr:
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32), name='global_step')

        params_store = []
        for group_param in parameters:
            if 'order_params' in group_param.keys():
                ordered_parameters = group_param['order_params']
                continue

            self.group_params += group_param['params']
            if 'lr' in group_param.keys():
                params_dynamic_lr = isinstance(group_param['lr'], (Iterable, Tensor))
                if self.dynamic_lr and not params_dynamic_lr:
                    lr = Tensor(np.array([group_param['lr']] * self.dynamic_lr_length).astype(np.float32))
                else:
                    lr = self._get_single_lr(group_param['lr'])
            else:
                if self.dynamic_lr and not origin_dynamic_lr:
                    lr = Tensor(np.array([self.scalar_lr] * self.dynamic_lr_length).astype(np.float32))
                else:
                    lr = learning_rate

            if 'weight_decay' in group_param.keys():
                validator.check_float_legal_value('weight_decay', group_param['weight_decay'], None)
                validator.check_number_range('weight_decay', group_param['weight_decay'], 0.0, float("inf"),
                                             Rel.INC_LEFT, self.cls_name)
                weight_decay_ = group_param['weight_decay'] * self.loss_scale
            else:
                weight_decay_ = weight_decay * self.loss_scale

            for key in group_param.keys():
                if key not in ('params', 'lr', 'weight_decay'):
                    logger.warning(f"The optimizer cannot parse '{key}' when setting parameter groups.")

            for param in group_param['params']:
                validator.check_value_type("parameter", param, [Parameter], self.cls_name)
                if param.name in params_store:
                    raise RuntimeError(f"The {param.name} parameter has appeared in parameter groups.")

                params_store.append(param.name)
                self.group_lr.append(Parameter(lr, name="lr_" + param.name))
                self.group_weight_decay.append(weight_decay_)

        if self.is_group_params_ordered:
            self._order_and_adjust_group_params(ordered_parameters, learning_rate, weight_decay)
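For context, the parameters this method parses are a list of group dicts keyed by 'params', optionally 'lr' and 'weight_decay', plus at most one 'order_params' entry; any other key only triggers the warning above. A minimal sketch of that structure (strings stand in for real Parameter objects so it runs without MindSpore):

conv_params = ["conv1.weight", "conv2.weight"]
dense_params = ["dense.weight", "dense.bias"]

group_params = [
    {"params": conv_params, "lr": 0.01, "weight_decay": 0.0001},  # static per-group lr and weight decay
    {"params": dense_params, "lr": [0.1, 0.05, 0.01]},            # dynamic per-group lr, one value per step
    {"order_params": conv_params + dense_params},                 # fixes the overall parameter order
]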
Example #6
def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
    r"""
    Calculate learning rate based on the cosine decay function.

    For the i-th step, the formula of computing decayed_learning_rate[i] is:

    .. math::
        decayed\_learning\_rate[i] = min\_learning\_rate + 0.5 * (max\_learning\_rate - min\_learning\_rate) *
        (1 + cos(\frac{current\_epoch}{decay\_epoch}\pi))

    Where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`.

    Args:
        min_lr (float): The minimum value of learning rate.
        max_lr (float): The maximum value of learning rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.

    Returns:
        list[float]. The size of the list is `total_step`.

    Examples:
        >>> min_lr = 0.01
        >>> max_lr = 0.1
        >>> total_step = 6
        >>> step_per_epoch = 2
        >>> decay_epoch = 2
        >>> cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
        [0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]
    """
    if not isinstance(min_lr, float):
        raise TypeError("min_lr must be float.")
    validator.check_number_range("min_lr", min_lr, 0.0, float("inf"),
                                 Rel.INC_LEFT, None)
    validator.check_float_positive('max_lr', max_lr, None)
    validator.check_float_legal_value('max_lr', max_lr, None)
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    if min_lr >= max_lr:
        raise ValueError('`max_lr` should be greater than `min_lr`.')

    delta = 0.5 * (max_lr - min_lr)
    lr = []
    for i in range(total_step):
        tmp_epoch = min(math.floor(i / step_per_epoch), decay_epoch)
        lr.append(min_lr + delta *
                  (1 + math.cos(math.pi * tmp_epoch / decay_epoch)))
    return lr
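The docstring example can be reproduced with plain Python, which makes the role of the capped epoch index min(floor(i / step_per_epoch), decay_epoch) explicit. This is a standalone sketch, not the library code:

import math

min_lr, max_lr, total_step, step_per_epoch, decay_epoch = 0.01, 0.1, 6, 2, 2
delta = 0.5 * (max_lr - min_lr)
lr = [min_lr + delta * (1 + math.cos(math.pi * min(i // step_per_epoch, decay_epoch) / decay_epoch))
      for i in range(total_step)]
print(lr)  # [0.1, 0.1, 0.055..., 0.055..., 0.01, 0.01], matching the docstring example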
Example #7
def piecewise_constant_lr(milestone, learning_rates):
    r"""
    Get piecewise constant learning rate.

    Calculate learning rate by the given `milestone` and `learning_rates`. Let the value of `milestone` be
    :math:`(M_1, M_2, ..., M_N)` and the value of `learning_rates` be :math:`(x_1, x_2, ..., x_N)`. N is the length of
    `milestone`. Let the output learning rate be `y`.

    .. math::
        y[i] = x_t,\ for\ i \in [M_{t-1}, M_t)

    Args:
        milestone (Union[list[int], tuple[int]]): A list of milestone steps. This list must be monotonically
            increasing, and every element must be greater than 0.
        learning_rates (Union[list[float], tuple[float]]): A list of learning rates.

    Returns:
        list[float]. The size of the list is :math:`M_N`.

    Examples:
        >>> milestone = [2, 5, 10]
        >>> learning_rates = [0.1, 0.05, 0.01]
        >>> piecewise_constant_lr(milestone, learning_rates)
        [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]
    """
    validator.check_value_type('milestone', milestone, (tuple, list), None)
    validator.check_value_type('learning_rates', learning_rates, (tuple, list),
                               None)
    if len(milestone) != len(learning_rates):
        raise ValueError(
            'The size of `milestone` must be the same as the size of `learning_rates`.'
        )

    lr = []
    last_item = 0
    for i, item in enumerate(milestone):
        validator.check_integer(f'milestone[{i}]', item, 0, Rel.GT, None)
        validator.check_float_legal_value(f'learning_rates[{i}]',
                                          learning_rates[i], None)
        if item < last_item:
            raise ValueError(
                f'The value of milestone[{i}] must be greater than milestone[{i - 1}]'
            )
        lr += [learning_rates[i]] * (item - last_item)
        last_item = item

    return lr
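Stripped of the validator calls, the schedule simply repeats each learning rate until its milestone step is reached. A plain-Python sketch reproducing the docstring example:

milestone = [2, 5, 10]
learning_rates = [0.1, 0.05, 0.01]

lr, last = [], 0
for step, rate in zip(milestone, learning_rates):
    lr += [rate] * (step - last)  # hold `rate` from step `last` (inclusive) up to `step` (exclusive)
    last = step
print(lr)  # [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01]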
Example #8
def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power,
                        update_decay_epoch=False):
    r"""
    Calculate learning rate based on the polynomial decay function.

    For the i-th step, the formula of computing decayed_learning_rate[i] is:

    .. math::
        decayed\_learning\_rate[i] = (learning\_rate - end\_learning\_rate) *
        (1 - tmp\_epoch / tmp\_decay\_epoch)^{power} + end\_learning\_rate

    Where :math:`tmp\_epoch=min(current\_epoch, decay\_epoch),\ current\_epoch=floor(\frac{i}{step\_per\_epoch})`, and
    :math:`tmp\_decay\_epoch = decay\_epoch`. If `update_decay_epoch` is true, update the value of `tmp_decay_epoch`
    every epoch. The formula is :math:`tmp\_decay\_epoch = decay\_epoch * ceil(current\_epoch / decay\_epoch)`.

    Args:
        learning_rate (float): The initial value of learning rate.
        end_learning_rate (float): The end value of learning rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps per epoch.
        decay_epoch (int): A value used to calculate decayed learning rate.
        power (float): A value used to calculate decayed learning rate. This parameter should be greater than 0.
        update_decay_epoch (bool): If true, update `decay_epoch`. Default: False.

    Returns:
        list[float]. The size of the list is `total_step`.

    Examples:
        >>> learning_rate = 0.1
        >>> end_learning_rate = 0.01
        >>> total_step = 6
        >>> step_per_epoch = 2
        >>> decay_epoch = 2
        >>> power = 0.5
        >>> polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
        [0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]
    """
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_legal_value('learning_rate', learning_rate, None)
    validator.check_float_positive('end_learning_rate', end_learning_rate, None)
    validator.check_float_legal_value('end_learning_rate', end_learning_rate, None)
    validator.check_float_positive('power', power, None)
    validator.check_float_legal_value('power', power, None)
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    validator.check_value_type('update_decay_epoch', update_decay_epoch, [bool], None)

    origin_decay_epoch = decay_epoch
    function = lambda x, y: (x, min(x, y))
    if update_decay_epoch:
        function = lambda x, y: (origin_decay_epoch * max(math.ceil(y / origin_decay_epoch), 1), y)

    lr = []
    delta = learning_rate - end_learning_rate
    for i in range(total_step):
        current_epoch = math.floor(i / step_per_epoch)
        decay_epoch, tmp_epoch = function(decay_epoch, current_epoch)
        lr.append(delta * (1 - tmp_epoch / decay_epoch) ** power + end_learning_rate)
    return lr
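With update_decay_epoch left at its default of False, tmp_decay_epoch stays equal to decay_epoch and the docstring example reduces to the following plain-Python sketch (not the library code):

learning_rate, end_learning_rate = 0.1, 0.01
total_step, step_per_epoch, decay_epoch, power = 6, 2, 2, 0.5

delta = learning_rate - end_learning_rate
lr = []
for i in range(total_step):
    tmp_epoch = min(i // step_per_epoch, decay_epoch)  # current epoch, capped at decay_epoch
    lr.append(delta * (1 - tmp_epoch / decay_epoch) ** power + end_learning_rate)
print(lr)  # [0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]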
Example #9
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
    """Check the type of inputs."""
    validator.check_float_positive('learning_rate', learning_rate, prim_name)
    validator.check_float_legal_value('learning_rate', learning_rate, prim_name)
    validator.check_float_positive('end_learning_rate', end_learning_rate, prim_name)
    validator.check_float_legal_value('end_learning_rate', end_learning_rate, prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
Example #10
    def _init_group_params(self, parameters, learning_rate, weight_decay):
        """Init learning rate or weight decay in group params."""
        origin_dynamic_lr = self.dynamic_lr
        if self.dynamic_lr:
            dynamic_lr_length = learning_rate.size()
        else:
            dynamic_lr_length = 0

        for group_param in parameters:
            lr_length = dynamic_lr_length
            if 'lr' in group_param.keys():
                self._get_single_lr(group_param['lr'])
                if isinstance(group_param['lr'], Iterable):
                    lr_length = len(group_param['lr'])
                    self.dynamic_lr = True
                elif isinstance(group_param['lr'], Tensor):
                    lr_length = group_param['lr'].size()
                    self.dynamic_lr = True
            if dynamic_lr_length not in (lr_length, 0):
                raise ValueError(
                    "The dynamic learning rate in group should be the same size."
                )
            dynamic_lr_length = lr_length

        if self.dynamic_lr and not origin_dynamic_lr:
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')

        params_store = []
        for group_param in parameters:
            self.params += group_param['params']
            if 'lr' in group_param.keys():
                params_dynamic_lr = isinstance(group_param['lr'],
                                               (Iterable, Tensor))

                if self.dynamic_lr and not params_dynamic_lr:
                    lr = Tensor(
                        np.array([group_param['lr']] *
                                 dynamic_lr_length).astype(np.float32))
                else:
                    lr = self._get_single_lr(group_param['lr'])
            else:
                if self.dynamic_lr and not origin_dynamic_lr:
                    lr = Tensor(
                        np.array([self.scalar_lr] * dynamic_lr_length).astype(
                            np.float32))
                else:
                    lr = learning_rate

            if 'weight_decay' in group_param.keys():
                validator.check_float_legal_value('weight_decay',
                                                  group_param['weight_decay'],
                                                  None)
                validator.check_number_range('weight_decay',
                                             group_param['weight_decay'], 0.0,
                                             float("inf"), Rel.INC_LEFT,
                                             self.cls_name)
                weight_decay_ = group_param['weight_decay'] * self.loss_scale
            else:
                weight_decay_ = weight_decay * self.loss_scale

            for param in group_param['params']:
                if param in params_store:
                    raise RuntimeError(
                        f"The {param.name} parameter has appeared in parameter groups."
                    )
                params_store.append(param)
                self.group_lr.append(Parameter(lr, name="lr_" + param.name))
                self.group_weight_decay.append(weight_decay_)
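The first loop above only verifies that every group supplying a dynamic learning rate uses the same number of steps. A standalone sketch of that consistency check, with plain lists standing in for Iterable or Tensor learning rates:

def check_group_lr_lengths(groups):
    # Raise if two groups provide dynamic learning rates of different lengths; return the shared length.
    expected = 0  # 0 means no dynamic lr seen yet
    for group in groups:
        lr = group.get("lr")
        length = len(lr) if isinstance(lr, (list, tuple)) else expected
        if expected not in (length, 0):
            raise ValueError("The dynamic learning rate in group should be the same size.")
        expected = length
    return expected

check_group_lr_lengths([{"lr": [0.1, 0.05]}, {"lr": [0.2, 0.1]}])  # ok, returns 2
# check_group_lr_lengths([{"lr": [0.1, 0.05]}, {"lr": [0.2, 0.1, 0.05]}])  # would raise ValueError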
Example #11
    def __init__(self,
                 learning_rate,
                 parameters,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Optimizer, self).__init__(auto_prefix=False)
        if isinstance(learning_rate, float):
            self.dynamic_lr = False
            self.gather = None
            self.assignadd = None
            self.global_step = None
            validator.check_number_range("learning rate", learning_rate, 0.0,
                                         float("inf"), Rel.INC_LEFT,
                                         self.cls_name)
            learning_rate = Tensor(learning_rate, mstype.float32)
        else:
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')
            if isinstance(learning_rate, Iterable):
                learning_rate = Tensor(
                    np.array(list(learning_rate)).astype(np.float32))
            elif isinstance(learning_rate, Tensor):
                if learning_rate.dim() > 1:
                    raise ValueError(
                        "Learning rate should be a 0 or 1 dim `Tensor`,"
                        f"but got {learning_rate.dim()}.")
                if learning_rate.dim() == 1 and learning_rate.size() < 2:
                    logger.warning(
                        "If want to use the dynamic learning rate, please make sure that the number "
                        "of elements in the list, tuple or tensor passed is greater than 1."
                    )
            else:
                raise TypeError(
                    "Learning rate should be float, Tensor or Iterable.")

        if isinstance(weight_decay, int):
            weight_decay = float(weight_decay)

        validator.check_float_legal_value('weight_decay', weight_decay, None)

        if isinstance(loss_scale, int):
            loss_scale = float(loss_scale)

        validator.check_float_legal_value('loss_scale', loss_scale, None)

        if loss_scale <= 0.0:
            raise ValueError(
                "Loss scale should be greater than 0, but got {}".format(
                    loss_scale))
        self.loss_scale = loss_scale

        if weight_decay < 0.0:
            raise ValueError(
                "Weight decay should be equal or greater than 0, but got {}".
                format(weight_decay))

        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)

        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")