Example #1
def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                       end_learning_rate, power, beta1, beta2, eps, weight_decay, prim_name):
    """Check the type of inputs."""
    validator.check_value_type("start_learning_rate", start_learning_rate, [float], prim_name)
    validator.check_number_range("start_learning_rate rate", start_learning_rate, 0.0, float("inf"), Rel.INC_LEFT,
                                 prim_name)
    validator.check_value_type("end_learning_rate", end_learning_rate, [float], prim_name)
    validator.check_number_range("end_learning_rate", end_learning_rate, 0.0, float("inf"), Rel.INC_LEFT,
                                 prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
    validator.check_integer('warmup_steps', warmup_steps, 0, Rel.GE, prim_name)
    validator.check_value_type("beta1", beta1, [float], prim_name)
    validator.check_value_type("beta2", beta2, [float], prim_name)
    validator.check_value_type("eps", eps, [float], prim_name)
    validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
    validator.check_number_range(
        "beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "eps", eps, 0.0, float("inf"), Rel.INC_NEITHER, prim_name)
    validator.check_number_range(
        "weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
Example #2
def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch,
                  decay_epoch, is_stair):
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_positive('decay_rate', decay_rate, None)
    validator.check_value_type('is_stair', is_stair, [bool], None)
Example #3
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
    """Check the type of inputs."""
    validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
    validator.check_number_range("learning_rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
    validator.check_value_type("end_learning_rate", end_learning_rate, [float], prim_name)
    validator.check_number_range("end_learning_rate", end_learning_rate, 0.0, float("inf"), Rel.INC_LEFT, prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)
Example #4
def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch,
                  decay_epoch, is_stair):
    validator.check_positive_int(total_step, 'total_step')
    validator.check_positive_int(step_per_epoch, 'step_per_epoch')
    validator.check_positive_int(decay_epoch, 'decay_epoch')
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_legal_value('learning_rate', learning_rate, None)
    validator.check_float_positive('decay_rate', decay_rate, None)
    validator.check_float_legal_value('decay_rate', decay_rate, None)
    validator.check_value_type('is_stair', is_stair, [bool], None)
Example #5
def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
    r"""
    Calculate the learning rate based on the cosine decay function.

    For the i-th step, the formula for computing decayed_learning_rate[i] is:

    .. math::
        decayed\_learning\_rate[i] = min\_learning\_rate + 0.5 * (max\_learning\_rate - min\_learning\_rate) *
        (1 + cos(\frac{current\_epoch}{decay\_epoch}\pi))

    Where :math:`current\_epoch = min(floor(\frac{i}{step\_per\_epoch}),\ decay\_epoch)`; the epoch index is
    clamped at `decay_epoch`, so the learning rate stays at `min_lr` once that epoch is reached.

    Args:
        min_lr (float): The minimum value of learning rate.
        max_lr (float): The maximum value of learning rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps per epoch.
        decay_epoch (int): The number of epochs over which the learning rate decays.

    Returns:
        list[float]. The size of the list is `total_step`.

    Examples:
        >>> min_lr = 0.01
        >>> max_lr = 0.1
        >>> total_step = 6
        >>> step_per_epoch = 2
        >>> decay_epoch = 2
        >>> cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
        [0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]
    """
    if not isinstance(min_lr, float):
        raise TypeError("min_lr must be float.")
    validator.check_number_range("min_lr", min_lr, 0.0, float("inf"),
                                 Rel.INC_LEFT, None)
    validator.check_float_positive('max_lr', max_lr, None)
    validator.check_float_legal_value('max_lr', max_lr, None)
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    if min_lr >= max_lr:
        raise ValueError('`max_lr` should be greater than `min_lr`.')

    delta = 0.5 * (max_lr - min_lr)
    lr = []
    for i in range(total_step):
        tmp_epoch = min(math.floor(i / step_per_epoch), decay_epoch)
        lr.append(min_lr + delta *
                  (1 + math.cos(math.pi * tmp_epoch / decay_epoch)))
    return lr
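
The function above depends on MindSpore's internal validators, but the schedule itself is plain Python. A minimal, dependency-free sketch of the same formula (validation omitted) that reproduces the docstring example:

import math

def cosine_decay_lr_sketch(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
    # Same schedule as above: the epoch index is clamped at decay_epoch.
    delta = 0.5 * (max_lr - min_lr)
    return [min_lr + delta * (1 + math.cos(math.pi * min(i // step_per_epoch, decay_epoch) / decay_epoch))
            for i in range(total_step)]

print(cosine_decay_lr_sketch(0.01, 0.1, 6, 2, 2))
# [0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01]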
Example #6
def __init__(self,
             max_val=1.0,
             power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
             filter_size=11,
             filter_sigma=1.5,
             k1=0.01,
             k2=0.03):
    super(MSSSIM, self).__init__()
    validator.check_value_type('max_val', max_val, [int, float],
                               self.cls_name)
    validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
    self.max_val = max_val
    validator.check_value_type('power_factors', power_factors,
                               [tuple, list], self.cls_name)
    self.filter_size = validator.check_integer('filter_size', filter_size,
                                               1, Rel.GE, self.cls_name)
    self.filter_sigma = validator.check_float_positive(
        'filter_sigma', filter_sigma, self.cls_name)
    self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
    self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
    window = _create_window(filter_size, filter_sigma)
    self.level = len(power_factors)
    self.conv = []
    for i in range(self.level):
        self.conv.append(_conv2d(1, 1, filter_size, Tensor(window)))
        self.conv[i].weight.requires_grad = False
    self.multi_convs_list = CellList(self.conv)
    self.weight_tensor = Tensor(power_factors, mstype.float32)
    self.avg_pool = AvgPool2d(kernel_size=2, stride=2, pad_mode='valid')
    self.relu = ReLU()
    self.reduce_mean = P.ReduceMean()
    self.prod = P.ReduceProd()
    self.pow = P.Pow()
    self.pack = P.Pack(axis=-1)
    self.concat = P.Concat(axis=1)
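
`_create_window(filter_size, filter_sigma)` is a private MindSpore helper; presumably it builds the normalized 2-D Gaussian kernel that SSIM-style metrics use for local averaging. A hypothetical NumPy sketch of such a kernel (an assumption about the helper's behavior, not its actual source):

import numpy as np

def gaussian_window(filter_size, sigma):
    # 1-D Gaussian centered on the kernel; the outer product gives
    # a separable, normalized 2-D window.
    coords = np.arange(filter_size, dtype=np.float64) - (filter_size - 1) / 2.0
    g = np.exp(-(coords ** 2) / (2.0 * sigma ** 2))
    g /= g.sum()
    return np.outer(g, g)

window = gaussian_window(11, 1.5)
print(window.shape, window.sum())  # (11, 11), 1.0 up to float rounding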
Example #7
def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
    super(SSIM, self).__init__()
    validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
    validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
    self.max_val = max_val
    self.filter_size = validator.check_integer('filter_size', filter_size, 1, Rel.GE, self.cls_name)
    self.filter_sigma = validator.check_float_positive('filter_sigma', filter_sigma, self.cls_name)
    validator.check_value_type('k1', k1, [float], self.cls_name)
    self.k1 = validator.check_number_range('k1', k1, 0.0, 1.0, Rel.INC_NEITHER, self.cls_name)
    validator.check_value_type('k2', k2, [float], self.cls_name)
    self.k2 = validator.check_number_range('k2', k2, 0.0, 1.0, Rel.INC_NEITHER, self.cls_name)
    self.mean = P.DepthwiseConv2dNative(channel_multiplier=1, kernel_size=filter_size)
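
A hedged usage sketch, assuming a MindSpore release that still ships `nn.SSIM` (the cell was removed in later versions). Inputs are two NCHW float tensors whose spatial size is at least `filter_size`:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

ssim = nn.SSIM(max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = Tensor(np.random.rand(1, 3, 32, 32).astype(np.float32))
img2 = Tensor(np.random.rand(1, 3, 32, 32).astype(np.float32))
score = ssim(img1, img2)  # one SSIM value per image in the batch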
Example #8
def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power,
                        update_decay_epoch=False):
    r"""
    Calculate the learning rate based on the polynomial decay function.

    For the i-th step, the formula for computing decayed_learning_rate[i] is:

    .. math::
        decayed\_learning\_rate[i] = (learning\_rate - end\_learning\_rate) *
        (1 - tmp\_epoch / tmp\_decay\_epoch)^{power} + end\_learning\_rate

    Where :math:`tmp\_epoch=min(current\_epoch, decay\_epoch),\ current\_epoch=floor(\frac{i}{step\_per\_epoch})`, and
    :math:`tmp\_decay\_epoch = decay\_epoch`. If `update_decay_epoch` is True, `tmp_decay_epoch` is recomputed every
    epoch as :math:`tmp\_decay\_epoch = decay\_epoch * ceil(current\_epoch / decay\_epoch)`.

    Args:
        learning_rate (float): The initial value of learning rate.
        end_learning_rate (float): The end value of learning rate.
        total_step (int): The total number of steps.
        step_per_epoch (int): The number of steps per epoch.
        decay_epoch (int): The number of epochs over which the learning rate decays.
        power (float): A value used to calculate decayed learning rate. This parameter should be greater than 0.
        update_decay_epoch (bool): If true, update `decay_epoch`. Default: False.

    Returns:
        list[float]. The size of the list is `total_step`.

    Examples:
        >>> learning_rate = 0.1
        >>> end_learning_rate = 0.01
        >>> total_step = 6
        >>> step_per_epoch = 2
        >>> decay_epoch = 2
        >>> power = 0.5
        >>> polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
        [0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]
    """
    validator.check_float_positive('learning_rate', learning_rate, None)
    validator.check_float_legal_value('learning_rate', learning_rate, None)
    validator.check_float_positive('end_learning_rate', end_learning_rate, None)
    validator.check_float_legal_value('end_learning_rate', end_learning_rate, None)
    validator.check_float_positive('power', power, None)
    validator.check_float_legal_value('power', power, None)
    validator.check_integer('total_step', total_step, 0, Rel.GT, None)
    validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT, None)
    validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT, None)
    validator.check_value_type('update_decay_epoch', update_decay_epoch, [bool], None)

    origin_decay_epoch = decay_epoch
    function = lambda x, y: (x, min(x, y))
    if update_decay_epoch:
        function = lambda x, y: (origin_decay_epoch * max(math.ceil(y / origin_decay_epoch), 1), y)

    lr = []
    delta = learning_rate - end_learning_rate
    for i in range(total_step):
        current_epoch = math.floor(i / step_per_epoch)
        decay_epoch, tmp_epoch = function(decay_epoch, current_epoch)
        lr.append(delta * (1 - tmp_epoch / decay_epoch) ** power + end_learning_rate)
    return lr
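
As with `cosine_decay_lr`, the schedule is plain Python once the validator calls are stripped. A minimal, dependency-free sketch of the same formula that reproduces the docstring example, with the `update_decay_epoch` branch written out explicitly instead of using lambdas:

import math

def polynomial_decay_lr_sketch(lr, end_lr, total_step, step_per_epoch,
                               decay_epoch, power, update_decay_epoch=False):
    out = []
    for i in range(total_step):
        current_epoch = i // step_per_epoch
        if update_decay_epoch:
            # Stretch the decay horizon to the next multiple of decay_epoch.
            tmp_decay_epoch = decay_epoch * max(math.ceil(current_epoch / decay_epoch), 1)
            tmp_epoch = current_epoch
        else:
            tmp_decay_epoch = decay_epoch
            tmp_epoch = min(current_epoch, decay_epoch)
        out.append((lr - end_lr) * (1 - tmp_epoch / tmp_decay_epoch) ** power + end_lr)
    return out

print(polynomial_decay_lr_sketch(0.1, 0.01, 6, 2, 2, 0.5))
# [0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01]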
Example #9
def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
    super(SSIM, self).__init__()
    validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
    validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
    self.max_val = max_val
    self.filter_size = validator.check_integer('filter_size', filter_size, 1, Rel.GE, self.cls_name)
    self.filter_sigma = validator.check_float_positive('filter_sigma', filter_sigma, self.cls_name)
    self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
    self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
    window = _create_window(filter_size, filter_sigma)
    self.conv = _conv2d(1, 1, filter_size, Tensor(window))
    self.conv.weight.requires_grad = False
    self.reduce_mean = P.ReduceMean()
    self.concat = P.Concat(axis=1)
Example #10
def _check_learning_rate_value(learning_rate, end_learning_rate, decay_steps, power, prim_name):
    """Check the type of inputs."""
    validator.check_float_positive('learning_rate', learning_rate, prim_name)
    validator.check_float_legal_value('learning_rate', learning_rate, prim_name)
    validator.check_float_positive('end_learning_rate', end_learning_rate, prim_name)
    validator.check_float_legal_value('end_learning_rate', end_learning_rate, prim_name)
    validator.check_float_positive('power', power, prim_name)
    validator.check_float_legal_value('power', power, prim_name)
    validator.check_integer('decay_steps', decay_steps, 0, Rel.GT, prim_name)