Example #1
    def __init__(self,
                 learning_rate,
                 parameters,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Optimizer, self).__init__()
        if isinstance(learning_rate, float):
            self.dynamic_lr = False
            self.gather = None
            self.assignadd = None
            self.global_step = None
            validator.check_number_range("learning rate", learning_rate, 0.0,
                                         float("inf"), Rel.INC_LEFT)
        else:
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')
            if isinstance(learning_rate, Iterable):
                learning_rate = Tensor(
                    np.array(list(learning_rate)).astype(np.float32))
            elif isinstance(learning_rate, Tensor):
                if learning_rate.dim() > 1:
                    raise ValueError(
                        "Learning rate should be a 0 or 1 dim `Tensor`,"
                        f"but got {learning_rate.dim()}.")
                if learning_rate.dim() == 1 and learning_rate.size() < 2:
                    logger.warning(
                        "If you want to use a dynamic learning rate, make sure "
                        "the number of elements in the list, tuple or Tensor "
                        "passed is greater than 1.")
            else:
                raise TypeError(
                    "Learning rate should be float, Tensor or Iterable.")

        if loss_scale <= 0.0:
            raise ValueError(
                "Loss scale should be greater than 0, but got {}".format(
                    loss_scale))
        if weight_decay < 0.0:
            raise ValueError(
                "Weight decay should be equal or greater than 0, but got {}".
                format(weight_decay))

        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)

        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")
Example #2
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.dynamic_lr = False
        if isinstance(learning_rate, Iterable) or \
                (isinstance(learning_rate, Tensor) and learning_rate.dim() == 1):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)
        self.weight_decay = weight_decay * loss_scale
        self.reciprocal_scale = 1.0 / loss_scale

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
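
A minimal usage sketch for this constructor, assuming the class is exported as mindspore.nn.Adam and that Cells expose trainable_params(), as in the MindSpore releases this code comes from. A plain float keeps the static learning-rate path, while an iterable of per-step rates enables the dynamic path (global_step plus GatherV2):

import mindspore.nn as nn

net = nn.Dense(3, 2)  # any Cell with trainable parameters

# Float learning rate: the static path, self.dynamic_lr stays False.
opt = nn.Adam(net.trainable_params(), learning_rate=1e-3, weight_decay=1e-4)

# Iterable of per-step rates: the dynamic path indexed by global_step.
schedule = [1e-3 * 0.95 ** i for i in range(1000)]
opt_dynamic = nn.Adam(net.trainable_params(), learning_rate=schedule)
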
Example #3
def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                       end_learning_rate, power, beta1, beta2, eps, weight_decay):
    """Check the type and range of inputs."""
    validator.check_type("decay_steps", decay_steps, [int])
    validator.check_type("warmup_steps", warmup_steps, [int])
    validator.check_type("start_learning_rate", start_learning_rate, [float])
    validator.check_type("end_learning_rate", end_learning_rate, [float])
    validator.check_type("power", power, [float])
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_dacay", weight_decay, [float])
    validator.check_number_range("decay_steps", decay_steps, 1, float("inf"), Rel.INC_LEFT)
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"), Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT)
Example #4
def _check_param_value(beta1, beta2, eps, weight_decay):
    """Check the type of inputs."""
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_dacay", weight_decay, [float])
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"),
                                 Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0,
                                 float("inf"), Rel.INC_LEFT)
Example #5
    def __init__(self, learning_rate, parameters):
        super(Optimizer, self).__init__()
        if isinstance(learning_rate, float):
            validator.check_number_range("learning rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT)
        elif isinstance(learning_rate, Iterable):
            learning_rate = Tensor(np.array(list(learning_rate)).astype(np.float32))
        elif isinstance(learning_rate, Tensor):
            if learning_rate.dim() > 1:
                raise ValueError("Learning rate should be a 0 or 1 dim `Tensor`,"
                                 f"but got {learning_rate.dim()}.")
        else:
            raise TypeError("Learning rate should be float, Tensor or Iterable.")

        if isinstance(learning_rate, Tensor) and learning_rate.dim() == 1 and learning_rate.size() < 2:
            logger.warning("If want to use the dynamic learning rate, please make sure that "
                           "the number of elements in the list, tuple or tensor passed is greater than 1.")
        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")
Example #6
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params, weight_decay,
                                   loss_scale, decay_filter)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
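
Unlike Example #2, this variant forwards weight_decay, loss_scale and decay_filter to the Optimizer base class (Example #1) instead of re-deriving them locally, so customizing the filter is a one-liner. A sketch, again assuming the class is exported as mindspore.nn.Adam; the extra 'bias' rule is an assumption added for demonstration:

import mindspore.nn as nn

# Additionally exempt bias parameters from weight decay (illustrative rule).
no_decay = lambda x: all(k not in x.name for k in ('beta', 'gamma', 'bias'))

net = nn.Dense(3, 2)
opt = nn.Adam(net.trainable_params(), learning_rate=1e-3,
              weight_decay=1e-4, decay_filter=no_decay)
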
Example #7
    def __init__(self,
                 max_val=1.0,
                 filter_size=11,
                 filter_sigma=1.5,
                 k1=0.01,
                 k2=0.03):
        super(SSIM, self).__init__()
        validator.check_type('max_val', max_val, [int, float])
        validator.check('max_val', max_val, '', 0.0, Rel.GT)
        self.max_val = max_val
        self.filter_size = validator.check_integer('filter_size', filter_size,
                                                   1, Rel.GE)
        self.filter_sigma = validator.check_float_positive(
            'filter_sigma', filter_sigma)
        validator.check_type('k1', k1, [float])
        self.k1 = validator.check_number_range('k1', k1, 0.0, 1.0,
                                               Rel.INC_NEITHER)
        validator.check_type('k2', k2, [float])
        self.k2 = validator.check_number_range('k2', k2, 0.0, 1.0,
                                               Rel.INC_NEITHER)
        self.mean = P.DepthwiseConv2dNative(channel_multiplier=1,
                                            kernel_size=filter_size)
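
A usage sketch, assuming the class is exported as mindspore.nn.SSIM and, like the DepthwiseConv2dNative kernel it builds on, expects NCHW float32 images; the random inputs are placeholders:

import numpy as np
from mindspore import Tensor
import mindspore.nn as nn

ssim = nn.SSIM(max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img_a = Tensor(np.random.rand(1, 3, 64, 64).astype(np.float32))
img_b = Tensor(np.random.rand(1, 3, 64, 64).astype(np.float32))
score = ssim(img_a, img_b)  # one SSIM value per image in the batch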