Example #1
    def __init__(self, kernel_size, stride, pad_mode):
        name = self.__class__.__name__
        super(_PoolNd, self).__init__()
        validator.check_type('kernel_size', kernel_size, [int, tuple])
        validator.check_type('stride', stride, [int, tuple])
        self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(),
                                               ['VALID', 'SAME'])

        if isinstance(kernel_size, int):
            validator.check_integer("kernel_size", kernel_size, 1, Rel.GE)
        else:
            if (len(kernel_size) != 2 or (not isinstance(kernel_size[0], int))
                    or (not isinstance(kernel_size[1], int))
                    or kernel_size[0] <= 0 or kernel_size[1] <= 0):
                raise ValueError(
                    f'The kernel_size passed to cell {name} should be a positive int number or '
                    f'a tuple of two positive int numbers, but got {kernel_size}'
                )
        self.kernel_size = kernel_size

        if isinstance(stride, int):
            validator.check_integer("stride", stride, 1, Rel.GE)
        else:
            if (len(stride) != 2 or (not isinstance(stride[0], int))
                    or (not isinstance(stride[1], int)) or stride[0] <= 0
                    or stride[1] <= 0):
                raise ValueError(
                    f'The stride passed to cell {name} should be a positive int number or '
                    f'a tuple of two positive int numbers, but got {stride}')
        self.stride = stride
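
The snippet above repeats the same "positive int or 2-tuple of positive ints" check for kernel_size and stride. As a hedged illustration (not part of the original cell), the rule can be factored into one standalone helper; the name _check_int_or_2tuple is hypothetical:

def _check_int_or_2tuple(arg_name, value, cell_name):
    """Return `value` if it is a positive int or a tuple of two positive ints."""
    if isinstance(value, int) and value > 0:
        return value
    if (isinstance(value, tuple) and len(value) == 2
            and all(isinstance(v, int) and v > 0 for v in value)):
        return value
    raise ValueError(
        f'The {arg_name} passed to cell {cell_name} should be a positive int number or '
        f'a tuple of two positive int numbers, but got {value}')

# _check_int_or_2tuple('kernel_size', (3, 3), 'MaxPool2d') -> (3, 3)
# _check_int_or_2tuple('stride', (2, 0), 'MaxPool2d')      -> raises ValueError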
Example #2
    def __init__(self, params, learning_rate=0.1, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False,
                 loss_scale=1.0):

        super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale)

        if not isinstance(momentum, float):
            raise TypeError("momentum should be a float number!")

        if momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))

        if isinstance(dampening, int):
            dampening = float(dampening)

        if not isinstance(dampening, float):
            raise TypeError("dampening should be a float number")

        if dampening < 0.0:
            raise ValueError("dampening should be at least 0.0, but got dampening {}".format(dampening))
        self.dampening = dampening

        validator.check_type("nesterov", nesterov, [bool])
        self.nesterov = nesterov

        self.opt = P.SGD(dampening, weight_decay, nesterov)

        self.momentum = Parameter(momentum, name="momentum")
        self.accum = self.parameters.clone(prefix="accum", init='zeros')
        self.stat = self.parameters.clone(prefix="stat", init='ones')
        self.hyper_map = C.HyperMap()
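
For reference, the `accum` buffer cloned above holds the momentum accumulator; the fused P.SGD kernel performs the actual update on device. A minimal NumPy sketch of the conventional momentum/Nesterov update (a textbook formulation, not the exact kernel semantics):

import numpy as np

def sgd_momentum_step(param, grad, accum, lr, momentum, dampening, nesterov):
    accum[:] = momentum * accum + (1.0 - dampening) * grad
    update = grad + momentum * accum if nesterov else accum
    return param - lr * update

# With lr=0.1, momentum=0.9, a zero accumulator and a unit gradient,
# one step moves a zero parameter to -0.1.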
Example #3
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 decay=0.9,
                 momentum=0.0,
                 epsilon=1e-10,
                 use_locking=False,
                 centered=False,
                 loss_scale=1.0,
                 weight_decay=0.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(RMSProp, self).__init__(learning_rate, params)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if decay < 0.0:
            raise ValueError(
                "decay should be at least 0.0, but got decay {}".format(
                    decay))
        self.decay = decay
        self.epsilon = epsilon

        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("centered", centered, [bool])
        self.centered = centered
        if centered:
            self.opt = P.ApplyCenteredRMSProp(use_locking)
            self.mg = self.parameters.clone(prefix="mean_grad", init='zeros')
        else:
            self.opt = P.ApplyRMSProp(use_locking)

        self.dynamic_lr = False
        if not isinstance(learning_rate, float):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0
            self.one = Tensor(1, mstype.int32)

        self.momentum = momentum

        self.ms = self.parameters.clone(prefix="mean_square", init='zeros')
        self.moment = self.parameters.clone(prefix="moment", init='zeros')
        self.hyper_map = C.HyperMap()

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay * loss_scale
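
For reference only: the `ms`, `mg` and `moment` buffers cloned above correspond to the standard (optionally centered) RMSProp recurrences, which the fused P.ApplyRMSProp / P.ApplyCenteredRMSProp operators evaluate on device. A hedged NumPy sketch of that textbook update:

import numpy as np

def rmsprop_step(param, grad, ms, mg, moment, lr, decay, momentum, epsilon, centered):
    ms[:] = decay * ms + (1.0 - decay) * grad * grad           # running mean of squared gradients
    if centered:
        mg[:] = decay * mg + (1.0 - decay) * grad              # running mean of gradients
        denom = np.sqrt(ms - mg * mg + epsilon)
    else:
        denom = np.sqrt(ms + epsilon)
    moment[:] = momentum * moment + lr * grad / denom
    return param - moment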
Example #4
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 momentum=0.0,
                 dampening=0.0,
                 weight_decay=0.0,
                 nesterov=False,
                 loss_scale=1.0):

        super(SGD, self).__init__(learning_rate, params)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if dampening < 0.0:
            raise ValueError(
                "dampening should be at least 0.0, but got dampening {}".
                format(dampening))
        self.dampening = dampening

        if weight_decay < 0.0:
            raise ValueError(
                "weight_decay should be at least 0.0, but got weight_decay {}".
                format(weight_decay))
        self.weight_decay = weight_decay

        validator.check_type("nesterov", nesterov, [bool])
        self.nesterov = nesterov

        self.opt = P.SGD(dampening, weight_decay, nesterov)

        self.dynamic_lr = False
        self.gather = None
        self.global_step = None
        self.axis = None
        if not isinstance(learning_rate, float):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0
        self.momentum = Parameter(momentum, name="momentum")
        self.params = self.parameters
        self.accum = self.params.clone(prefix="accum", init='zeros')
        self.stat = self.params.clone(prefix="stat", init='ones')
        self.hyper_map = C.HyperMap()

        self.weight_decay = weight_decay * loss_scale
        self.reciprocal_scale = 1.0 / loss_scale
Example #5
def conv2d(x,
           weight,
           bias=None,
           stride=1,
           pad=0,
           dilation=1,
           groups=1,
           padding_mode='zeros'):
    """Convolution 2D."""
    # pylint: disable=unused-argument
    validator.check_type('stride', stride, (int, tuple))
    if isinstance(stride, int):
        stride = (stride, stride)
    elif len(stride) == 4:
        stride = (stride[2], stride[3])
    if len(stride) != 2 or (not isinstance(stride[0], int)) or \
                           (not isinstance(stride[1], int)) or \
                           stride[0] < 1 or stride[1] < 1:
        raise ValueError(
            f"The 'stride' of 'conv2d' should be a positive int number or "
            f"a tuple of two positive int numbers, but got {stride}")
    stride_h = stride[0]
    stride_w = stride[1]
    validator.check_type('dilation', dilation, (int, tuple))
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    elif len(dilation) == 4:
        dilation = (dilation[2], dilation[3])
    if len(dilation) != 2 or (not isinstance(dilation[0], int)) or \
                           (not isinstance(dilation[1], int)) or \
                           dilation[0] < 1 or dilation[1] < 1:
        raise ValueError(
            f"The 'dilation' of 'conv2d' should be a positive int number or "
            f"a tuple of two positive int numbers, but got {dilation}")
    dilation_h = dilation[0]
    dilation_w = dilation[1]

    batch_num, _, x_h, x_w = x.shape
    filter_num, _, filter_h, filter_w = weight.shape
    out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) *
                     (dilation_h - 1)) / stride_h)
    out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) *
                     (dilation_w - 1)) / stride_w)
    col = im2col(x, filter_h, filter_w, stride, pad, dilation)
    col_w = np.reshape(weight, (filter_num, -1)).T
    out = np.dot(col, col_w)
    out = out.reshape(batch_num, out_h, out_w, -1).transpose(0, 3, 1, 2)
    if bias is not None:
        out += bias
    return out
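
A quick, self-contained sanity check of the output-size formula used above (pure Python, no im2col needed): a 32x32 input with a 3x3 filter, stride 1, pad 1 and dilation 1 keeps its spatial size.

x_h = x_w = 32
filter_h = filter_w = 3
pad, stride_h, stride_w, dilation_h, dilation_w = 1, 1, 1, 1, 1
out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation_h - 1)) / stride_h)
out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation_w - 1)) / stride_w)
assert (out_h, out_w) == (32, 32)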
Example #6
    def register_decorator(func):
        validator.check_type("op_info", op_info, [str])
        op_lib = Oplib()
        file_path = os.path.realpath(inspect.getfile(func))
        # keep the path of the custom ops implementation.
        imply_path = "" if BUILT_IN_OPS_REGISTER_PATH in file_path else file_path
        if not op_lib.reg_op(op_info, imply_path):
            raise ValueError('Invalid op info {}:\n{}\n'.format(
                file_path, op_info))

        def wrapped_function(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapped_function
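
`register_decorator` above is the inner function of a decorator factory (in MindSpore this is the `op_info_register(op_info)` decorator). A small self-contained sketch of that two-level pattern, with the registration call replaced by a print and purely hypothetical names:

def fake_op_info_register(op_info):
    def register_decorator(func):
        print(f'registering {func.__name__} with op_info: {op_info}')

        def wrapped_function(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapped_function
    return register_decorator

@fake_op_info_register('{"op_name": "CusSquare"}')
def cus_square_op_info():
    return None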
Example #7
def _check_param_value(beta1, beta2, eps, weight_decay):
    """Check the type of inputs."""
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_dacay", weight_decay, [float])
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"),
                                 Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0,
                                 float("inf"), Rel.INC_LEFT)
Example #8
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 decay=0.9,
                 momentum=0.0,
                 epsilon=1e-10,
                 use_locking=False,
                 centered=False,
                 loss_scale=1.0,
                 weight_decay=0.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(RMSProp, self).__init__(learning_rate, params, weight_decay,
                                      loss_scale, decay_filter)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if decay < 0.0:
            raise ValueError(
                "decay should be at least 0.0, but got decay {}".format(
                    decay))
        self.decay = decay
        self.epsilon = epsilon

        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("centered", centered, [bool])
        self.centered = centered
        if centered:
            self.opt = P.ApplyCenteredRMSProp(use_locking)
            self.mg = self.parameters.clone(prefix="mean_grad", init='zeros')
        else:
            self.opt = P.ApplyRMSProp(use_locking)

        self.momentum = momentum

        self.ms = self.parameters.clone(prefix="mean_square", init='zeros')
        self.moment = self.parameters.clone(prefix="moment", init='zeros')
        self.hyper_map = C.HyperMap()

Example #9
    def eval(self, average=False):
        """
        Computes the fbeta.

        Args:
            average (bool): Whether to calculate the average fbeta. Default value is False.

        Returns:
            Float, computed result.
        """
        validator.check_type("average", average, [bool])
        if self._class_num == 0:
            raise RuntimeError('Input number of samples can not be 0.')

        fbeta = (1.0 + self.beta ** 2) * self._true_positives / \
                (self.beta ** 2 * self._actual_positives + self._positives + self.eps)

        if average:
            return fbeta.mean()
        return fbeta
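
Worked example of the formula above with scalar counts (beta = 1 reduces it to F1); the eps value here is an assumption, since the class attribute is not shown in this snippet:

beta, eps = 1.0, 1e-12
true_positives = 4.0       # correctly predicted positives
actual_positives = 6.0     # ground-truth positives (tp + fn)
positives = 5.0            # predicted positives (tp + fp)
fbeta = (1.0 + beta ** 2) * true_positives / (beta ** 2 * actual_positives + positives + eps)
# fbeta ~= 8 / 11 ~= 0.727, i.e. precision 4/5 and recall 4/6 combined harmonically.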
Example #10
    def eval(self, average=False):
        """
        Computes the recall.

        Args:
            average (bool): Specify whether to calculate the average recall. Default value is False.

        Returns:
            Float, the computed result.
        """
        if self._class_num == 0:
            raise RuntimeError('Input number of samples can not be 0.')

        validator.check_type("average", average, [bool])
        result = self._true_positives / (self._actual_positives + self.eps)

        if average:
            if self._type == "multilabel":
                result = self._true_positives_average / (self._actual_positives_average + self.eps)
            return result.mean()
        return result
Example #11
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.dynamic_lr = False
        if isinstance(learning_rate, Iterable) or \
                (isinstance(learning_rate, Tensor) and learning_rate.dim() == 1):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)
        self.weight_decay = weight_decay * loss_scale
        self.reciprocal_scale = 1.0 / loss_scale

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
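
For reference: the moment1/moment2 buffers and the beta1_power/beta2_power accumulators above feed the fused P.Adam kernel. A hedged NumPy sketch of the standard bias-corrected Adam update they correspond to (beta1_power and beta2_power hold beta1**t and beta2**t after t steps):

import numpy as np

def adam_step(param, grad, m, v, lr, beta1, beta2, eps, beta1_power, beta2_power):
    m[:] = beta1 * m + (1.0 - beta1) * grad          # first-moment estimate
    v[:] = beta2 * v + (1.0 - beta2) * grad * grad   # second-moment estimate
    lr_t = lr * np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    return param - lr_t * m / (np.sqrt(v) + eps)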
Example #12
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.batch_first = validator.check_type("batch_first", batch_first, [bool])
        self.dropout = float(dropout)
        self.bidirectional = bidirectional

        if self.batch_first:
            self.transpose1 = P.Transpose()
            self.transpose2 = P.Transpose()
        self.lstm = P.LSTM(input_size=self.input_size,
                           hidden_size=self.hidden_size,
                           num_layers=self.num_layers,
                           has_bias=self.has_bias,
                           bidirectional=self.bidirectional,
                           dropout=self.dropout)

        num_directions = 2 if self.bidirectional else 1

        weight_size = 0
        gate_size = 4 * self.hidden_size
        for layer in range(self.num_layers):
            input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
            increment_size = gate_size * input_layer_size
            increment_size += gate_size * self.hidden_size
            if self.has_bias:
                increment_size += 2 * gate_size
            weight_size += increment_size * num_directions

        self.weight = Parameter(initializer(0.0, [weight_size, 1, 1]), name='weight')

        self.fill = P.Fill()
        self.shape = P.Shape()
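
Worked example of the weight_size loop above: with input_size=10, hidden_size=20, num_layers=2, has_bias=True and a unidirectional LSTM, the flattened weight holds 4*20*(10+20+2) + 4*20*(20+20+2) = 2560 + 3360 = 5920 values.

input_size, hidden_size, num_layers, has_bias, num_directions = 10, 20, 2, True, 1
weight_size = 0
gate_size = 4 * hidden_size
for layer in range(num_layers):
    input_layer_size = input_size if layer == 0 else hidden_size * num_directions
    increment_size = gate_size * input_layer_size + gate_size * hidden_size
    if has_bias:
        increment_size += 2 * gate_size
    weight_size += increment_size * num_directions
assert weight_size == 5920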
Example #13
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params, weight_decay,
                                   loss_scale, decay_filter)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
Example #14
    def __init__(self,
                 max_val=1.0,
                 filter_size=11,
                 filter_sigma=1.5,
                 k1=0.01,
                 k2=0.03):
        super(SSIM, self).__init__()
        validator.check_type('max_val', max_val, [int, float])
        validator.check('max_val', max_val, '', 0.0, Rel.GT)
        self.max_val = max_val
        self.filter_size = validator.check_integer('filter_size', filter_size,
                                                   1, Rel.GE)
        self.filter_sigma = validator.check_float_positive(
            'filter_sigma', filter_sigma)
        validator.check_type('k1', k1, [float])
        self.k1 = validator.check_number_range('k1', k1, 0.0, 1.0,
                                               Rel.INC_NEITHER)
        validator.check_type('k2', k2, [float])
        self.k2 = validator.check_number_range('k2', k2, 0.0, 1.0,
                                               Rel.INC_NEITHER)
        self.mean = P.DepthwiseConv2dNative(channel_multiplier=1,
                                            kernel_size=filter_size)
Example #15
    def __init__(self, max_val=1.0):
        super(PSNR, self).__init__()
        validator.check_type('max_val', max_val, [int, float])
        validator.check('max_val', max_val, '', 0.0, Rel.GT)
        self.max_val = max_val
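
For context (the construct() method is not shown here): max_val is the dynamic range of the images, and PSNR is conventionally computed as 10 * log10(max_val**2 / MSE). A minimal NumPy reference:

import numpy as np

def psnr_reference(img1, img2, max_val=1.0):
    mse = np.mean((img1 - img2) ** 2)
    return 10.0 * np.log10(max_val ** 2 / mse)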
Example #16
def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                       end_learning_rate, power, beta1, beta2, eps,
                       weight_decay):
    """Check the type of inputs."""
    validator.check_type("decay_steps", decay_steps, [int])
    validator.check_type("warmup_steps", warmup_steps, [int])
    validator.check_type("start_learning_rate", start_learning_rate, [float])
    validator.check_type("end_learning_rate", end_learning_rate, [float])
    validator.check_type("power", power, [float])
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_dacay", weight_decay, [float])
    validator.check_number_range("decay_steps", decay_steps, 1, float("inf"),
                                 Rel.INC_LEFT)
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"),
                                 Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0,
                                 float("inf"), Rel.INC_LEFT)
Example #17
def _check_param(initial_accum,
                 learning_rate,
                 lr_power,
                 l1,
                 l2,
                 use_locking,
                 loss_scale=1.0,
                 weight_decay=0.0):
    validator.check_type("initial_accum", initial_accum, [float])
    validator.check("initial_accum", initial_accum, "", 0.0, Rel.GE)

    validator.check_type("learning_rate", learning_rate, [float])
    validator.check("learning_rate", learning_rate, "", 0.0, Rel.GT)

    validator.check_type("lr_power", lr_power, [float])
    validator.check("lr_power", lr_power, "", 0.0, Rel.LE)

    validator.check_type("l1", l1, [float])
    validator.check("l1", l1, "", 0.0, Rel.GE)

    validator.check_type("l2", l2, [float])
    validator.check("l2", l2, "", 0.0, Rel.GE)

    validator.check_type("use_locking", use_locking, [bool])

    validator.check_type("loss_scale", loss_scale, [float])
    validator.check("loss_scale", loss_scale, "", 1.0, Rel.GE)

    validator.check_type("weight_decay", weight_decay, [float])
    validator.check("weight_decay", weight_decay, "", 0.0, Rel.GE)