Example #1
def avg_pooling(x, pool_h, pool_w, stride):
    """
    Applies average pooling over an input array.

    Args:
        x (numpy.ndarray): The input array to be average pooled.
        pool_h (int): Height of the pooling window.
        pool_w (int): Width of the pooling window.
        stride (int): The stride of the sliding window.

    Returns:
        numpy.ndarray, the output array after applying average pooling to the input array.
    """
    validator.check_integer("stride", stride, 0, Rel.GT)
    num, channel, height, width = x.shape
    out_h = (height - pool_h) // stride + 1
    out_w = (width - pool_w) // stride + 1

    col = im2col(x, pool_h, pool_w, stride)
    col = col.reshape(-1, pool_h * pool_w)

    out = np.mean(col, axis=1)
    out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2)

    return out
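
For comparison, here is a minimal self-contained sketch of the same computation in plain NumPy, with the validator and im2col helpers replaced by an explicit sliding-window loop (avg_pooling_reference is an illustrative name, not part of the library):

import numpy as np

def avg_pooling_reference(x, pool_h, pool_w, stride):
    """Naive average pooling over an NCHW array, for cross-checking only."""
    num, channel, height, width = x.shape
    out_h = (height - pool_h) // stride + 1
    out_w = (width - pool_w) // stride + 1
    out = np.zeros((num, channel, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            # Average each pool_h x pool_w window over the spatial axes.
            window = x[:, :, i*stride:i*stride + pool_h, j*stride:j*stride + pool_w]
            out[:, :, i, j] = window.mean(axis=(2, 3))
    return out

# A 1x1x4x4 input pooled with a 2x2 window and stride 2 gives a (1, 1, 2, 2) result
# with values 2.5, 4.5, 10.5, 12.5.
x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
print(avg_pooling_reference(x, 2, 2, 2))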
Example #2
    def __init__(self, params, learning_rate=0.1, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False,
                 loss_scale=1.0):

        super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale)

        if not isinstance(momentum, float):
            raise TypeError("momentum should be a float number!")

        if momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))

        if isinstance(dampening, int):
            dampening = float(dampening)

        if not isinstance(dampening, float):
            raise TypeError("dampening should be a float number")

        if dampening < 0.0:
            raise ValueError("dampening should be at least 0.0, but got dampening {}".format(dampening))
        self.dampening = dampening

        validator.check_type("nesterov", nesterov, [bool])
        self.nesterov = nesterov

        self.opt = P.SGD(dampening, weight_decay, nesterov)

        self.momentum = Parameter(momentum, name="momentum")
        self.accum = self.parameters.clone(prefix="accum", init='zeros')
        self.stat = self.parameters.clone(prefix="stat", init='ones')
        self.hyper_map = C.HyperMap()
Example #3
    def __init__(self,
                 learning_rate,
                 parameters,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Optimizer, self).__init__()
        if isinstance(learning_rate, float):
            self.dynamic_lr = False
            self.gather = None
            self.assignadd = None
            self.global_step = None
            validator.check_number_range("learning rate", learning_rate, 0.0,
                                         float("inf"), Rel.INC_LEFT)
        else:
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mindspore.int32),
                                         name='global_step')
            if isinstance(learning_rate, Iterable):
                learning_rate = Tensor(
                    np.array(list(learning_rate)).astype(np.float32))
            elif isinstance(learning_rate, Tensor):
                if learning_rate.dim() > 1:
                    raise ValueError(
                        "Learning rate should be a 0 or 1 dim `Tensor`, "
                        f"but got {learning_rate.dim()}.")
                if learning_rate.dim() == 1 and learning_rate.size() < 2:
                    logger.warning(
                        "If you want to use a dynamic learning rate, please make sure that the number "
                        "of elements in the list, tuple or tensor passed in is greater than 1."
                    )
            else:
                raise TypeError(
                    "Learning rate should be float, Tensor or Iterable.")

        if loss_scale <= 0.0:
            raise ValueError(
                "Loss scale should be greater than 0, but got {}".format(
                    loss_scale))
        if weight_decay < 0.0:
            raise ValueError(
                "Weight decay should be equal to or greater than 0, but got {}".
                format(weight_decay))

        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay * loss_scale
        self.decay_flags = tuple(decay_filter(x) for x in self.parameters)

        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")
Example #4
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 decay=0.9,
                 momentum=0.0,
                 epsilon=1e-10,
                 use_locking=False,
                 centered=False,
                 loss_scale=1.0,
                 weight_decay=0.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(RMSProp, self).__init__(learning_rate, params)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if decay < 0.0:
            raise ValueError(
                "decay should be at least 0.0, but got decay {}".format(
                    decay))
        self.decay = decay
        self.epsilon = epsilon

        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("centered", centered, [bool])
        self.centered = centered
        if centered:
            self.opt = P.ApplyCenteredRMSProp(use_locking)
            self.mg = self.parameters.clone(prefix="mean_grad", init='zeros')
        else:
            self.opt = P.ApplyRMSProp(use_locking)

        self.dynamic_lr = False
        if not isinstance(learning_rate, float):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0
            self.one = Tensor(1, mstype.int32)

        self.momentum = momentum

        self.ms = self.parameters.clone(prefix="mean_square", init='zeros')
        self.moment = self.parameters.clone(prefix="moment", init='zeros')
        self.hyper_map = C.HyperMap()

        self.decay = decay
        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay * loss_scale
Example #5
    def __init__(self, kernel_size, stride, pad_mode):
        name = self.__class__.__name__
        super(_PoolNd, self).__init__()
        validator.check_type('kernel_size', kernel_size, [int, tuple])
        validator.check_type('stride', stride, [int, tuple])
        self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(),
                                               ['VALID', 'SAME'])

        if isinstance(kernel_size, int):
            validator.check_integer("kernel_size", kernel_size, 1, Rel.GE)
        else:
            if (len(kernel_size) != 2 or (not isinstance(kernel_size[0], int))
                    or (not isinstance(kernel_size[1], int))
                    or kernel_size[0] <= 0 or kernel_size[1] <= 0):
                raise ValueError(
                    f'The kernel_size passed to cell {name} should be a positive int number or '
                    f'a tuple of two positive int numbers, but got {kernel_size}'
                )
        self.kernel_size = kernel_size

        if isinstance(stride, int):
            validator.check_integer("stride", stride, 1, Rel.GE)
        else:
            if (len(stride) != 2 or (not isinstance(stride[0], int))
                    or (not isinstance(stride[1], int)) or stride[0] <= 0
                    or stride[1] <= 0):
                raise ValueError(
                    f'The stride passed to cell {name} should be a positive int number or '
                    f'a tuple of two positive int numbers, but got {stride}')
        self.stride = stride
Example #6
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 momentum=0.0,
                 dampening=0.0,
                 weight_decay=0.0,
                 nesterov=False,
                 loss_scale=1.0):

        super(SGD, self).__init__(learning_rate, params)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if dampening < 0.0:
            raise ValueError(
                "dampening should be at least 0.0, but got dampening {}".
                format(dampening))
        self.dampening = dampening

        if weight_decay < 0.0:
            raise ValueError(
                "weight_decay should be at least 0.0, but got weight_decay {}".
                format(weight_decay))
        self.weight_decay = weight_decay

        validator.check_type("nesterov", nesterov, [bool])
        self.nesterov = nesterov

        self.opt = P.SGD(dampening, weight_decay, nesterov)

        self.dynamic_lr = False
        self.gather = None
        self.global_step = None
        self.axis = None
        if not isinstance(learning_rate, float):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0
        self.momentum = Parameter(momentum, name="momentum")
        self.params = self.parameters
        self.accum = self.params.clone(prefix="accum", init='zeros')
        self.stat = self.params.clone(prefix="stat", init='ones')
        self.hyper_map = C.HyperMap()

        self.weight_decay = weight_decay * loss_scale
        self.reciprocal_scale = 1.0 / loss_scale
Example #7
def max_pool_with_argmax(x, pool_h, pool_w, stride, pad):
    """Max pooling with argmax."""
    validator.check_integer("stride", stride, 0, Rel.GT)
    num, channel, height, width = x.shape
    out_h = (height + 2*pad - pool_h)//stride + 1
    out_w = (width + 2*pad - pool_w)//stride + 1
    col = im2col(x, pool_h, pool_w, stride, pad)
    col = col.reshape(-1, pool_h*pool_w)
    out = np.max(col, axis=1)
    out_argmax = np.argmax(col, axis=1)
    out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)
    out_argmax = out_argmax.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2)
    return out, out_argmax
Example #8
def conv2d(x,
           weight,
           bias=None,
           stride=1,
           pad=0,
           dilation=1,
           groups=1,
           padding_mode='zeros'):
    """Convolution 2D."""
    # pylint: disable=unused-argument
    validator.check_type('stride', stride, (int, tuple))
    if isinstance(stride, int):
        stride = (stride, stride)
    elif len(stride) == 4:
        stride = (stride[2], stride[3])
    if len(stride) != 2 or (not isinstance(stride[0], int)) or \
                           (not isinstance(stride[1], int)) or \
                           stride[0] < 1 or stride[1] < 1:
        raise ValueError(
            f"The \'stride\' of \'conv2d\' should be a positive int number or "
            f"a tuple of two positive int numbers, but got {stride}")
    stride_h = stride[0]
    stride_w = stride[1]
    validator.check_type('dilation', dilation, (int, tuple))
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    elif len(dilation) == 4:
        dilation = (dilation[2], dilation[3])
    if len(dilation) != 2 or (not isinstance(dilation[0], int)) or \
                           (not isinstance(dilation[1], int)) or \
                           dilation[0] < 1 or dilation[1] < 1:
        raise ValueError(
            f"The \'dilation\' of \'conv2d\' should be a positive int number or "
            f"a tuple of two positive int numbers, but got {dilation}")
    dilation_h = dilation[0]
    dilation_w = dilation[1]

    batch_num, _, x_h, x_w = x.shape
    filter_num, _, filter_h, filter_w = weight.shape
    out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) *
                     (dilation_h - 1)) / stride_h)
    out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) *
                     (dilation_w - 1)) / stride_w)
    col = im2col(x, filter_h, filter_w, stride, pad, dilation)
    col_w = np.reshape(weight, (filter_num, -1)).T
    out = np.dot(col, col_w)
    out = out.reshape(batch_num, out_h, out_w, -1).transpose(0, 3, 1, 2)
    if bias is not None:
        out += bias
    return out
Example #9
    def register_decorator(func):
        validator.check_type("op_info", op_info, [str])
        op_lib = Oplib()
        file_path = os.path.realpath(inspect.getfile(func))
        # Keep the path of the custom ops implementation.
        imply_path = "" if BUILT_IN_OPS_REGISTER_PATH in file_path else file_path
        if not op_lib.reg_op(op_info, imply_path):
            raise ValueError('Invalid op info {}:\n{}\n'.format(
                file_path, op_info))

        def wrapped_function(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapped_function
Example #10
def max_pooling(x, pool_h, pool_w, stride):
    """Max pooling."""
    validator.check_integer("stride", stride, 0, Rel.GT)
    num, channel, height, width = x.shape
    out_h = (height - pool_h) // stride + 1
    out_w = (width - pool_w) // stride + 1

    col = im2col(x, pool_h, pool_w, stride)
    col = col.reshape(-1, pool_h * pool_w)

    out = np.max(col, axis=1)
    out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2)

    return out
Example #11
def conv2d(x, weight, bias=None, stride=1, pad=0,
           dilation=1, groups=1, padding_mode='zeros'):
    """Convolution 2D."""
    # pylint: disable=unused-argument
    validator.check_integer("stride", stride, 0, Rel.GT)
    batch_num, _, x_h, x_w = x.shape
    filter_num, _, filter_h, filter_w = weight.shape
    out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation - 1)) / stride)
    out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation - 1)) / stride)
    col = im2col(x, filter_h, filter_w, stride, pad, dilation)
    col_w = np.reshape(weight, (filter_num, -1)).T
    out = np.dot(col, col_w)
    out = out.reshape(batch_num, out_h, out_w, -1).transpose(0, 3, 1, 2)
    if bias is not None:
        out += bias
    return out
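
Assuming im2col and the validator helpers used above resolve, a quick sanity check with an all-ones input and kernel shows the expected output shape and values (the arrays below are made up for illustration):

import numpy as np

x = np.ones((1, 1, 3, 3), dtype=np.float32)  # NCHW input
w = np.ones((1, 1, 2, 2), dtype=np.float32)  # a single 2x2 filter
out = conv2d(x, w)                           # defaults: stride=1, pad=0, dilation=1
print(out.shape)  # (1, 1, 2, 2)
print(out)        # every output element sums a 2x2 window of ones, so all values are 4.0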
Example #12
 def __init__(self, kernel_size, stride, pad_mode, padding=0, pool=None):
     super(_PoolNd, self).__init__()
     self.kernel_size = kernel_size
     self.stride = stride
     self.pad_mode = pad_mode
     self.padding = validator.check_integer('padding', padding, 0, Rel.GE)
     self.pool = pool
     if self.pool is None:
         raise NotImplementedError
Example #13
 def _init_depthwise_conv2d(self):
     """Init depthwise conv2d op"""
     if context.get_context("device_target") == "Ascend" and self.group > 1:
         self.dilation = self._dilation
         validator.check_integer('group', self.group, self.in_channels,
                                 Rel.EQ)
         validator.check_integer('group', self.group, self.out_channels,
                                 Rel.EQ)
         self.conv2d = P.DepthwiseConv2dNative(channel_multiplier=1,
                                               kernel_size=self.kernel_size,
                                               pad_mode=self.pad_mode,
                                               pad=self.padding,
                                               stride=self.stride,
                                               dilation=self.dilation)
         weight_shape = [1, self.in_channels, *self.kernel_size]
         self.weight = Parameter(initializer(self.weight_init,
                                             weight_shape),
                                 name='weight')
Example #14
def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1):
    """Rearranges an image to row vector."""
    validator.check_integer("stride", stride, 0, Rel.GT)
    batch_num, channel, height, width = img.shape
    out_h = (height + 2*pad - filter_h - (filter_h - 1) * (dilation - 1))//stride + 1
    out_w = (width + 2*pad - filter_w - (filter_w - 1) * (dilation - 1))//stride + 1

    img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')
    col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype)

    for y in range(filter_h):
        y_max = y + stride*out_h
        for x in range(filter_w):
            x_max = x + stride*out_w
            col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]

    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1)
    return col
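
Assuming the validator call passes (any stride > 0 does), a quick shape check illustrates the layout im2col produces: one row per output position per batch element, one column per value inside the receptive field (the input values here are arbitrary):

import numpy as np

x = np.random.randn(2, 3, 4, 4).astype(np.float32)  # NCHW input
col = im2col(x, 3, 3, stride=1)
# out_h = out_w = (4 - 3)//1 + 1 = 2, so there are 2*2*2 = 8 rows
# and 3*3*3 = 27 columns (channel * filter_h * filter_w).
print(col.shape)  # (8, 27)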
Example #15
    def __init__(self,
                 params,
                 learning_rate=0.1,
                 decay=0.9,
                 momentum=0.0,
                 epsilon=1e-10,
                 use_locking=False,
                 centered=False,
                 loss_scale=1.0,
                 weight_decay=0.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(RMSProp, self).__init__(learning_rate, params, weight_decay,
                                      loss_scale, decay_filter)

        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError(
                "momentum should be at least 0.0, but got momentum {}".format(
                    momentum))

        if decay < 0.0:
            raise ValueError(
                "decay should be at least 0.0, but got decay {}".format(
                    decay))
        self.decay = decay
        self.epsilon = epsilon

        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("centered", centered, [bool])
        self.centered = centered
        if centered:
            self.opt = P.ApplyCenteredRMSProp(use_locking)
            self.mg = self.parameters.clone(prefix="mean_grad", init='zeros')
        else:
            self.opt = P.ApplyRMSProp(use_locking)

        self.momentum = momentum

        self.ms = self.parameters.clone(prefix="mean_square", init='zeros')
        self.moment = self.parameters.clone(prefix="moment", init='zeros')
        self.hyper_map = C.HyperMap()

        self.decay = decay
Example #16
    def __init__(self, learning_rate, parameters):
        super(Optimizer, self).__init__()
        if isinstance(learning_rate, float):
            validator.check_number_range("learning rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT)
        elif isinstance(learning_rate, Iterable):
            learning_rate = Tensor(np.array(list(learning_rate)).astype(np.float32))
        elif isinstance(learning_rate, Tensor):
            if learning_rate.dim() > 1:
                raise ValueError("Learning rate should be a 0 or 1 dim `Tensor`, "
                                 f"but got {learning_rate.dim()}.")
        else:
            raise TypeError("Learning rate should be float, Tensor or Iterable.")

        if isinstance(learning_rate, Tensor) and learning_rate.dim() == 1 and learning_rate.size() < 2:
            logger.warning("If you want to use a dynamic learning rate, please make sure that "
                           "the number of elements in the list, tuple or tensor passed in is greater than 1.")
        self.learning_rate = Parameter(learning_rate, name="learning_rate")
        self.parameters = ParameterTuple(parameters)
        if not self.parameters:
            raise ValueError("optimizer got an empty parameter list.")
Example #17
    def eval(self, average=False):
        """
        Computes the fbeta.

        Args:
            average (bool): Whether to calculate the average fbeta. Default value is False.

        Returns:
            Float, computed result.
        """
        validator.check_type("average", average, [bool])
        if self._class_num == 0:
            raise RuntimeError('Input number of samples can not be 0.')

        fbeta = (1.0 + self.beta ** 2) * self._true_positives / \
                (self.beta ** 2 * self._actual_positives + self._positives + self.eps)

        if average:
            return fbeta.mean()
        return fbeta
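
The expression in eval matches the usual F-beta formula, (1 + beta^2) * TP / (beta^2 * (TP + FN) + (TP + FP) + eps), when _actual_positives counts TP + FN and _positives counts TP + FP. A standalone check with made-up per-class counts (all names and numbers below are illustrative):

import numpy as np

beta, eps = 2.0, 1e-8
true_positives = np.array([5.0, 3.0])        # TP per class
actual_positives = np.array([8.0, 4.0])      # TP + FN per class
predicted_positives = np.array([6.0, 5.0])   # TP + FP per class
fbeta = (1.0 + beta ** 2) * true_positives / (beta ** 2 * actual_positives + predicted_positives + eps)
print(fbeta)         # per-class values, the average=False result
print(fbeta.mean())  # the average=True result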
Example #18
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.dynamic_lr = False
        if isinstance(learning_rate, Iterable) or \
                (isinstance(learning_rate, Tensor) and learning_rate.dim() == 1):
            self.dynamic_lr = True
            self.gather = P.GatherV2()
            self.assignadd = P.AssignAdd()
            self.global_step = Parameter(initializer(0, [1], mstype.int32),
                                         name="global_step")
            self.axis = 0

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)
        self.weight_decay = weight_decay * loss_scale
        self.reciprocal_scale = 1.0 / loss_scale

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
Example #19
def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
    """Rearranges a row vector to an image."""
    validator.check_integer("stride", stride, 0, Rel.GT)
    batch_num, channel, height, width = input_shape
    out_h = (height + 2*pad - filter_h)//stride + 1
    out_w = (width + 2*pad - filter_w)//stride + 1
    col = col.reshape(batch_num, out_h, out_w, channel, filter_h, filter_w) \
             .transpose(0, 3, 4, 5, 1, 2)

    img = np.zeros((batch_num,
                    channel,
                    height + 2*pad + stride - 1,
                    width + 2*pad + stride - 1)) \
            .astype(col.dtype)
    for y in range(filter_h):
        y_max = y + stride*out_h
        for x in range(filter_w):
            x_max = x + stride*out_w
            img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]

    return img[:, :, pad:height + pad, pad:width + pad]
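
col2im accumulates the contributions of overlapping windows, so it inverts im2col exactly only when the windows do not overlap (stride equal to the filter size, no padding). A small round-trip check, assuming both functions above are importable:

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
col = im2col(x, 2, 2, stride=2)                  # non-overlapping 2x2 tiles
restored = col2im(col, x.shape, 2, 2, stride=2)
print(np.array_equal(x, restored))               # True: each pixel belongs to exactly one window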
Example #20
    def eval(self, average=False):
        """
        Computes the recall.

        Args:
            average (bool): Whether to calculate the average recall. Default value is False.

        Returns:
            Float, the computed result.
        """
        if self._class_num == 0:
            raise RuntimeError('Input number of samples can not be 0.')

        validator.check_type("average", average, [bool])
        result = self._true_positives / (self._actual_positives + self.eps)

        if average:
            if self._type == "multilabel":
                result = self._true_positives_average / (self._actual_positives_average + self.eps)
            return result.mean()
        return result
Example #21
    def __init__(self, kernel_size=1, stride=1, pad_mode="VALID", padding=0):
        max_pool = P.MaxPool(ksize=kernel_size,
                             strides=stride,
                             padding=pad_mode)
        self.is_autodiff_backend = False
        if self.is_autodiff_backend:

            # At present, the pad mode of max pool is not unified, so this is a temporary workaround.
            pad_mode = validator.check_string('pad_mode', pad_mode.lower(),
                                              ['valid', 'same'])

            max_pool = P.MaxPoolWithArgmax(window=kernel_size,
                                           stride=stride,
                                           pad_mode=pad_mode,
                                           pad=padding)
        super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, padding,
                                        max_pool)
Example #22
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     validator.check_integer('group', group, in_channels, Rel.EQ)
     validator.check_integer('group', group, out_channels, Rel.EQ)
     validator.check_integer('group', group, 1, Rel.GE)
     self.pad_mode = pad_mode
     self.dilation = dilation
     self.group = group
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     Validator.check_value_type('padding', padding, (int, tuple),
                                self.cls_name)
     if isinstance(padding, tuple):
         Validator.check_integer('padding size', len(padding), 4, Rel.EQ,
                                 self.cls_name)
     self.padding = padding
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape),
                             name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]),
                               name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning(
                 "value of `has_bias` is False, value of `bias_init` will be ignored."
             )
         self.bias = None
Example #23
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 has_bias=True,
                 batch_first=False,
                 dropout=0,
                 bidirectional=False):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.has_bias = has_bias
        self.batch_first = validator.check_type("batch_first", batch_first, [bool])
        self.dropout = float(dropout)
        self.bidirectional = bidirectional

        if self.batch_first:
            self.transpose1 = P.Transpose()
            self.transpose2 = P.Transpose()
        self.lstm = P.LSTM(input_size=self.input_size,
                           hidden_size=self.hidden_size,
                           num_layers=self.num_layers,
                           has_bias=self.has_bias,
                           bidirectional=self.bidirectional,
                           dropout=self.dropout)

        num_directions = 2 if self.bidirectional else 1

        weight_size = 0
        gate_size = 4 * self.hidden_size
        for layer in range(self.num_layers):
            input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions
            increment_size = gate_size * input_layer_size
            increment_size += gate_size * self.hidden_size
            if self.has_bias:
                increment_size += 2 * gate_size
            weight_size += increment_size * num_directions

        self.weight = Parameter(initializer(0.0, [weight_size, 1, 1]), name='weight')

        self.fill = P.Fill()
        self.shape = P.Shape()
Example #24
    def __init__(self,
                 params,
                 learning_rate=1e-3,
                 beta1=0.9,
                 beta2=0.999,
                 eps=1e-8,
                 use_locking=False,
                 use_nesterov=False,
                 weight_decay=0.0,
                 loss_scale=1.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in
                 x.name):
        super(Adam, self).__init__(learning_rate, params, weight_decay,
                                   loss_scale, decay_filter)
        _check_param_value(beta1, beta2, eps, weight_decay)
        validator.check_type("use_locking", use_locking, [bool])
        validator.check_type("use_nesterov", use_nesterov, [bool])
        validator.check_type("loss_scale", loss_scale, [float])
        validator.check_number_range("loss_scale", loss_scale, 1.0,
                                     float("inf"), Rel.INC_LEFT)

        self.beta1 = Tensor(beta1, mstype.float32)
        self.beta2 = Tensor(beta2, mstype.float32)
        self.beta1_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta1_power")
        self.beta2_power = Parameter(initializer(1, [1], mstype.float32),
                                     name="beta2_power")
        self.eps = eps

        self.moment1 = self.parameters.clone(prefix="moment1", init='zeros')
        self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')

        self.decay_tf = tuple(decay_filter(x) for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.Adam(use_locking, use_nesterov)

        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.realdiv = P.RealDiv()
Example #25
 def __init__(self,
              max_val=1.0,
              filter_size=11,
              filter_sigma=1.5,
              k1=0.01,
              k2=0.03):
     super(SSIM, self).__init__()
     validator.check_type('max_val', max_val, [int, float])
     validator.check('max_val', max_val, '', 0.0, Rel.GT)
     self.max_val = max_val
     self.filter_size = validator.check_integer('filter_size', filter_size,
                                                1, Rel.GE)
     self.filter_sigma = validator.check_float_positive(
         'filter_sigma', filter_sigma)
     validator.check_type('k1', k1, [float])
     self.k1 = validator.check_number_range('k1', k1, 0.0, 1.0,
                                            Rel.INC_NEITHER)
     validator.check_type('k2', k2, [float])
     self.k2 = validator.check_number_range('k2', k2, 0.0, 1.0,
                                            Rel.INC_NEITHER)
     self.mean = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=filter_size)
Example #26
 def __init__(self, max_val=1.0):
     super(PSNR, self).__init__()
     validator.check_type('max_val', max_val, [int, float])
     validator.check('max_val', max_val, '', 0.0, Rel.GT)
     self.max_val = max_val
Example #27
def _check_param_value(decay_steps, warmup_steps, start_learning_rate,
                       end_learning_rate, power, beta1, beta2, eps,
                       weight_decay):
    """Check the type of inputs."""
    validator.check_type("decay_steps", decay_steps, [int])
    validator.check_type("warmup_steps", warmup_steps, [int])
    validator.check_type("start_learning_rate", start_learning_rate, [float])
    validator.check_type("end_learning_rate", end_learning_rate, [float])
    validator.check_type("power", power, [float])
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_decay", weight_decay, [float])
    validator.check_number_range("decay_steps", decay_steps, 1, float("inf"),
                                 Rel.INC_LEFT)
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"),
                                 Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0,
                                 float("inf"), Rel.INC_LEFT)
Example #28
def _check_param_value(beta1, beta2, eps, weight_decay):
    """Check the type of inputs."""
    validator.check_type("beta1", beta1, [float])
    validator.check_type("beta2", beta2, [float])
    validator.check_type("eps", eps, [float])
    validator.check_type("weight_decay", weight_decay, [float])
    validator.check_number_range("beta1", beta1, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("beta2", beta2, 0.0, 1.0, Rel.INC_NEITHER)
    validator.check_number_range("eps", eps, 0.0, float("inf"),
                                 Rel.INC_NEITHER)
    validator.check_number_range("weight_decay", weight_decay, 0.0,
                                 float("inf"), Rel.INC_LEFT)
Example #29
def _check_param(initial_accum,
                 learning_rate,
                 lr_power,
                 l1,
                 l2,
                 use_locking,
                 loss_scale=1.0,
                 weight_decay=0.0):
    validator.check_type("initial_accum", initial_accum, [float])
    validator.check("initial_accum", initial_accum, "", 0.0, Rel.GE)

    validator.check_type("learning_rate", learning_rate, [float])
    validator.check("learning_rate", learning_rate, "", 0.0, Rel.GT)

    validator.check_type("lr_power", lr_power, [float])
    validator.check("lr_power", lr_power, "", 0.0, Rel.LE)

    validator.check_type("l1", l1, [float])
    validator.check("l1", l1, "", 0.0, Rel.GE)

    validator.check_type("l2", l2, [float])
    validator.check("l2", l2, "", 0.0, Rel.GE)

    validator.check_type("use_locking", use_locking, [bool])

    validator.check_type("loss_scale", loss_scale, [float])
    validator.check("loss_scale", loss_scale, "", 1.0, Rel.GE)

    validator.check_type("weight_decay", weight_decay, [float])
    validator.check("weight_decay", weight_decay, "", 0.0, Rel.GE)