Example #1
def test_multiple_conv2d():
    conv1 = P.Conv2D(out_channel=2, pad_mode="pad", kernel_size=(5, 5), pad=0, stride=2, dilation=1)
    conv2 = P.Conv2D(out_channel=2, pad_mode="pad", kernel_size=(5, 5), pad=1, stride=2, dilation=1)

    def get_conv(x, w1, w2):
        return conv2(conv1(x, w1), w2)

    return get_conv
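A quick way to sanity-check this factory is to drive the returned closure with concrete tensors (a hypothetical usage sketch, assuming PyNative mode where primitives can be called eagerly; all shapes are illustrative assumptions, not part of the original test):

import numpy as np
from mindspore import Tensor

get_conv = test_multiple_conv2d()
x = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32))   # NCHW input (assumed shape)
w1 = Tensor(np.ones([2, 3, 5, 5]).astype(np.float32))    # (out_channel, in_channel, kH, kW)
w2 = Tensor(np.ones([2, 2, 5, 5]).astype(np.float32))    # consumes conv1's 2 output channels
# conv1: (32 - 5)//2 + 1 = 14; conv2 with pad=1: (14 + 2 - 5)//2 + 1 = 6
out = get_conv(x, w1, w2)                                # expected shape (1, 2, 6, 6)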
Example #2
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     kernel_size = twice(kernel_size)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group)
     self.bias_add = P.BiasAdd()
     if pad_mode not in ('valid', 'same', 'pad'):
         raise ValueError(
             'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
             ', should be one of values in \'valid\', \'same\', \'pad\'.')
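This __init__ only wires up the operators; a forward pass along the following lines (a sketch mirroring MindSpore's stock nn.Conv2d, not shown in the example) would complete the cell:

 def construct(self, x):
     output = self.conv2d(x, self.weight)   # self.weight is created by the _Conv base class
     if self.has_bias:
         output = self.bias_add(output, self.bias)
     return output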
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init=None,
                 bias_init=None,
                 quant_delay=0,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dQuant, self).__init__()
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = has_bias
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group
        self.quant_delay = quant_delay

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if bias_init is None:
            bias_init = initializer('zeros', [out_channels])
        if has_bias:
            self.bias = Parameter(bias_init, name='bias')
            self.bias_add = P.BiasAdd()

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
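The quantization-aware forward pass is omitted; a plausible construct, sketched from the operators created above (an assumption, not the original code), fake-quantizes the weight before the convolution:

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)   # simulated low-bit weight
        out = self.conv(x, weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        return out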
Example #4
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              data_format='NCHW'):
     kernel_size = twice(kernel_size)
     stride = twice(stride)
     self._dilation = dilation
     dilation = twice(dilation)
     super(Conv2d,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          pad_mode, padding, dilation, group, has_bias,
                          weight_init, bias_init, data_format)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            data_format=self.format)
     self._init_depthwise_conv2d()
     self.bias_add = P.BiasAdd()
Example #5
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              strategy=None):
     kernel_size = twice(kernel_size)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.add = P.TensorAdd(strategy)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            strategy=None)
     self.bias_add = P.BiasAdd()
Example #6
    def __init__(self, in_planes, kernel_size, stride):
        super(DepthWiseConv, self).__init__()
        platform = context.get_context("device_target")
        weight_shape = [1, kernel_size, in_planes]
        weight_init = _initialize_weight_goog(shape=weight_shape)

        if platform == "GPU":
            self.depthwise_conv = P.Conv2D(out_channel=in_planes*1,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           pad=int(kernel_size/2),
                                           pad_mode="pad",
                                           group=in_planes)

            self.weight = Parameter(initializer(weight_init,
                                                [in_planes*1, 1, kernel_size, kernel_size]), name='depthwise_weight')

        else:
            self.depthwise_conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                                          kernel_size=kernel_size,
                                                          stride=stride, pad_mode='pad',
                                                          pad=int(kernel_size/2))

            self.weight = Parameter(initializer(weight_init,
                                                [1, in_planes, kernel_size, kernel_size]), name='depthwise_weight')
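Both branches expose the same call signature, so the forward pass (a sketch; the original construct is not shown) is platform-independent. Note the differing weight layouts: the grouped P.Conv2D expects (in_planes, 1, k, k), while P.DepthwiseConv2dNative expects (1, in_planes, k, k):

    def construct(self, x):
        return self.depthwise_conv(x, self.weight)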
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):

        Validator.check_value_type("kernel_size", kernel_size, [int],
                                   self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE,
                                self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape',
                                    len(weight_init_shape), 3, Rel.EQ,
                                    self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)

        super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride, pad_mode, padding, dilation,
                                     group, has_bias, weight_init, bias_init)
        self.padding = (0, 0, padding, padding)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self.bias_add = P.BiasAdd()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv1d\' Op passed ' + str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
        self.shape = P.Shape()
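Conv1d is implemented here on top of Conv2D by inserting a dummy height axis; a construct sketch consistent with the expand_dims/squeeze operators above (assumed, mirroring MindSpore's nn.Conv1d):

    def construct(self, x):
        x = self.expand_dims(x, 2)                # (N, C, W) -> (N, C, 1, W)
        output = self.conv2d(x, self.weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        return self.squeeze(output)               # drop the dummy height axis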
Example #8
 def __init__(self):
     super(ConvBN, self).__init__()
     self.conv = P.Conv2D(32, 3)
     self.conv_weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
     self.scale = Tensor(np.ones([32]), mindspore.float32)
     self.bias = Tensor(np.ones([32]), mindspore.float32)
     self.mean = Tensor(np.ones([32]), mindspore.float32)
     self.variance = Tensor(np.ones([32]), mindspore.float32)
     self.bn = P.BatchNorm()
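A forward-pass sketch (assumed, not shown in the example); P.BatchNorm returns a tuple whose first element is the normalized tensor:

 def construct(self, x):
     x = self.conv(x, self.conv_weight)
     return self.bn(x, self.scale, self.bias, self.mean, self.variance)[0]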
Example #9
 def __init__(self):
     super(NetConv2d, self).__init__()
     out_channel = 2
     kernel_size = 1
     self.conv = P.Conv2D(out_channel,
                          kernel_size,
                          mode=1,
                          pad_mode="valid",
                          pad=0,
                          stride=1,
                          dilation=1,
                          group=1)
Example #10
def test_conv2d(out_channel, kernel_size, pad, stride, dilation):
    conv = P.Conv2D(out_channel=out_channel,
                    kernel_size=kernel_size,
                    pad_mode="pad",
                    pad=pad,
                    stride=stride,
                    dilation=dilation)

    def get_conv(x, w):
        return conv(x, w)

    return get_conv
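Hypothetical usage of this parameterized factory, with the shape arithmetic worked out (all values assumed for illustration, not from the original test):

import numpy as np
from mindspore import Tensor

get_conv = test_conv2d(out_channel=4, kernel_size=3, pad=1, stride=2, dilation=1)
x = Tensor(np.ones([1, 8, 16, 16]).astype(np.float32))
w = Tensor(np.ones([4, 8, 3, 3]).astype(np.float32))
out = get_conv(x, w)   # (16 + 2*1 - 3)//2 + 1 = 8 -> expected shape (1, 4, 8, 8)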
Example #11
 def construct(self, x):
     out_channel = 16
     kernel_size = 3
     conv2d = P.Conv2D(out_channel,
                       kernel_size,
                       1,
                       pad_mode='valid',
                       pad=0,
                       stride=1,
                       dilation=1,
                       group=1)
     return conv2d(x, self.w)
Example #12
 def __init__(self):
     super(Net, self).__init__()
     out_channel = 512
     kernel_size = 2048
     self.conv = P.Conv2D(out_channel, (kernel_size, kernel_size),
                          mode=1,
                          pad_mode="same",
                          pad=3,
                          stride=2,
                          dilation=1,
                          group=1)
     self.w = Parameter(initializer('normal', [512, 2048, 1, 1]), name='w')
Example #13
 def __init__(self, axis=0, out_nums=1):
     super(NetConv2dDynamic, self).__init__()
     self.dynshape = inner.GpuConvertToDynamicShape()
     out_channel = 2
     kernel_size = 1
     self.conv = P.Conv2D(out_channel,
                          kernel_size,
                          mode=1,
                          pad_mode="valid",
                          pad=0,
                          stride=1,
                          dilation=1,
                          group=1)
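The construct is not shown; presumably it routes both operands through GpuConvertToDynamicShape before the convolution, along these lines (a sketch under that assumption, not the original code):

 def construct(self, x, w):
     x_dyn = self.dynshape(x)    # mark the input as dynamically shaped
     w_dyn = self.dynshape(w)
     return self.conv(x_dyn, w_dyn)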
Example #14
 def __init__(self):
   super(ConvNet, self).__init__()
   out_channel = 16
   kernel_size = 3
   self.conv = P.Conv2D(out_channel,
                kernel_size,
                mode=1,
                pad_mode="pad",
                pad=0,
                stride=1,
                dilation=2,
                group=1)
   self.w = Parameter(Tensor(np.ones([16, 16, 3, 3]).astype(np.float32)), name='w')
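With dilation=2 the effective receptive field is k + (k-1)*(d-1) = 3 + 2 = 5. A hypothetical eager call (input shape assumed) illustrates the resulting output size:

import numpy as np
from mindspore import Tensor

net = ConvNet()
x = Tensor(np.ones([1, 16, 32, 32]).astype(np.float32))
out = net.conv(x, net.w)   # (32 - 5)//1 + 1 = 28 -> expected shape (1, 16, 28, 28)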
Example #15
 def __init__(self):
     super(Net, self).__init__()
     out_channel = 64
     kernel_size = 7
     self.conv = P.Conv2D(out_channel,
                          kernel_size,
                          mode=1,
                          pad_mode="valid",
                          pad=0,
                          stride=1,
                          dilation=1,
                          group=1)
     self.w = Parameter(initializer('normal', [64, 3, 7, 7]), name='w')
Example #16
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              quant_delay=0,
              num_bits=8,
              per_channel=False,
              symmetric=False,
              narrow_range=False):
     kernel_size = twice(kernel_size)
     super(Conv2dQuant,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          pad_mode, padding, dilation, group, has_bias,
                          weight_init, bias_init)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group)
     self.bias_add = P.BiasAdd()
     if pad_mode not in ('valid', 'same', 'pad'):
         raise ValueError(
             'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
             ', should be one of values in \'valid\', \'same\', \'pad\'.')
     self.fake_quant_weight = nn.FakeQuantWithMinMax(
         min_init=-6,
         max_init=6,
         ema=False,
         num_bits=num_bits,
         quant_delay=quant_delay,
         per_channel=per_channel,
         channel_size=out_channels,
         symmetric=symmetric,
         narrow_range=narrow_range)
Example #17
 def __init__(self):
     super(NetConv2d, self).__init__()
     out_channel = 2
     kernel_size = 1
     self.conv = P.Conv2D(out_channel,
                          kernel_size,
                          mode=1,
                          pad_mode="valid",
                          pad=0,
                          stride=1,
                          dilation=1,
                          group=1)
     self.w = Parameter(initializer(
         Tensor(
             np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1,
                                              1).astype(np.float32)),
         [2, 3, 1, 1]),
                        name='w')
     self.x = Parameter(initializer(
         Tensor(
             np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3,
                                              3).astype(np.float32)),
         [1, 3, 3, 3]),
                        name='x')
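Because the kernel is 1x1 with stride 1, the expected output can be cross-checked against a plain NumPy contraction (a verification sketch, not part of the original):

import numpy as np

x = np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32)
w = np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1).astype(np.float32)
# a 1x1, stride-1 convolution is a per-pixel mixing of channels
expected = np.einsum('nchw,oc->nohw', x, w[:, :, 0, 0])   # shape (1, 2, 3, 3)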
Example #18
     'desc_inputs':
     [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]
 }),
 ('AvgPool2d_1', {
     'block': nn.AvgPool2d(5, pad_mode='same'),
     'desc_inputs':
     [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]
 }),
 ('AvgPool2d_2', {
     'block': nn.AvgPool2d(5, pad_mode='valid'),
     'desc_inputs':
     [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]
 }),
 ('Conv2D_1', {
     'block':
     P.Conv2D(1, 6, pad_mode='same', pad=0),
     'desc_inputs': [
         Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
         Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))
     ]
 }),
 ('Conv2D_2', {
     'block':
     P.Conv2D(1, 6, pad_mode='valid', pad=0),
     'desc_inputs': [
         Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
         Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))
     ]
 }),
 ('Conv2D_3', {
     'block':
Example #19
        'block': PReLUGradNet(),
        'desc_inputs': [Tensor(np.ones([1, 3, 4, 4], np.float32)),
                        Tensor(np.ones([1, 3, 4, 4], np.float32)),
                        Tensor(np.ones(3, np.float32))],
    }),
]

test_cases_for_verify_exception = [
    ('ApplyMomentum_Error', {
        'block': (P.ApplyMomentum(), {'exception': TypeError}),
        'desc_inputs': [[2], [128, 32, 32, 64], [128, 32, 32, 64], [128, 32, 32, 64], [128, 32, 32, 64]],
        'desc_bprop': [[128, 32, 32, 64]],
        'skip': ['backward']
    }),
    ('Conv2d_ValueError_1', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('Conv2d_ValueError_2', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_1', {
        'block': (lambda _: P.MaxPoolWithArgmax(padding='sane'), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_2', {
        'block': (lambda _: P.MaxPoolWithArgmax(ksize='1'), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_3', {
Example #20
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 data_format='NCHW',
                 has_bias=False,
                 weight_init='normal',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 bias_init='zeros'):
        self.thor = True
        ksizes = (1, kernel_size, kernel_size, 1)
        self.hw = kernel_size * kernel_size
        strides = (1, stride, stride, 1)
        kernel_size = twice(kernel_size)
        super(Conv2d_Thor, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            data_format,
            has_bias,
            weight_init,
            bias_init,
        )
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group
                               )

        self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.transpose02314 = P.CusTranspose02314()
        self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.matrix_A_device_shape, self.matrix_A_device_dim = caculate_device_shape(self.matrix_A_dim,
                                                                                     self.in_channels, True)
        self.matrix_G_device_shape, self.matrix_G_device_dim = caculate_device_shape(self.matrix_G_dim,
                                                                                     self.in_channels, False)
        self.matrix_A_device_temp_shape = (
            self.matrix_A_device_shape[0], self.matrix_A_device_shape[2], self.matrix_A_device_shape[1],
            self.matrix_A_device_shape[3])
        self.matrix_G_device_temp_shape = (
            self.matrix_G_device_shape[0], self.matrix_G_device_shape[2], self.matrix_G_device_shape[1],
            self.matrix_G_device_shape[3])
        self.matrix_A_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_A_device_dim).astype(np.float16), self.matrix_A_device_shape)),
            name='matrix_A_inv', requires_grad=False)
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.matrix_G_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape)),
            name="matrix_G_inv", requires_grad=False)

        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fake_G = Tensor(
            np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape))

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.vector_matmul = P.CusBatchMatMul()
        self.diag_block_dim = 128
        self.channels_slice_flag = False
        if self.in_channels % C0 != 0:
            self.channels_slice_flag = True

        self.padA_flag = False
        if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                and self.matrix_A_dim > self.diag_block_dim:
            self.padA_flag = True
            pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
            self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
        self.device_shape_pad_flag = False
        if self.matrix_A_dim != self.matrix_A_device_dim:
            self.device_shape_pad_flag = True
            self.device_shape_pad = P.Pad(((0, 0), (0, C0 - self.in_channels), (0, 0), (0, C0 - self.in_channels)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.freq = Tensor(frequency, mstype.int32)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.axis = 0

        dampingA_dim = self.matrix_A_dim
        if (self.matrix_A_dim % self.diag_block_dim) != 0 and self.matrix_A_dim > self.diag_block_dim:
            dampingA_dim = (self.matrix_A_dim // self.diag_block_dim + 1) * self.diag_block_dim
        dampingG_dim = self.matrix_G_dim
        if (self.matrix_G_dim % self.diag_block_dim) != 0 and self.matrix_G_dim > self.diag_block_dim:
            dampingG_dim = (self.matrix_G_dim // self.diag_block_dim + 1) * self.diag_block_dim

        self.dampingA = Tensor(np.identity(dampingA_dim), mstype.float32)
        self.dampingG = Tensor(np.identity(dampingG_dim), mstype.float32)
        self.fused_abs_max1 = P.CusFusedAbsMax1([self.matrix_A_dim, self.matrix_A_dim])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #21
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 eps=1e-5,
                 momentum=0.997,
                 weight_init=None,
                 beta_init=None,
                 gamma_init=None,
                 mean_init=None,
                 var_init=None,
                 quant_delay=0,
                 freeze_bn=100000,
                 fake=True,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dBatchNormQuant, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = twice(dilation)
        self.stride = twice(stride)
        self.group = group
        self.fake = fake
        self.freeze_bn = freeze_bn
        self.momentum = momentum
        self.quant_delay = quant_delay
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if gamma_init is None:
            gamma_init = initializer('ones', [out_channels])
        self.gamma = Parameter(gamma_init, name='gamma')
        if beta_init is None:
            beta_init = initializer('zeros', [out_channels])
        self.beta = Parameter(beta_init, name='beta')
        if mean_init is None:
            mean_init = initializer('zeros', [out_channels])
        self.moving_mean = Parameter(mean_init,
                                     name='moving_mean',
                                     requires_grad=False)
        if var_init is None:
            var_init = initializer('ones', [out_channels])
        self.moving_variance = Parameter(var_init,
                                         name='moving_variance',
                                         requires_grad=False)

        self.step = Parameter(initializer('normal', [1], dtype=mstype.int32),
                              name='step',
                              requires_grad=False)

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
        self.batchnorm_fold_train = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=True,
                                                    freeze_bn=freeze_bn)
        self.batchnorm_fold_infer = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=False,
                                                    freeze_bn=freeze_bn)
        self.correct_mul = P.CorrectionMul()
        self.relu = P.ReLU()
        self.batchnorm_fold2 = P.BatchNormFold2(freeze_bn=freeze_bn)
        self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
        self.one = Tensor(1, mstype.int32)
        self.assignadd = P.AssignAdd()
Example #22
     'block': nn.Dense(1, 6, has_bias=False, bias_init=Tensor(np.ones([6]).astype(np.float32))),
     'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}),
 ('MaxPool2d_1', {
     'block': nn.MaxPool2d(5, pad_mode='same', padding=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
 ('MaxPool2d_2', {
     'block': nn.MaxPool2d(5, pad_mode='valid', padding=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
 ('AvgPool2d_1', {
     'block': nn.AvgPool2d(5, pad_mode='same', padding=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
 ('AvgPool2d_2', {
     'block': nn.AvgPool2d(5, pad_mode='valid', padding=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
 ('Conv2D_1', {
     'block': P.Conv2D(1, 6, pad_mode='same', pad=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                     Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
 ('Conv2D_2', {
     'block': P.Conv2D(1, 6, pad_mode='valid', pad=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                     Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
 ('Conv2D_3', {
     'block': P.Conv2D(1, 6, pad_mode='pad', pad=0),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                     Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
 ('Conv2D_4', {
     'block': P.Conv2D(1, 6, pad_mode='pad', pad=7),
     'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                     Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
 ('MatMul_1', {
Example #23
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        self._dilation = dilation
        dilation = twice(dilation)
        super(Conv2d_Thor,
              self).__init__(in_channels, out_channels, kernel_size, stride,
                             pad_mode, padding, dilation, group, has_bias,
                             weight_init, bias_init)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self._init_depthwise_conv2d(weight_init)
        self.bias_add = P.BiasAdd()

        self.thor = True
        self.hw = kernel_size[0] * kernel_size[1]
        self.matrix_A_dim = self.in_channels * self.kernel_size[
            0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.A_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="A_normalizer",
                                      requires_grad=False)
        self.G_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="G_normalizer",
                                      requires_grad=False)
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            ksizes = (1, kernel_size[0], kernel_size[1], 1)
            strides = (1, stride[0], stride[1], 1)
            self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.transpose02314 = P.CusTranspose02314()
            dampingA_dim = self.matrix_A_dim
            self.diag_block_dim = 128
            if (self.matrix_A_dim % self.diag_block_dim
                ) != 0 and self.matrix_A_dim > self.diag_block_dim:
                dampingA_dim = (self.matrix_A_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            dampingG_dim = self.matrix_G_dim
            if (self.matrix_G_dim % self.diag_block_dim
                ) != 0 and self.matrix_G_dim > self.diag_block_dim:
                dampingG_dim = (self.matrix_G_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([dampingA_dim, dampingA_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([dampingG_dim, dampingG_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)

            self.channels_slice_flag = False
            self.C0 = 16
            if self.in_channels % self.C0 != 0:
                self.channels_slice_flag = True
            self.padA_flag = False
            if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                    and self.matrix_A_dim > self.diag_block_dim:
                self.padA_flag = True
                pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
                self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
            self.slice = P.Slice()
        else:
            self.is_Ascend = False
            self.img2col = P.Im2Col(kernel_size=kernel_size,
                                    stride=stride,
                                    pad_mode="same")
            self.matmul = P.MatMul(transpose_b=True)
            self.reduce_mean = P.ReduceMean(keep_dims=False)
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([self.matrix_A_dim,
                          self.matrix_A_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([self.matrix_G_dim,
                          self.matrix_G_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #24
            'exception': ValueError,
            'error_keywords': ['BatchNorm']
        }),
        'desc_inputs': [
            Tensor(np.ones([5, 3]).astype(np.float32)),
            Tensor(np.ones([3]).astype(np.float32)),
            Tensor(np.ones([3]).astype(np.float32)),
            Tensor(np.ones([5]).astype(np.float32)),
            Tensor(np.ones([5]).astype(np.float32))
        ],
        'skip': ['backward']
    }),

    # input is scalar
    ('Conv2D0', {
        'block': (P.Conv2D(2, (5, 5)), {
            'exception': TypeError,
            'error_keywords': ['Conv2D']
        }),
        'desc_inputs': [5.0, 5.0],
        'skip': ['backward']
    }),
    # input is Tensor(bool)
    ('Conv2D1', {
        'block': (P.Conv2D(2, (5, 5)), {
            'exception': TypeError,
            'error_keywords': ['Conv2D']
        }),
        'desc_inputs': [
            Tensor(np.ones([5]).astype(np.bool_)),
            Tensor(np.ones([5]).astype(np.bool_))
Example #25
    def infer_shape(self, logits_shape, label_shape):
        return []

    def infer_dtype(self, logits_type, labels_type):
        # pylint: disable=unused-argument
        return dtype.float64


tensorToScalar = TensorToScalar()


def get_tensor_to_scalar(logits, labels):
    return tensorToScalar(logits, labels)


conv2d = P.Conv2D(64, (3, 3), pad_mode="pad", pad=1, stride=2)


def get_conv2d(x, w):
    return conv2d(x, w)


conv2dNative = P.DepthwiseConv2dNative(3, (3, 3),
                                       pad_mode="pad",
                                       pad=1,
                                       stride=2)


def get_conv2d_native(x, w):
    return conv2dNative(x, w)
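A hypothetical call to the module-level wrapper (shapes assumed): with pad=1 and stride=2, the 3x3 convolution roughly halves the spatial dimensions:

import numpy as np
from mindspore import Tensor

x = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32))
w = Tensor(np.ones([64, 3, 3, 3]).astype(np.float32))   # (out_channel, in_channel, kH, kW)
out = get_conv2d(x, w)   # (32 + 2*1 - 3)//2 + 1 = 16 -> expected shape (1, 64, 16, 16)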
Example #26
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 data_format='NCHW',
                 has_bias=False,
                 weight_init='normal',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 bias_init='zeros'):
        self.skfac = True
        self.hw = kernel_size * kernel_size
        kernel_size = twice(kernel_size)
        super(Conv2d_SKFAC_GPU, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            data_format,
            has_bias,
            weight_init,
            bias_init,
        )
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.matrix_A_dim = self.in_channels * self.kernel_size[
            0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        split_dim = 128
        self.matrix_A_inv = Parameter(np.zeros(
            (self.matrix_A_dim, self.matrix_A_dim)).astype(np.float32),
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(np.zeros(
            (self.matrix_G_dim, self.matrix_G_dim)).astype(np.float32),
                                      requires_grad=False)

        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  requires_grad=False)
        self.img2col = P.Im2Col(kernel_size=kernel_size,
                                stride=stride,
                                pad_mode="same")
        self.matmul = P.MatMul(transpose_a=True)
        self.matmul_ = P.MatMul()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.transpose = P.Transpose()
        self.cast = P.Cast()
        self.gather = P.Gather()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.sqrt = P.Sqrt()
        self.reduce_mean = P.ReduceMean(keep_dims=False)
        self.damping = Parameter(Tensor(damping), requires_grad=False)
        self.dampingA = Tensor(np.identity(batch_size), mstype.float32)
        self.dampingG = Tensor(np.identity(batch_size), mstype.float32)
        self.I_G = Tensor(np.identity(out_channels), mstype.float32)
        self.I_A = Tensor(np.identity(self.matrix_A_dim), mstype.float32)
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
        self.batch_coefficient = Tensor((1 / 32)**0.5, mstype.float32)
Example #27
        'skip': ['backward']
    }),
]

test_cases_for_verify_exception = [
    ('ApplyMomentum_Error', {
        'block': (P.ApplyMomentum(), {
            'exception': TypeError
        }),
        'desc_inputs': [[2], [128, 32, 32, 64], [128, 32, 32, 64],
                        [128, 32, 32, 64], [128, 32, 32, 64]],
        'desc_bprop': [[128, 32, 32, 64]],
        'skip': ['backward']
    }),
    ('Conv2d_ValueError_1', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {
            'exception': TypeError
        }),
        'desc_inputs': [0],
    }),
    ('Conv2d_ValueError_2', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2), {
            'exception': ValueError
        }),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_1', {
        'block': (lambda _: P.MaxPoolWithArgmax(padding='sane'), {
            'exception': ValueError
        }),
        'desc_inputs': [0],
Example #28
from mindspore.ops import operations as P
from mindspore.ops import Primitive

make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')
conv = P.Conv2D(out_channel=64,
                kernel_size=7,
                mode=1,
                pad_mode="valid",
                pad=0,
                stride=1,
                dilation=1,
                group=1)
bn = P.FusedBatchNorm()
relu = P.ReLU()
conv_bn1 = Primitive('ConvBN1')
bn2_relu = Primitive('BN2Relu')


class FnDict:
    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        self.fnDict[fn.__name__] = fn
Example #29
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_prior_fn=NormalPrior,
                 weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
                 bias_prior_fn=NormalPrior,
                 bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        super(_ConvVariational, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init='normal',
            bias_init='zeros'
        )
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')

        # convolution args
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = dilation
        self.group = group
        self.has_bias = has_bias

        # distribution trainable parameters
        self.shape = [self.out_channels,
                      self.in_channels // self.group, *self.kernel_size]

        self.weight.requires_grad = False

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()
        for prior_name, prior_dist in self.weight_prior.name_cells().items():
            if prior_name != 'normal':
                raise TypeError("The type of distribution of `weight_prior_fn` should be `normal`")
            if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                    isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                raise TypeError("The input form of `weight_prior_fn` is incorrect")

        try:
            self.weight_posterior = weight_posterior_fn(shape=self.shape, name='bnn_weight')
        except TypeError:
            raise TypeError('The input form of `weight_posterior_fn` is incorrect')
        for posterior_name, _ in self.weight_posterior.name_cells().items():
            if posterior_name != 'normal':
                raise TypeError("The type of distribution of `weight_posterior_fn` should be `normal`")

        if self.has_bias:
            self.bias.requires_grad = False

            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()
            for prior_name, prior_dist in self.bias_prior.name_cells().items():
                if prior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_prior_fn` should be `normal`")
                if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                        isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                    raise TypeError("The input form of `bias_prior_fn` is incorrect")

            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            for posterior_name, _ in self.bias_posterior.name_cells().items():
                if posterior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_posterior_fn` should be `normal`")

        # mindspore operations
        self.bias_add = P.BiasAdd()
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.log = P.Log()
        self.sum = P.ReduceSum()
Example #30
    """Convolution 2D"""
    batch_num, _, x_h, x_w = x.shape
    filter_num, _, filter_h, filter_w = weight.shape
    out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) *
                     (dilation[2] - 1)) / stride[2])
    out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) *
                     (dilation[3] - 1)) / stride[3])
    col = im2col(x, filter_h, filter_w, stride, pad, dilation)
    col_w = np.reshape(weight, (filter_num, -1)).T
    out = np.dot(col, col_w)
    out = out.reshape(batch_num, out_h, out_w, -1).transpose(0, 3, 1, 2)
    if bias is not None:
        out += bias.reshape(1, -1, 1, 1)  # broadcast the per-channel bias over the NCHW output
    return out


@vm_impl_getters.register(P.Conv2D)
def vm_impl_conv2d(self):
    """Generate vm_impl function for Conv2D"""
    def vm_impl(x, w):
        x = x.asnumpy()
        weight = w.asnumpy()
        bias = None
        out = conv2d(x, weight, bias, self.stride, self.pad, self.dilation)
        return Tensor(out)

    return vm_impl


conv2d_prim = P.Conv2D(64, (3, 3), pad_mode='pad', pad=1, stride=2)