Code example #1
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              data_format='NCHW'):
     kernel_size = twice(kernel_size)
     stride = twice(stride)
     self._dilation = dilation
     dilation = twice(dilation)
     super(Conv2d,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          pad_mode, padding, dilation, group, has_bias,
                          weight_init, bias_init, data_format)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            data_format=self.format)
     self._init_depthwise_conv2d()
     self.bias_add = P.BiasAdd()
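A minimal usage sketch, assuming the class above is mindspore.nn.Conv2d on a standard MindSpore install: scalar arguments are normalized to 2-tuples by twice() before being handed to P.Conv2D.

import mindspore.nn as nn

conv = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=2)
print(conv.kernel_size)  # (3, 3), i.e. twice(3)
print(conv.stride)       # (2, 2), i.e. twice(2)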
Code example #2
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     kernel_size = twice(kernel_size)
     stride = twice(stride)
     dilation = twice(dilation)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group)
     self.bias_add = P.BiasAdd()
     if pad_mode not in ('valid', 'same', 'pad'):
         raise ValueError(
             'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
             ', should be one of values in \'valid\', \'same\', \'pad\'.')
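The pad_mode check above runs in the constructor, so an unsupported mode fails at construction time. A hedged sketch, assuming the standard mindspore.nn.Conv2d entry point:

import pytest
import mindspore.nn as nn

with pytest.raises(ValueError):
    nn.Conv2d(3, 16, 3, pad_mode='full')  # only 'valid', 'same', 'pad' are accepted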
Code example #3
File: quant.py Project: lyc4614/mindspore
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init=None,
                 bias_init=None,
                 quant_delay=0,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dQuant, self).__init__()
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = has_bias
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group
        self.quant_delay = quant_delay

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if bias_init is None:
            bias_init = initializer('zeros', [out_channels])
        if has_bias:
            self.bias = Parameter(bias_init, name='bias')
            self.bias_add = P.BiasAdd()

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
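A small arithmetic sketch of the default weight shape built above: with grouped convolution each filter sees in_channels // group input channels, giving [out_channels, in_channels // group, kh, kw]. The concrete numbers below are illustrative, not from the source:

out_channels, in_channels, group = 16, 8, 2
kernel_size = (3, 3)
weight_shape = [out_channels, in_channels // group, *kernel_size]
assert weight_shape == [16, 4, 3, 3]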
Code example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        # out_channels and in_channels are swapped:
        # Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # so Conv2dTranspose's out_channels maps to Conv2DBackpropInput's in_channel.
        super(Conv2dTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              stride,
                                              pad_mode,
                                              padding,
                                              dilation,
                                              group,
                                              has_bias,
                                              weight_init,
                                              bias_init,
                                              transposed=True)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed ' +
                str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name='bias')

        # Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
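A plain-Python sketch of the channel swap the comments describe: Conv2dTranspose(in_c, out_c) is implemented as the input gradient of a forward Conv2D that maps out_c to in_c, so the wrapped Conv2DBackpropInput receives out_channel=in_channels. The numbers are illustrative:

in_c, out_c = 32, 16                   # Conv2dTranspose: 32 -> 16
forward_in, forward_out = out_c, in_c  # underlying forward Conv2D: 16 -> 32
assert forward_out == in_c             # hence out_channel=in_channels above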
Code example #5
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     validator.check_integer('group', group, in_channels, Rel.EQ)
     validator.check_integer('group', group, out_channels, Rel.EQ)
     validator.check_integer('group', group, 1, Rel.GE)
     self.pad_mode = pad_mode
     self.group = group
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     Validator.check_value_type('padding', padding, (int, tuple),
                                self.cls_name)
     if isinstance(padding, tuple):
         Validator.check_integer('padding size', len(padding), 4, Rel.EQ,
                                 self.cls_name)
     self.padding = padding
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape),
                             name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]),
                               name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning(
                 "value of `has_bias` is False, value of `bias_init` will be ignore."
             )
         self.bias = None
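The three validator calls above pin down the depthwise contract: group == in_channels == out_channels >= 1, matching channel_multiplier=1. A hedged plain-Python equivalent (check_depthwise is a hypothetical helper, not part of MindSpore):

def check_depthwise(in_channels, out_channels, group):
    # hypothetical stand-in for the validator.check_integer calls above
    if not (group == in_channels == out_channels and group >= 1):
        raise ValueError("group, in_channels and out_channels must be equal and >= 1")

check_depthwise(32, 32, 32)    # passes
# check_depthwise(32, 64, 32)  # would raise ValueError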
Code example #6
 def __init__(self,
              in_channels,
              channel_multiplier,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              weight_init='normal'):
     kernel_size = twice(kernel_size)
     super(DepthwiseConv2dNative, self).__init__(
         in_channels,
         channel_multiplier,
         kernel_size,
         stride,
         pad_mode,
         padding,
         dilation,
         group,
         weight_init)
     self.depthwise_conv2d_native = P.DepthwiseConv2dNative(channel_multiplier=self.channel_multiplier,
                                                            kernel_size=self.kernel_size,
                                                            mode=3,
                                                            pad_mode=self.pad_mode,
                                                            pad=self.padding,
                                                            stride=self.stride,
                                                            dilation=self.dilation,
                                                            group=self.group)
Code example #7
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              strategy=None):
     kernel_size = twice(kernel_size)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.add = P.TensorAdd(strategy)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            strategy=None)
     self.bias_add = P.BiasAdd()
Code example #8
File: conv.py Project: lg12170226/mindspore
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     self.pad_mode = pad_mode
     self.padding = padding
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning("value of `has_bias` is False, value of `bias_init` will be ignore.")
         self.bias = None
Code example #9
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              quant_delay=0,
              num_bits=8,
              per_channel=False,
              symmetric=False,
              narrow_range=False):
     kernel_size = twice(kernel_size)
     super(Conv2dQuant,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          pad_mode, padding, dilation, group, has_bias,
                          weight_init, bias_init)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group)
     self.bias_add = P.BiasAdd()
     if pad_mode not in ('valid', 'same', 'pad'):
         raise ValueError(
             'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
             ', should be one of values in \'valid\', \'same\', \'pad\'.')
     self.fake_quant_weight = nn.FakeQuantWithMinMax(
         min_init=-6,
         max_init=6,
         ema=False,
         num_bits=num_bits,
         quant_delay=quant_delay,
         per_channel=per_channel,
         channel_size=out_channels,
         symmetric=symmetric,
         narrow_range=narrow_range)
Code example #10
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_prior_fn=NormalPrior,
                 weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
                 bias_prior_fn=NormalPrior,
                 bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        super(_ConvVariational, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init='normal',
            bias_init='zeros'
        )
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')

        # convolution args
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = dilation
        self.group = group
        self.has_bias = has_bias

        # distribution trainable parameters
        self.shape = [self.out_channels,
                      self.in_channels // self.group, *self.kernel_size]

        self.weight.requires_grad = False

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()
        for prior_name, prior_dist in self.weight_prior.name_cells().items():
            if prior_name != 'normal':
                raise TypeError("The type of distribution of `weight_prior_fn` should be `normal`")
            if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                    isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                raise TypeError("The input form of `weight_prior_fn` is incorrect")

        try:
            self.weight_posterior = weight_posterior_fn(shape=self.shape, name='bnn_weight')
        except TypeError:
            raise TypeError('The input form of `weight_posterior_fn` is incorrect')
        for posterior_name, _ in self.weight_posterior.name_cells().items():
            if posterior_name != 'normal':
                raise TypeError("The type of distribution of `weight_posterior_fn` should be `normal`")

        if self.has_bias:
            self.bias.requires_grad = False

            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()
            for prior_name, prior_dist in self.bias_prior.name_cells().items():
                if prior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_prior_fn` should be `normal`")
                if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
                        isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
                    raise TypeError("The input form of `bias_prior_fn` is incorrect")

            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            for posterior_name, _ in self.bias_posterior.name_cells().items():
                if posterior_name != 'normal':
                    raise TypeError("The type of distribution of `bias_posterior_fn` should be `normal`")

        # mindspore operations
        self.bias_add = P.BiasAdd()
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.log = P.Log()
        self.sum = P.ReduceSum()
Code example #11
File: thor_layer.py Project: louis100/mindspore
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 data_format='NCHW',
                 has_bias=False,
                 weight_init='normal',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 bias_init='zeros'):
        self.thor = True
        ksizes = (1, kernel_size, kernel_size, 1)
        self.hw = kernel_size * kernel_size
        strides = (1, stride, stride, 1)
        kernel_size = twice(kernel_size)
        super(Conv2d_Thor, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            data_format,
            has_bias,
            weight_init,
            bias_init,
        )
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group
                               )

        self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.transpose02314 = P.CusTranspose02314()
        self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.matrix_A_device_shape, self.matrix_A_device_dim = caculate_device_shape(self.matrix_A_dim,
                                                                                     self.in_channels, True)
        self.matrix_G_device_shape, self.matrix_G_device_dim = caculate_device_shape(self.matrix_G_dim,
                                                                                     self.in_channels, False)
        self.matrix_A_device_temp_shape = (
            self.matrix_A_device_shape[0], self.matrix_A_device_shape[2], self.matrix_A_device_shape[1],
            self.matrix_A_device_shape[3])
        self.matrix_G_device_temp_shape = (
            self.matrix_G_device_shape[0], self.matrix_G_device_shape[2], self.matrix_G_device_shape[1],
            self.matrix_G_device_shape[3])
        self.matrix_A_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_A_device_dim).astype(np.float16), self.matrix_A_device_shape)),
            name='matrix_A_inv', requires_grad=False)
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.matrix_G_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape)),
            name="matrix_G_inv", requires_grad=False)

        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fake_G = Tensor(
            np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape))

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.vector_matmul = P.CusBatchMatMul()
        self.diag_block_dim = 128
        self.channels_slice_flag = False
        if self.in_channels % C0 != 0:
            self.channels_slice_flag = True

        self.padA_flag = False
        if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                and self.matrix_A_dim > self.diag_block_dim:
            self.padA_flag = True
            pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
            self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
        self.device_shape_pad_flag = False
        if self.matrix_A_dim != self.matrix_A_device_dim:
            self.device_shape_pad_flag = True
            self.device_shape_pad = P.Pad(((0, 0), (0, C0 - self.in_channels), (0, 0), (0, C0 - self.in_channels)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.freq = Tensor(frequency, mstype.int32)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.axis = 0

        dampingA_dim = self.matrix_A_dim
        if (self.matrix_A_dim % self.diag_block_dim) != 0 and self.matrix_A_dim > self.diag_block_dim:
            dampingA_dim = (self.matrix_A_dim // self.diag_block_dim + 1) * self.diag_block_dim
        dampingG_dim = self.matrix_G_dim
        if (self.matrix_G_dim % self.diag_block_dim) != 0 and self.matrix_G_dim > self.diag_block_dim:
            dampingG_dim = (self.matrix_G_dim // self.diag_block_dim + 1) * self.diag_block_dim

        self.dampingA = Tensor(np.identity(dampingA_dim), mstype.float32)
        self.dampingG = Tensor(np.identity(dampingG_dim), mstype.float32)
        self.fused_abs_max1 = P.CusFusedAbsMax1([self.matrix_A_dim, self.matrix_A_dim])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
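An arithmetic sketch of the Fisher-block dimensions set above (names follow the THOR code; the concrete values are illustrative): matrix_A has one row per element of an im2col patch, matrix_G one row per output channel.

in_channels, out_channels, kh, kw = 64, 128, 3, 3
matrix_A_dim = in_channels * kh * kw  # 576
matrix_G_dim = out_channels           # 128
assert (matrix_A_dim, matrix_G_dim) == (576, 128)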
Code example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 eps=1e-5,
                 momentum=0.997,
                 weight_init=None,
                 beta_init=None,
                 gamma_init=None,
                 mean_init=None,
                 var_init=None,
                 quant_delay=0,
                 freeze_bn=100000,
                 fake=True,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        """init Conv2dBatchNormQuant layer"""
        super(Conv2dBatchNormQuant, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = twice(dilation)
        self.group = group
        self.eps = eps
        self.momentum = momentum
        self.quant_delay = quant_delay
        self.freeze_bn = freeze_bn
        self.fake = fake
        self.num_bits = num_bits
        self.per_channel = per_channel
        self.symmetric = symmetric
        self.narrow_range = narrow_range
        self.is_gpu = context.get_context('device_target') == "GPU"

        # initialize convolution op and Parameter
        if context.get_context('device_target') == "Ascend" and group > 1:
            validator.check_integer('group', group, in_channels, Rel.EQ,
                                    'Conv2dBatchNormQuant')
            validator.check_integer('group', group, out_channels, Rel.EQ,
                                    'Conv2dBatchNormQuant')
            self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                                kernel_size=self.kernel_size,
                                                pad_mode=pad_mode,
                                                pad=padding,
                                                stride=self.stride,
                                                dilation=self.dilation)
            if weight_init is None:
                weight_init = initializer('normal',
                                          [1, in_channels, *self.kernel_size])
            channel_axis = 1
        else:
            self.conv = P.Conv2D(out_channel=out_channels,
                                 kernel_size=self.kernel_size,
                                 pad_mode=pad_mode,
                                 pad=padding,
                                 stride=self.stride,
                                 dilation=self.dilation,
                                 group=group)
            if weight_init is None:
                weight_init = initializer(
                    'normal',
                    [out_channels, in_channels // group, *self.kernel_size])
            channel_axis = 0
        self.weight = Parameter(weight_init, name='weight')

        # initialize batchnorm Parameter
        if gamma_init is None:
            gamma_init = initializer('ones', [out_channels])
        self.gamma = Parameter(gamma_init, name='gamma')
        if beta_init is None:
            beta_init = initializer('zeros', [out_channels])
        self.beta = Parameter(beta_init, name='beta')
        if mean_init is None:
            mean_init = initializer('zeros', [out_channels])
        self.moving_mean = Parameter(mean_init,
                                     name='moving_mean',
                                     requires_grad=False)
        if var_init is None:
            var_init = initializer('ones', [out_channels])
        self.moving_variance = Parameter(var_init,
                                         name='moving_variance',
                                         requires_grad=False)

        # initialize fake ops
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
        self.batchnorm_fold = BatchNormFoldCell(epsilon=eps,
                                                momentum=momentum,
                                                freeze_bn=freeze_bn)
        self.correct_mul = P.CorrectionMul(channel_axis)
        if context.get_context('device_target') == "Ascend":
            self.batchnorm_fold2_train = P.BatchNormFold2_D(
                freeze_bn=freeze_bn)
            self.batchnorm_fold2_infer = P.BatchNormFold2_D(freeze_bn=0)
        elif context.get_context('device_target') == "GPU":
            self.batchnorm_fold2_train = P.BatchNormFold2(freeze_bn=freeze_bn)
            self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
        else:
            raise ValueError("Unsupported platform: {}".format(
                context.get_context('device_target')))
        self.step = Parameter(initializer('normal', [1], dtype=mstype.int32),
                              name='step',
                              requires_grad=False)
        self.one = Tensor(1, mstype.int32)
        self.assignadd = P.AssignAdd()
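A shape sketch of the two weight layouts chosen above, with illustrative values: the Ascend depthwise branch stores weights as [1, in_channels, kh, kw] and folds BN statistics along channel_axis=1, while the generic Conv2D branch uses [out_channels, in_channels // group, kh, kw] with channel_axis=0.

in_channels = out_channels = group = 32
kh = kw = 3
depthwise_shape = [1, in_channels, kh, kw]                     # channel axis 1
standard_shape = [out_channels, in_channels // group, kh, kw]  # channel axis 0
assert depthwise_shape[1] == standard_shape[0] == 32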
Code example #13
def test_twice_6():
    with pytest.raises(TypeError):
        twice((3.3, 4))
Code example #14
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        self._dilation = dilation
        dilation = twice(dilation)
        super(Conv2d_Thor,
              self).__init__(in_channels, out_channels, kernel_size, stride,
                             pad_mode, padding, dilation, group, has_bias,
                             weight_init, bias_init)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self._init_depthwise_conv2d(weight_init)
        self.bias_add = P.BiasAdd()

        self.thor = True
        self.hw = kernel_size[0] * kernel_size[1]
        self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.A_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="A_normalizer",
                                      requires_grad=False)
        self.G_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="G_normalizer",
                                      requires_grad=False)
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            ksizes = (1, kernel_size[0], kernel_size[1], 1)
            strides = (1, stride[0], stride[1], 1)
            self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.transpose02314 = P.CusTranspose02314()
            dampingA_dim = self.matrix_A_dim
            self.diag_block_dim = 128
            if (self.matrix_A_dim % self.diag_block_dim
                ) != 0 and self.matrix_A_dim > self.diag_block_dim:
                dampingA_dim = (self.matrix_A_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            dampingG_dim = self.matrix_G_dim
            if (self.matrix_G_dim % self.diag_block_dim
                ) != 0 and self.matrix_G_dim > self.diag_block_dim:
                dampingG_dim = (self.matrix_G_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([dampingA_dim, dampingA_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([dampingG_dim, dampingG_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)

            self.channels_slice_flag = False
            self.C0 = 16
            if self.in_channels % self.C0 != 0:
                self.channels_slice_flag = True
            self.padA_flag = False
            if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                    and self.matrix_A_dim > self.diag_block_dim:
                self.padA_flag = True
                pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
                self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
            self.slice = P.Slice()
        else:
            self.is_Ascend = False
            self.img2col = P.Im2Col(kernel_size=kernel_size,
                                    stride=stride,
                                    pad_mode="same")
            self.matmul = P.MatMul(transpose_b=True)
            self.reduce_mean = P.ReduceMean(keep_dims=False)
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([self.matrix_A_dim,
                          self.matrix_A_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([self.matrix_G_dim,
                          self.matrix_G_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
        self.getG = P.InsertGradientOf(self.save_gradient)
Code example #15
def test_twice_4():
    with pytest.raises(TypeError):
        twice("str")
Code example #16
def test_twice_5():
    with pytest.raises(TypeError):
        twice((1, 2, 3))
Code example #17
def test_twice_2():
    assert twice((3, 3)) == (3, 3)
Code example #18
def test_twice_3():
    with pytest.raises(TypeError):
        twice(0.5)
Code example #19
def test_twice_1():
    assert twice(3) == (3, 3)
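Taken together, the twice() tests fix its contract: an int is duplicated into a 2-tuple, a 2-tuple of ints passes through unchanged, and floats, strings, and tuples of other lengths raise TypeError. A hedged reference implementation consistent with those tests (twice_sketch is a stand-in; the real mindspore._checkparam.twice may differ in detail):

def twice_sketch(value):
    # hypothetical re-implementation, inferred from the tests above
    if isinstance(value, int) and not isinstance(value, bool):
        return (value, value)
    if (isinstance(value, tuple) and len(value) == 2
            and all(isinstance(v, int) and not isinstance(v, bool) for v in value)):
        return value
    raise TypeError("expected an int or a 2-tuple of ints, got %r" % (value,))

assert twice_sketch(3) == (3, 3)
assert twice_sketch((3, 3)) == (3, 3)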
Code example #20
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test check parameter """
import pytest
import numpy as np
from mindspore._checkparam import Validator, twice

kernel_size = 5
kernel_size1 = twice(kernel_size)
assert kernel_size1 == (5, 5)


def test_check_int1():
    a = np.random.randint(-100, 100)
    assert Validator.check_is_int(a) == a


def test_check_int2():
    with pytest.raises(TypeError):
        Validator.check_is_int(3.3)


def test_check_int3():
    with pytest.raises(TypeError):
        # excerpt truncated in the source; any non-int value, e.g. a string, raises here
        Validator.check_is_int("3")
Code example #21
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_prior_fn=NormalPrior,
                 weight_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape),
                 bias_prior_fn=NormalPrior,
                 bias_posterior_fn=lambda name, shape: NormalPosterior(
                     name=name, shape=shape)):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        super(_ConvVariational, self).__init__(in_channels,
                                               out_channels,
                                               kernel_size,
                                               stride,
                                               pad_mode,
                                               padding,
                                               dilation,
                                               group,
                                               has_bias,
                                               weight_init='normal',
                                               bias_init='zeros')
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')

        # convolution args
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = dilation
        self.group = group
        self.has_bias = has_bias

        # distribution trainable parameters
        self.shape = [
            self.out_channels, self.in_channels // self.group,
            *self.kernel_size
        ]

        self.weight.requires_grad = False

        if isinstance(weight_prior_fn, Cell):
            self.weight_prior = weight_prior_fn
        else:
            self.weight_prior = weight_prior_fn()

        self.weight_posterior = weight_posterior_fn(shape=self.shape,
                                                    name='bnn_weight')

        if self.has_bias:
            self.bias.requires_grad = False

            if isinstance(bias_prior_fn, Cell):
                self.bias_prior = bias_prior_fn
            else:
                self.bias_prior = bias_prior_fn()

            self.bias_posterior = bias_posterior_fn(shape=[self.out_channels],
                                                    name='bnn_bias')

        # mindspore operations
        self.bias_add = P.BiasAdd()
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.log = P.Log()
        self.sum = P.ReduceSum()
Code example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 data_format='NCHW',
                 has_bias=False,
                 weight_init='normal',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 bias_init='zeros'):
        self.skfac = True
        self.hw = kernel_size * kernel_size
        kernel_size = twice(kernel_size)
        super(Conv2d_SKFAC_GPU, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            data_format,
            has_bias,
            weight_init,
            bias_init,
        )
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)

        self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        split_dim = 128
        self.matrix_A_inv = Parameter(np.zeros(
            (self.matrix_A_dim, self.matrix_A_dim)).astype(np.float32),
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(np.zeros(
            (self.matrix_G_dim, self.matrix_G_dim)).astype(np.float32),
                                      requires_grad=False)

        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  requires_grad=False)
        self.img2col = P.Im2Col(kernel_size=kernel_size,
                                stride=stride,
                                pad_mode="same")
        self.matmul = P.MatMul(transpose_a=True)
        self.matmul_ = P.MatMul()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.transpose = P.Transpose()
        self.cast = P.Cast()
        self.gather = P.Gather()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.sqrt = P.Sqrt()
        self.reduce_mean = P.ReduceMean(keep_dims=False)
        self.damping = Parameter(Tensor(damping), requires_grad=False)
        self.dampingA = Tensor(np.identity(batch_size), mstype.float32)
        self.dampingG = Tensor(np.identity(batch_size), mstype.float32)
        self.I_G = Tensor(np.identity(out_channels), mstype.float32)
        self.I_A = Tensor(np.identity(self.matrix_A_dim), mstype.float32)
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
        self.batch_coefficient = Tensor((1 / 32)**0.5, mstype.float32)
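        # note: (1 / 32)**0.5 assumes a batch size of 32 and does not track
        # the batch_size argument received by this constructor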
Code example #23
File: quant.py Project: lyc4614/mindspore
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 eps=1e-5,
                 momentum=0.997,
                 weight_init=None,
                 beta_init=None,
                 gamma_init=None,
                 mean_init=None,
                 var_init=None,
                 quant_delay=0,
                 freeze_bn=100000,
                 fake=True,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dBatchNormQuant, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = twice(dilation)
        self.stride = twice(stride)
        self.group = group
        self.fake = fake
        self.freeze_bn = freeze_bn
        self.momentum = momentum
        self.quant_delay = quant_delay
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if gamma_init is None:
            gamma_init = initializer('ones', [out_channels])
        self.gamma = Parameter(gamma_init, name='gamma')
        if beta_init is None:
            beta_init = initializer('zeros', [out_channels])
        self.beta = Parameter(beta_init, name='beta')
        if mean_init is None:
            mean_init = initializer('zeros', [out_channels])
        self.moving_mean = Parameter(mean_init,
                                     name='moving_mean',
                                     requires_grad=False)
        if var_init is None:
            var_init = initializer('ones', [out_channels])
        self.moving_variance = Parameter(var_init,
                                         name='moving_variance',
                                         requires_grad=False)

        self.step = Parameter(initializer('normal', [1], dtype=mstype.int32),
                              name='step',
                              requires_grad=False)

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
        self.batchnorm_fold_train = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=True,
                                                    freeze_bn=freeze_bn)
        self.batchnorm_fold_infer = P.BatchNormFold(epsilon=eps,
                                                    momentum=momentum,
                                                    is_training=False,
                                                    freeze_bn=freeze_bn)
        self.correct_mul = P.CorrectionMul()
        self.relu = P.ReLU()
        self.batchnorm_fold2 = P.BatchNormFold2(freeze_bn=freeze_bn)
        self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
        self.one = Tensor(1, mstype.int32)
        self.assignadd = P.AssignAdd()