Example No. 1
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True):
        super(GNNFeatureTransform, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.has_bias = has_bias

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
               weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(
            initializer(weight_init, [out_channels, in_channels]))

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]))

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
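
Only the initializer is shown; a minimal construct sketch for this cell, assuming the standard MindSpore dense pattern (input of shape [batch, in_channels]), would be:

    def construct(self, x):
        # [batch, in_channels] x [out_channels, in_channels]^T -> [batch, out_channels]
        output = self.matmul(x, self.weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        return output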
Example No. 2
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True):
        super(GNNFeatureTransform, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
Example No. 3
 def __init__(self, input_channels, output_channels, weight_init):
     super(DenseNoTranpose, self).__init__()
     self.weight = Parameter(initializer(weight_init, [input_channels, output_channels], mstype.float32),
                             name="weight")
     self.bias = Parameter(initializer("zeros", [output_channels], mstype.float32), name="bias")
     self.matmul = P.MatMul(transpose_b=False)
     self.bias_add = P.BiasAdd()
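
Since the weight is stored as [input_channels, output_channels] and MatMul uses transpose_b=False, a forward pass (hypothetical sketch, not part of the original snippet) multiplies without transposing:

 def construct(self, x):
     # weight is [input_channels, output_channels]; no transpose needed
     output = self.matmul(x, self.weight)
     return self.bias_add(output, self.bias)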
Example No. 4
    def __init__(self,
                 in_planes,
                 kernel_size,
                 stride,
                 pad_mode,
                 pad,
                 channel_multiplier=1,
                 has_bias=False):
        super(DepthwiseConv, self).__init__()
        self.has_bias = has_bias
        self.in_channels = in_planes
        self.channel_multiplier = channel_multiplier
        self.out_channels = in_planes * channel_multiplier
        self.kernel_size = (kernel_size, kernel_size)
        self.depthwise_conv = P.DepthwiseConv2dNative(
            channel_multiplier=channel_multiplier,
            kernel_size=self.kernel_size,
            stride=stride,
            pad_mode=pad_mode,
            pad=pad)
        self.bias_add = P.BiasAdd()
        weight_shape = [channel_multiplier, in_planes, *self.kernel_size]
        self.weight = Parameter(initializer('ones', weight_shape),
                                name='weight')

        if has_bias:
            bias_shape = [channel_multiplier * in_planes]
            self.bias = Parameter(initializer('zeros', bias_shape),
                                  name='bias')
        else:
            self.bias = None
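
A plausible forward pass (assumed here; DepthwiseConv2dNative takes the input and the weight as call arguments) applies the convolution and then the optional bias:

    def construct(self, x):
        out = self.depthwise_conv(x, self.weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        return out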
Example No. 5
 def __init__(self,
              input_dim,
              output_dim,
              weight_bias_init,
              act_str,
              keep_prob=0.8,
              scale_coef=1.0,
              use_activation=True,
              convert_dtype=True,
              drop_out=False):
     super(DenseLayer, self).__init__()
     weight_init, bias_init = weight_bias_init
     self.weight = init_method(weight_init, [input_dim, output_dim],
                               name="weight")
     self.bias = init_method(bias_init, [output_dim], name="bias")
     self.act_func = self._init_activation(act_str)
     self.matmul = P.MatMul(transpose_b=False)
     self.bias_add = P.BiasAdd()
     self.cast = P.Cast()
     self.dropout = Dropout(keep_prob=keep_prob)
     self.mul = P.Mul()
     self.realDiv = P.RealDiv()
     self.scale_coef = scale_coef
     self.use_activation = use_activation
     self.convert_dtype = convert_dtype
     self.drop_out = drop_out
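
The flags suggest a forward pass that optionally applies dropout, casts to float16 for the matmul (mstype is assumed to be imported as in the other examples), and finishes with the activation. A sketch under those assumptions, since the original construct is not shown:

 def construct(self, x):
     if self.drop_out:
         x = self.dropout(x)
     if self.convert_dtype:
         # assumed mixed-precision path: run the matmul in float16
         x = self.cast(x, mstype.float16)
         weight = self.cast(self.weight, mstype.float16)
         bias = self.cast(self.bias, mstype.float16)
         out = self.bias_add(self.matmul(x, weight), bias)
         out = self.cast(out, mstype.float32)
     else:
         out = self.bias_add(self.matmul(x, self.weight), self.bias)
     if self.use_activation:
         out = self.act_func(out)
     return out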
Example No. 6
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     kernel_size = twice(kernel_size)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group)
     self.bias_add = P.BiasAdd()
     if pad_mode not in ('valid', 'same', 'pad'):
         raise ValueError(
             'Attr \'pad_mode\' of \'Conv2d\' Op passed ' + str(pad_mode) +
             ', should be one of values in \'valid\', \'same\', \'pad\'.')
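
The matching forward pass (sketch, following MindSpore's Conv2d pattern):

 def construct(self, x):
     output = self.conv2d(x, self.weight)
     if self.has_bias:
         output = self.bias_add(output, self.bias)
     return output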
Example No. 7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init=None,
                 bias_init=None,
                 quant_delay=0,
                 num_bits=8,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(Conv2dQuant, self).__init__()
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = has_bias
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group
        self.quant_delay = quant_delay

        if weight_init is None:
            weight_init = initializer(
                'normal',
                [out_channels, in_channels // group, *self.kernel_size])
        self.weight = Parameter(weight_init, name='weight')
        if bias_init is None:
            bias_init = initializer('zeros', [out_channels])
        if has_bias:
            self.bias = Parameter(bias_init, name='bias')
            self.bias_add = P.BiasAdd()

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        self.fake_quant_weight = FakeQuantWithMinMax(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     num_bits=num_bits,
                                                     quant_delay=quant_delay,
                                                     per_channel=per_channel,
                                                     out_channels=out_channels,
                                                     symmetric=symmetric,
                                                     narrow_range=narrow_range)
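
In quantization-aware layers like this, the weight passes through the fake-quant node before the convolution; a hedged sketch of the forward pass:

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)
        out = self.conv(x, weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        return out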
Example No. 8
 def __init__(self,
              in_channel,
              out_channel,
              kernel_size,
              mode=1,
              pad=0,
              stride=1,
              dilation=1,
              group=1,
              output_padding=0,
              data_format="NCDHW",
              bias_init="zeros",
              has_bias=True):
     super().__init__()
     self.weight_shape = (in_channel, out_channel, kernel_size[0],
                          kernel_size[1], kernel_size[2])
     self.weight = weight_variable(self.weight_shape)
     self.conv_transpose = nps.Conv3DTranspose(in_channel=in_channel, out_channel=out_channel,
                                               kernel_size=kernel_size, mode=mode, pad=pad, stride=stride,
                                               dilation=dilation, group=group, output_padding=output_padding,
                                               data_format=data_format)
     self.bias_init = bias_init
     self.has_bias = has_bias
     self.bias_add = P.BiasAdd(data_format=data_format)
     if self.has_bias:
         self.bias = Parameter(initializer(self.bias_init, [out_channel]),
                               name='bias')
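
A forward sketch (assumed) feeds the input and weight to the transpose op, then adds the bias in the NCDHW layout configured on BiasAdd:

 def construct(self, x):
     output = self.conv_transpose(x, self.weight)
     if self.has_bias:
         output = self.bias_add(output, self.bias)
     return output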
Example No. 9
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              data_format='NCHW'):
     kernel_size = twice(kernel_size)
     stride = twice(stride)
     self._dilation = dilation
     dilation = twice(dilation)
     super(Conv2d,
           self).__init__(in_channels, out_channels, kernel_size, stride,
                          pad_mode, padding, dilation, group, has_bias,
                          weight_init, bias_init, data_format)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            data_format=self.format)
     self._init_depthwise_conv2d()
     self.bias_add = P.BiasAdd()
Example No. 10
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor_GPU, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]))

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]))

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        split_dim = 128
        matrix_A_shape, matrix_G_shape = caculate_matmul_shape(self.in_channels, self.out_channels, split_dim)
        self.matrix_A_inv = Parameter(Tensor(np.zeros(matrix_A_shape).astype(np.float32)), requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros(matrix_G_shape).astype(np.float32)), requires_grad=False)
        self.broadcast_to = P.BroadcastTo(matrix_A_shape)
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.cube_matmul = P.MatMul(transpose_a=True)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.damping = Parameter(Tensor(damping), requires_grad=False)
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.cast = P.Cast()
        self.gather = P.Gather()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.add = P.Add()
        self.sqrt = P.Sqrt()
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
Example No. 11
 def __init__(self, in_channel, x):
     super().__init__()
     #self._save_graphs(save_graph_flag=True, save_graph_path=".")
     self.biasadd = P.BiasAdd()
     self.equal = P.Equal()
     self.addn = P.AddN()
     self.conv = Conv2d(in_channels=in_channel,
                        out_channels=in_channel,
                        kernel_size=1,
                        stride=1,
                        has_bias=False,
                        weight_init='ones',
                        pad_mode='same')
     self.bn = BatchNorm2d(num_features=in_channel)
     self.assignadd = P.AssignAdd()
     self.assign = P.Assign()
     self.relu = ReLU()
     self.mean = P.ReduceMean(keep_dims=False)
     self.bias = Parameter(Tensor(
          np.random.randint(2, size=(3,)).astype(np.float32)),
                           name="bias")
     self.bias2 = Parameter(Tensor(np.ones([3]).astype(np.float32)),
                            name="bias2")
     self.parameterupdate = ParameterUpdate(self.bias)
     self.value = Tensor(np.random.randn(*(3, )), ms.float32)
     self.x = x
Example No. 12
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              strategy=None):
     kernel_size = twice(kernel_size)
     super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, pad_mode, padding, dilation,
                                  group, has_bias, weight_init, bias_init)
     self.add = P.TensorAdd(strategy)
     self.conv2d = P.Conv2D(out_channel=self.out_channels,
                            kernel_size=self.kernel_size,
                            mode=1,
                            pad_mode=self.pad_mode,
                            pad=self.padding,
                            stride=self.stride,
                            dilation=self.dilation,
                            group=self.group,
                            strategy=None)
     self.bias_add = P.BiasAdd()
Example No. 13
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
               weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
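
For reference, the forward pass that pairs with this initializer follows the usual Dense op order (sketch):

    def construct(self, x):
        x = self.matmul(x, self.weight)
        if self.has_bias:
            x = self.bias_add(x, self.bias)
        if self.activation_flag:
            x = self.activation(x)
        return x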
Example No. 14
    def __init__(self,
                 in_planes,
                 kernel_size,
                 stride,
                 pad_mode,
                 pad,
                 channel_multiplier=1,
                 has_bias=False):
        super(DepthWiseConv, self).__init__()
        self.has_bias = has_bias
        self.depthwise_conv = P.DepthwiseConv2dNative(
            channel_multiplier=channel_multiplier,
            kernel_size=kernel_size,
            stride=stride,
            pad_mode=pad_mode,
            pad=pad)
        self.bias_add = P.BiasAdd()

        weight_shape = [
            channel_multiplier, in_planes, kernel_size[0], kernel_size[1]
        ]
        self.weight = Parameter(initializer('ones', weight_shape))

        if has_bias:
            bias_shape = [channel_multiplier * in_planes]
            self.bias = Parameter(initializer('zeros', bias_shape))
        else:
            self.bias = None
Example No. 15
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.reshape = P.Reshape()
        self.shape_op = P.Shape()


        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation) if isinstance(activation, str) else activation
        if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
            raise TypeError("The activation must be str or Cell or Primitive,"" but got {}.".format(activation))
        self.activation_flag = self.activation is not None
Example No. 16
    def __init__(
            self,
            in_channels,
            out_channels,
            activation=None,
            has_bias=True,
            weight_prior_fn=NormalPrior,
            weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
            bias_prior_fn=NormalPrior,
            bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
        super(_DenseVariational, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_prior_fn, Cell):
            if weight_prior_fn.__class__.__name__ != 'NormalPrior':
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn
        else:
            if weight_prior_fn.__name__ != 'NormalPrior':
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
            self.weight_prior = weight_prior_fn()

        try:
            self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
        except TypeError:
            raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')

        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
                if bias_prior_fn.__class__.__name__ != 'NormalPrior':
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn
            else:
                if bias_prior_fn.__name__ != 'NormalPrior':
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
                self.bias_prior = bias_prior_fn()

            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')

        self.activation = activation
        if not self.activation:
            self.activation_flag = False
        else:
            self.activation_flag = True
            if isinstance(self.activation, str):
                self.activation = get_activation(activation)
            elif isinstance(self.activation, Cell):
                self.activation = activation
            else:
                raise ValueError('The type of `activation` is wrong.')

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()
        self.sum = P.ReduceSum()
Example No. 17
 def __init__(self):
     super(Net, self).__init__()
     self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)),
                             name="weight")
      self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)),
                           name="bias")
     self.matmul = P.MatMul()
     self.biasAdd = P.BiasAdd()
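
A two-op forward for this test net (hypothetical, consistent with the 64x10 weight and length-10 bias):

 def construct(self, x):
     # x: [batch, 64] -> [batch, 10], then add the length-10 bias
     return self.biasAdd(self.matmul(x, self.weight), self.bias)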
Example No. 18
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):

        Validator.check_value_type("kernel_size", kernel_size, [int],
                                   self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE,
                                self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape',
                                    len(weight_init_shape), 3, Rel.EQ,
                                    self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)

        super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride, pad_mode, padding, dilation,
                                     group, has_bias, weight_init, bias_init)
        self.padding = (0, 0, padding, padding)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self.bias_add = P.BiasAdd()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv1d\' Op passed ' + str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
        self.shape = P.Shape()
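
The expand_dims/squeeze pair shows the intent: the 3-D input is lifted to 4-D so Conv2D can process it, then collapsed back. A sketch of that forward path (the original construct is omitted):

    def construct(self, x):
        x = self.expand_dims(x, 2)            # [N, C, W] -> [N, C, 1, W]
        output = self.conv2d(x, self.weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        output = self.squeeze(output)         # [N, C, 1, W] -> [N, C, W]
        return output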
Example No. 19
 def __init__(self):
     super(BatchnormNet, self).__init__()
     self.conv1 = nn.Conv2d(3, 4, kernel_size=8, stride=2, pad_mode="pad", padding=3)
     self.bn1 = nn.BatchNorm2d(4)
     self.flatten = P.Flatten()
     self.weight = Parameter(Tensor(np.ones([64, 10], np.float32)), name="weight")
     self.bias = Parameter(Tensor(np.ones([10], np.float32)), name="bias")
     self.fc = P.MatMul()
     self.biasAdd = P.BiasAdd()
Example No. 20
 def __init__(self):
     super(Net, self).__init__()
     self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")
     self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)), name="bias")
     self.fc = P.MatMul()
     self.fc2 = nn.Dense(10, 10)
     self.biasAdd = P.BiasAdd()
     self.relu = nn.ReLU()
     self.cast = P.Cast()
Example No. 21
 def __init__(self):
     super(Net, self).__init__()
     self.mul1 = P.Mul().shard(((2, 4), (2, 4)))
     self.mul2 = P.Mul()
     self.ba1 = P.BiasAdd()
     self.weight = Parameter(Tensor(np.ones([128, 1000]), dtype=ms.float32), name="weight")
     self.bias = Parameter(Tensor(np.ones([1000]), dtype=ms.float32), name="bias")
     self.add = P.TensorAdd().shard(((1, 8), (1, 8)))
     self.relu = P.ReLU()
Example No. 22
    def __init__(self, input_channels, output_channels, weight_init):
        super(DenseNoTranpose, self).__init__()
        self.weight = Parameter(initializer(weight_init, [input_channels, output_channels], mstype.float32))
        self.bias = Parameter(initializer("zeros", [output_channels], mstype.float32))

        self.matmul = P.MatMul(transpose_b=False)
        self.bias_add = P.BiasAdd()
        self.cast = P.Cast()
        self.device_type = "Ascend" if context.get_context("device_target") == "Ascend" else "Others"
Example No. 23
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        # out_channels and in_channels are swapped because Conv2DBackpropInput's
        # out_channel refers to Conv2D's out_channel, so Conv2dTranspose's
        # out_channel maps to Conv2DBackpropInput's in_channel.
        super(Conv2dTranspose, self).__init__(in_channels,
                                              out_channels,
                                              kernel_size,
                                              stride,
                                              pad_mode,
                                              padding,
                                              dilation,
                                              group,
                                              has_bias,
                                              weight_init,
                                              bias_init,
                                              transposed=True)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError(
                'Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed ' +
                str(pad_mode) +
                ', should be one of values in \'valid\', \'same\', \'pad\'.')
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name='bias')

        # Conv2DBackpropInput's out_channel refers to Conv2D's out_channel, hence in_channels here.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
Example No. 24
    def __init__(self, nx, nf):
        super(Conv1D, self).__init__()
        self.nx = nx
        self.nf = nf
        self.weight = Parameter(normal_weight([nx, nf], nf),
                                name='projection_weight')
        self.bias = Parameter(zero_weight(nf), name='projection_bias')

        self.matmul = P.MatMul()
        # self.add = P.TensorAdd()
        self.bias_add = P.BiasAdd()
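
This GPT-style Conv1D is a dense projection over the last axis; for a 2-D input of shape [tokens, nx], a minimal sketch (assumed) is:

    def construct(self, x):
        # [tokens, nx] x [nx, nf] -> [tokens, nf]
        output = self.matmul(x, self.weight)
        return self.bias_add(output, self.bias)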
Example No. 25
 def __init__(self):
     super().__init__()
     inputs = np.array([[1, 1], [1, 1]])
     self.parameter1 = Parameter(Tensor(inputs, ms.float32),
                                 name="parameter1")
     biasadd = np.array([0, -1])
     self.parameter2 = Parameter(Tensor(biasadd, ms.float32),
                                 name="biasadd")
     self.assign = P.Assign()
     self.matmul = P.MatMul()
     self.biasadd = P.BiasAdd()
Example No. 26
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(DepthwiseConv2d, self).__init__()
     self.kernel_size = twice(kernel_size)
     self.stride = twice(stride)
     self.dilation = twice(dilation)
     self.in_channels = check_int_positive(in_channels)
     self.out_channels = check_int_positive(out_channels)
     validator.check_integer('group', group, in_channels, Rel.EQ)
     validator.check_integer('group', group, out_channels, Rel.EQ)
     validator.check_integer('group', group, 1, Rel.GE)
     self.pad_mode = pad_mode
     self.dilation = dilation
     self.group = group
     self.has_bias = has_bias
     self.weight_init = weight_init
     self.bias_init = bias_init
     Validator.check_value_type('padding', padding, (int, tuple),
                                self.cls_name)
     if isinstance(padding, tuple):
         Validator.check_integer('padding size', len(padding), 4, Rel.EQ,
                                 self.cls_name)
     self.padding = padding
     self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                         kernel_size=self.kernel_size,
                                         pad_mode=self.pad_mode,
                                         pad=self.padding,
                                         stride=self.stride,
                                         dilation=self.dilation)
     self.bias_add = P.BiasAdd()
     weight_shape = [1, in_channels, *self.kernel_size]
     self.weight = Parameter(initializer(weight_init, weight_shape),
                             name='weight')
     if check_bool(has_bias):
         self.bias = Parameter(initializer(bias_init, [out_channels]),
                               name='bias')
     else:
         if bias_init != 'zeros':
             logger.warning(
                 "value of `has_bias` is False, value of `bias_init` will be ignore."
             )
         self.bias = None
Example No. 27
 def __init__(self, *args, **kwargs):
     super(Conv1d, self).__init__(*args, **kwargs)
     self.clear_buffer()
     self._linearized_weight = None
     self.transpose_op = P.Transpose()
     self.reshape_op = P.Reshape()
     self.squeeze_op = P.Squeeze(-2)
     self.zeros = P.Zeros()
     self.concat_op = P.Concat(axis=1)
     self.matmul = P.MatMul(transpose_b=True)
     self.bias_add = P.BiasAdd()
     self.get_weight = None
     self.get_bias = None
Example No. 28
    def __init__(self,
                 output_channels,
                 bias_init='zeros',
                 ):
        super(Net, self).__init__()
        self.biasAdd = P.BiasAdd()

        if isinstance(bias_init, Tensor):
            if bias_init.dim() != 1 or bias_init.shape()[0] != output_channels:
                raise ValueError("bias_init shape error")

        self.bias = Parameter(initializer(
            bias_init, [output_channels]), name="bias")
Example No. 29
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None,
                 num_bits=8,
                 quant_delay=0,
                 per_channel=False,
                 symmetric=False,
                 narrow_range=False):
        super(DenseQuant, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
                    weight_init.shape()[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape(
                )[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        self.fake_quant_weight = nn.FakeQuantWithMinMax(
            min_init=-6,
            max_init=6,
            ema=False,
            num_bits=num_bits,
            quant_delay=quant_delay,
            per_channel=per_channel,
            channel_size=out_channels,
            symmetric=symmetric,
            narrow_range=narrow_range)
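
As with the quantized conv above, the weight is expected to pass through the fake-quant node before the matmul; a hedged forward sketch:

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)
        output = self.matmul(x, weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        if self.activation_flag:
            return self.activation(output)
        return output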
Example No. 30
 def __init__(self):
     super(BlockNet, self).__init__()
     self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode="pad", padding=3)
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
     self.block_down_sample = ResidualBlock(
         64, 256, stride=1, down_sample=True
     )
     self.flatten = P.Flatten()
     self.weight = Parameter(Tensor(np.ones([1024, 10]).astype(np.float32)), name="weight")
     self.bias = Parameter(Tensor(np.ones([10]).astype((np.float32))), name="bias")
     self.fc = P.MatMul()
     self.biasAdd = P.BiasAdd()
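
The layer list implies the usual forward chain for this block net (hypothetical sketch; the exact wiring is not shown in the snippet):

 def construct(self, x):
     x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
     x = self.block_down_sample(x)
     x = self.flatten(x)
     return self.biasAdd(self.fc(x, self.weight), self.bias)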