Example #1
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 bidirectional, weight, labels, batch_size):
        super(SentimentNet, self).__init__()
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.batch_size = batch_size

        self.embedding = nn.Embedding(vocab_size, embed_size, use_one_hot=False,
                                      embedding_table=Tensor(weight))
        self.embedding.embedding_table.requires_grad = False
        self.trans = P.Transpose()
        self.perm = (1, 0, 2)
        self.h, self.c, self.w = InitialLstmWeight(embed_size, num_hiddens, num_layers, bidirectional)
        self.encoder = P.LSTM(input_size=embed_size, hidden_size=self.num_hiddens,
                              num_layers=num_layers, has_bias=False,
                              bidirectional=self.bidirectional, dropout=0.0)
        self.concat = P.Concat(2)
        if self.bidirectional:
            self.decoder = nn.Dense(num_hiddens * 4, labels)
        else:
            self.decoder = nn.Dense(num_hiddens * 2, labels)

        self.slice1 = P.Slice()
        self.slice2 = P.Slice()
        self.reshape = P.Reshape()

        self.num_direction = 1
        if bidirectional:
            self.num_direction = 2
Example #2
    def __init__(self):
        super(Slice, self).__init__()

        self.cat = P.Slice()
        self.x1 = Parameter(initializer(
            Tensor(np.array([[[1, -1, 1], [2, -2, 2]], [[3, -3, 3], [4, -4, 4]], [[5, -5, 5], [6, -6, 6]]]).astype(
                np.float32)), [3, 2, 3]), name='x1')
Example #3
 def __init__(self, central_fraction):
     super(CentralCrop, self).__init__()
     validator.check_value_type("central_fraction", central_fraction, [float], self.cls_name)
     self.central_fraction = validator.check_number_range('central_fraction', central_fraction,
                                                          0.0, 1.0, Rel.INC_RIGHT, self.cls_name)
     self.central_fraction_tensor = Tensor(np.array([central_fraction]).astype(np.float64))
     self.slice = P.Slice()
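
For a central_fraction f, CentralCrop keeps the middle fraction f of each spatial dimension, so the crop begins at roughly (1 - f) / 2 * dim on each axis. A minimal NumPy sketch of that arithmetic (an illustration, not the source implementation):

 def central_crop_np(img, central_fraction):
     # img: (C, H, W); keep the central `central_fraction` of H and W
     c, h, w = img.shape
     bh = int((h - h * central_fraction) / 2)
     bw = int((w - w * central_fraction) / 2)
     return img[:, bh:h - bh, bw:w - bw]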
Example #4
 def __init__(self, weight2, begin, end, strategy1=None, strategy2=None):
     super().__init__()
     self.mul = P.Mul().shard(strategy1)
     self.slice = P.Slice().shard(strategy2)
     self.weight2 = Parameter(weight2, "w2")
     self.begin = begin
     self.end = end
Example #5
    def __init__(self, batch_size=4):
        super(DiceLoss, self).__init__()

        self.threshold0 = Tensor(0.5, mstype.float32)
        self.zero_float32 = Tensor(0.0, mstype.float32)
        self.k = int(640 * 640)
        self.negative_one_int32 = Tensor(-1, mstype.int32)
        self.batch_size = batch_size
        self.concat = P.Concat()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
        self.reduce_mean = P.ReduceMean()
        self.reduce_min = P.ReduceMin()
        self.cast = P.Cast()
        self.minimum = P.Minimum()
        self.expand_dims = P.ExpandDims()
        self.select = P.Select()
        self.fill = P.Fill()
        self.topk = P.TopK(sorted=True)
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.slice = P.Slice()
        self.logical_and = P.LogicalAnd()
        self.logical_or = P.LogicalOr()
        self.equal = P.Equal()
        self.zeros_like = P.ZerosLike()
        self.add = P.TensorAdd()
        self.gather = P.Gather()
Example #6
File: loss.py  Project: dongkcs/mindspore
 def __init__(self, num_sampled, num_classes, num_true=1,
              sampled_values=None, remove_accidental_hits=True, seed=0,
              reduction='none'):
     super(SampledSoftmaxLoss, self).__init__()
     self.num_sampled = num_sampled
     self.num_classes = num_classes
     self.num_true = num_true
     self.sampled_values = sampled_values
     self.remove_accidental_hits = remove_accidental_hits
     self.seed = seed
     self.sampler = P.UniformSampler(
         num_true,
         num_sampled,
         True,
         num_classes,
         seed,
         remove_accidental_hits)
     self.cast = P.Cast()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.exp = P.Exp()
     self.log = P.Log()
     self.slice_op = P.Slice()
     self.matmul = P.MatMul(False, True)
     self.gather_v2 = P.GatherV2()
     self.reduce_max_true = P.ReduceMax(True)
     self.reduce_sum = P.ReduceSum()
     self.reduce_sum_true = P.ReduceSum(True)
     self.concat_dim0 = P.Concat(0)
     self.concat_dim1 = P.Concat(1)
     self.ones_like = P.OnesLike()
     self.zeros_like = P.ZerosLike()
     self.mul = P.Mul()
     self.expand_dims = P.ExpandDims()
Example #7
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 use_sigmoid_cls,
                 target_means=(.0, .0, .0, .0),
                 target_stds=(1.0, 1.0, 1.0, 1.0)
                 ):
        super(Proposal, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = config.use_sigmoid_cls

        if self.use_sigmoid_cls:
            self.cls_out_channels = 1
            self.activation = P.Sigmoid()
            self.reshape_shape = (-1, 1)
        else:
            self.cls_out_channels = num_classes
            self.activation = P.Softmax(axis=1)
            self.reshape_shape = (-1, 2)

        if self.cls_out_channels <= 0:
            raise ValueError('num_classes={} is too small'.format(num_classes))

        self.num_pre = cfg.rpn_proposal_nms_pre
        self.min_box_size = cfg.rpn_proposal_min_bbox_size
        self.nms_thr = cfg.rpn_proposal_nms_thr
        self.nms_post = cfg.rpn_proposal_nms_post
        self.nms_across_levels = cfg.rpn_proposal_nms_across_levels
        self.max_num = cfg.rpn_proposal_max_num

        # Op Define
        self.squeeze = P.Squeeze()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.feature_shapes = cfg.feature_shapes

        self.transpose_shape = (1, 2, 0)

        self.decode = BoundingBoxDecode()

        self.nms = P.NMSWithMask(self.nms_thr)
        self.concat_axis0 = P.Concat(axis=0)
        self.concat_axis1 = P.Concat(axis=1)
        self.split = P.Split(axis=1, output_num=5)
        self.min = P.Minimum()
        self.gatherND = P.GatherNd()
        self.slice = P.Slice()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()
        self.tile = P.Tile()
        self.set_train_local(config, training=True)

        self.multi_10 = Tensor(10.0, mstype.float16)
Example #8
def test_slice_float64():
    data = Tensor(
        np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
                  [[5, 5, 5], [6, 6, 6]]]).astype(np.float64))
    slice_op = P.Slice()
    output = slice_op(data, (1, 0, 0), (1, 1, 3))
    expect = [[[3.0, 3.0, 3.0]]]
    assert (output.asnumpy() == expect).all()
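
For reference, P.Slice takes a begin offset and a size for each dimension rather than an end index, so the call above reads one 1x1x3 block starting at (1, 0, 0). A minimal NumPy sketch of the same indexing (same data, same begin/size):

    arr = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
                    [[5, 5, 5], [6, 6, 6]]]).astype(np.float64)
    # begin=(1, 0, 0), size=(1, 1, 3)  ->  arr[1:2, 0:1, 0:3]
    assert (arr[1:2, 0:1, 0:3] == [[[3.0, 3.0, 3.0]]]).all()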
Example #9
File: image.py  Project: CaoE/mindspore
 def __init__(self, central_fraction):
     super(CentralCrop, self).__init__()
     validator.check_value_type("central_fraction", central_fraction,
                                [float], self.cls_name)
     validator.check_float_range(central_fraction, 0.0, 1.0, Rel.INC_RIGHT,
                                 'central_fraction', self.cls_name)
     self.central_fraction = central_fraction
     self.slice = P.Slice()
Example #10
    def __init__(self,
                 num_sampled,
                 num_classes,
                 num_true=1,
                 sampled_values=None,
                 remove_accidental_hits=True,
                 seed=0,
                 reduction='none'):
        super(SampledSoftmaxLoss, self).__init__(reduction)

        if num_true < 1:
            raise ValueError(f"num_true {num_true} is less than 1.")
        if seed < 0:
            raise ValueError(f"seed {seed} is less than 0.")
        if num_sampled > num_classes:
            raise ValueError(
                f"num_sampled {num_sampled} is great than num_classes {num_classes}."
            )
        if num_true > num_classes:
            raise ValueError(
                f"num_true {num_true} is great than num_classes {num_classes}."
            )
        if sampled_values is not None:
            if not isinstance(sampled_values, (list, tuple)):
                raise TypeError(
                    f"sampled_values {sampled_values} is not a list or tuple.")
            if len(sampled_values) != 3:
                raise ValueError(
                    f"sampled_values size {len(sampled_values)} is not 3.")

        self.num_sampled = num_sampled
        self.num_classes = num_classes
        self.num_true = num_true
        self.sampled_values = sampled_values
        self.remove_accidental_hits = remove_accidental_hits
        self.seed = seed
        self.sampler = P.LogUniformCandidateSampler(num_true, num_sampled,
                                                    True, num_classes, seed)
        self.cast = P.Cast()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        self.exp = P.Exp()
        self.log = P.Log()
        self.slice_op = P.Slice()
        self.matmul = P.MatMul(False, True)
        self.gather_v2 = P.Gather()
        self.reduce_max_true = P.ReduceMax(True)
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_true = P.ReduceSum(True)
        self.concat_dim0 = P.Concat(0)
        self.concat_dim1 = P.Concat(1)
        self.ones_like = P.OnesLike()
        self.zeros_like = P.ZerosLike()
        self.mul = P.Mul()
        self.expand_dims = P.ExpandDims()
        self.dtype = P.DType()
        self.compute_accidental_hits = P.ComputeAccidentalHits(num_true)
        self.scatter_nd = P.ScatterNd()
Example #11
 def __init__(self, stencil_width=1, lb=(0, 0, 0), rb=(1, 0, 0)):
     super(AXF, self).__init__()
     self.stencil_width = stencil_width
     self.pad = P.Pad(((self.stencil_width, self.stencil_width),
                       (self.stencil_width, self.stencil_width),
                       (self.stencil_width, self.stencil_width)))
     self.slice = P.Slice()
     self.shape = P.Shape()
     self.axf_kernel = axf_kernel()
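
The Pad-then-Slice pair is the usual way to read a shifted stencil window: pad every axis by stencil_width, then slice out a window of the original shape at an offset derived from lb/rb. A NumPy sketch of the idea (how lb/rb combine into the offset is an assumption, not taken from the source):

 def shifted_read_np(x, stencil_width=1, offset=(0, 0, 0)):
     # pad all three axes, then read an original-shape window at `offset`
     padded = np.pad(x, stencil_width)
     begin = tuple(stencil_width + o for o in offset)
     return padded[tuple(slice(b, b + s) for b, s in zip(begin, x.shape))]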
Example #12
 def __init__(self, stencil_width=1, lb=(0, 0, 1), rb=(0, 0, 0)):
     super(DZB, self).__init__()
     self.stencil_width = stencil_width
     self.pad = P.Pad(((self.stencil_width, self.stencil_width),
                       (self.stencil_width, self.stencil_width),
                       (self.stencil_width, self.stencil_width)))
     self.slice = P.Slice()
     self.shape = P.Shape()
     self.dzb_kernel = dzb_kernel()
Example #13
 def __init__(self):
     super(ComputeRij, self).__init__()
     self.reshape = P.Reshape()
     self.transpose = P.Transpose()
     self.cast = P.Cast()
     self.rsum = P.ReduceSum()
     self.broadcastto = P.BroadcastTo((1, 192 * 138))
     self.broadcastto1 = P.BroadcastTo((1, 192, 138, 3))
     self.expdims = P.ExpandDims()
     self.concat = P.Concat(axis=1)
     self.gather = P.GatherV2()
     self.mul = P.Mul()
     self.slice = P.Slice()
Example #14
 def __init__(self):
     super(Processing, self).__init__()
     self.slice = P.Slice()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.batchmat = nn.MatMul()
     self.split = P.Split(1, 3)
     self.concat = P.Concat(axis=1)
     slice_64 = Tensor(np.hstack((np.identity(64), np.zeros([64, 128]))))
     slice_128 = Tensor(np.hstack((np.zeros([128, 64]), np.identity(128))))
     self.slice_0 = [slice_64, slice_128]
     slice_46 = Tensor(np.hstack((np.identity(46), np.zeros([46, 92]))))
     slice_92 = Tensor(np.hstack((np.zeros([92, 46]), np.identity(92))))
     self.slice_1 = [slice_46, slice_92]
     slice_2 = np.vstack((np.identity(1), np.zeros([3, 1])))
     self.slice_2 = Tensor(slice_2)
Example #15
 def __init__(self,
              kernel_size=1,
              stride=1,
              pad_mode="valid"):
     super(AvgPool1d, self).__init__(kernel_size, stride, pad_mode)
     validator.check_value_type('kernel_size', kernel_size, [int], self.cls_name)
     validator.check_value_type('stride', stride, [int], self.cls_name)
     self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(), ['VALID', 'SAME'], self.cls_name)
     validator.check_integer("kernel_size", kernel_size, 1, Rel.GE, self.cls_name)
     validator.check_integer("stride", stride, 1, Rel.GE, self.cls_name)
     self.kernel_size = (1, kernel_size)
     self.stride = (1, stride)
     self.avg_pool = P.AvgPool(ksize=self.kernel_size,
                               strides=self.stride,
                               padding=self.pad_mode)
     self.shape = F.shape
     self.reduce_mean = P.ReduceMean(keep_dims=True)
     self.slice = P.Slice()
     self.expand = P.ExpandDims()
Example #16
 def __init__(self,
              weight,
              w2,
              begin,
              end,
              strategy1=None,
              strategy2=None,
              is_parameter=True):
     super().__init__()
     self.mul = P.Mul().shard(strategy1)
     self.slice = P.Slice().shard(strategy2)
     if is_parameter:
         self.weight = Parameter(weight, "w1")
     else:
         self.weight = weight
     self.mul2 = P.Mul()
     self.weight2 = Parameter(w2, "w2")
     self.begin = begin
     self.end = end
Example #17
    def get_pixel_value(self, img, x, y):
        """
        Utility function to get pixel values for coordinate
        tensors x and y from a 4D tensor image.

        Input
        -----
        - img: tensor of shape (B, H, W, C)
        - x: tensor of x coordinates, shape (B, H, W)
        - y: tensor of y coordinates, shape (B, H, W)

        Returns
        -------
        - output: tensor of shape (B, H, W, C)
        """
        shape = P.Shape()
        img_shape = shape(x)  # x has shape (B, H, W)
        batch_size = img_shape[0]
        height = img_shape[1]
        width = img_shape[2]
        img[:, 0, :, :] = self.zero
        img[:, height - 1, :, :] = self.zero
        img[:, :, 0, :] = self.zero
        img[:, :, width - 1, :] = self.zero

        tile = P.Tile()
        batch_idx = P.Slice()(self.batch_idx, (0, 0, 0), (batch_size, 1, 1))
        b = tile(batch_idx, (1, height, width))

        expand_dims = P.ExpandDims()
        b = expand_dims(b, 3)
        x = expand_dims(x, 3)
        y = expand_dims(y, 3)

        concat = P.Concat(3)
        indices = concat((b, y, x))
        cast = P.Cast()
        indices = cast(indices, mindspore.int32)
        gather_nd = P.GatherNd()

        return cast(gather_nd(img, indices), mindspore.float32)
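
The final GatherNd pairs each (b, y, x) triple with one pixel. A minimal NumPy sketch of the same lookup (shapes as in the docstring above):

    # indices: (B, H, W, 3), last axis holding (batch, y, x)
    # NumPy equivalent of P.GatherNd()(img, indices):
    out = img[indices[..., 0], indices[..., 1], indices[..., 2]]  # -> (B, H, W, C)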
Example #18
    def __init__(self, vocab_len, word_len, num_classes, vec_length):
        super(TextCNN, self).__init__()
        self.vec_length = vec_length
        self.word_len = word_len
        self.num_classes = num_classes

        self.unsqueeze = P.ExpandDims()
        self.embedding = nn.Embedding(vocab_len,
                                      self.vec_length,
                                      embedding_table='normal')

        self.slice = P.Slice()
        self.layer1 = self.make_layer(kernel_height=3)
        self.layer2 = self.make_layer(kernel_height=4)
        self.layer3 = self.make_layer(kernel_height=5)

        self.concat = P.Concat(1)

        self.fc = nn.Dense(96 * 3, self.num_classes)
        self.drop = nn.Dropout(keep_prob=0.5)
        self.print = P.Print()
        self.reducemean = P.ReduceMax(keep_dims=False)
Example #19
    def __init__(self):
        super(ComputeDescriptor, self).__init__()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cast = P.Cast()
        self.rsum = P.ReduceSum()
        self.broadcastto = P.BroadcastTo((1, 192 * 138))
        self.broadcastto1 = P.BroadcastTo((1, 192, 138, 3))
        self.broadcastto2 = P.BroadcastTo((1, 192, 138, 3, 3))
        self.broadcastto3 = P.BroadcastTo((1, 192, 138, 4))
        self.broadcastto4 = P.BroadcastTo((1, 192, 138, 4, 3))

        self.expdims = P.ExpandDims()
        self.concat = P.Concat(axis=3)
        self.gather = P.GatherV2()
        self.mul = P.Mul()
        self.slice = P.Slice()
        self.square = P.Square()
        self.inv = P.Inv()
        self.sqrt = P.Sqrt()
        self.ones = P.OnesLike()
        self.eye = P.Eye()
Example #20
    def __init__(self,
                 embedding_size,
                 embedding_shape,
                 use_relative_positions=False,
                 use_token_type=False,
                 token_type_vocab_size=16,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 max_position_embeddings=512,
                 dropout_prob=0.1):
        super(EmbeddingPostprocessor, self).__init__()
        self.use_token_type = use_token_type
        self.token_type_vocab_size = token_type_vocab_size
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.max_position_embeddings = max_position_embeddings
        self.embedding_table = Parameter(
            initializer(TruncatedNormal(initializer_range),
                        [token_type_vocab_size, embedding_size]),
            name='embedding_table')

        self.shape_flat = (-1,)
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.1, mstype.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.shape = tuple(embedding_shape)
        self.layernorm = nn.LayerNorm(embedding_size)
        self.dropout = nn.Dropout(1 - dropout_prob)
        self.gather = P.GatherV2()
        self.use_relative_positions = use_relative_positions
        self.slice = P.Slice()
        self.full_position_embeddings = Parameter(
            initializer(TruncatedNormal(initializer_range),
                        [max_position_embeddings, embedding_size]),
            name='full_position_embeddings')
Example #21
 def __init__(self):
     super(SliceNet, self).__init__()
     self.slice = P.Slice()
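
The excerpt stops at __init__. A hypothetical construct for such a wrapper (the begin/size values below are placeholders, not from the source) would simply forward to the op:

 def construct(self, x):
     # begin and size are given per dimension; placeholder values for illustration
     return self.slice(x, (0, 1, 0), (2, 1, 1))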
Example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A_inv = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)), name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)), name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.pad = P.Pad(((0, 24), (0, 24)))
        self.pad1 = P.Pad(((0, 8), (0, 8)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(2048), mstype.float32)
        self.dampingG = Tensor(np.identity(1024), mstype.float32)
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #23
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 data_format='NCHW',
                 has_bias=False,
                 weight_init='normal',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 bias_init='zeros'):
        self.thor = True
        ksizes = (1, kernel_size, kernel_size, 1)
        self.hw = kernel_size * kernel_size
        strides = (1, stride, stride, 1)
        kernel_size = twice(kernel_size)
        super(Conv2d_Thor, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            data_format,
            has_bias,
            weight_init,
            bias_init,
        )
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group
                               )

        self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.transpose02314 = P.CusTranspose02314()
        self.matrix_A_dim = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.matrix_A_device_shape, self.matrix_A_device_dim = caculate_device_shape(self.matrix_A_dim,
                                                                                     self.in_channels, True)
        self.matrix_G_device_shape, self.matrix_G_device_dim = caculate_device_shape(self.matrix_G_dim,
                                                                                     self.in_channels, False)
        self.matrix_A_device_temp_shape = (
            self.matrix_A_device_shape[0], self.matrix_A_device_shape[2], self.matrix_A_device_shape[1],
            self.matrix_A_device_shape[3])
        self.matrix_G_device_temp_shape = (
            self.matrix_G_device_shape[0], self.matrix_G_device_shape[2], self.matrix_G_device_shape[1],
            self.matrix_G_device_shape[3])
        self.matrix_A_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_A_device_dim).astype(np.float16), self.matrix_A_device_shape)),
            name='matrix_A_inv', requires_grad=False)
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32), name="A_inv_max", requires_grad=False)
        self.matrix_G_inv = Parameter(
            Tensor(np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape)),
            name="matrix_G_inv", requires_grad=False)

        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32), name="G_inv_max", requires_grad=False)
        self.fake_G = Tensor(
            np.reshape(np.identity(self.matrix_G_device_dim).astype(np.float16), self.matrix_G_device_shape))

        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.vector_matmul = P.CusBatchMatMul()
        self.diag_block_dim = 128
        self.channels_slice_flag = False
        if self.in_channels % C0 != 0:
            self.channels_slice_flag = True

        self.padA_flag = False
        if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                and self.matrix_A_dim > self.diag_block_dim:
            self.padA_flag = True
            pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
            self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
        self.device_shape_pad_flag = False
        if self.matrix_A_dim != self.matrix_A_device_dim:
            self.device_shape_pad_flag = True
            self.device_shape_pad = P.Pad(((0, 0), (0, C0 - self.in_channels), (0, 0), (0, C0 - self.in_channels)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.freq = Tensor(frequency, mstype.int32)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.axis = 0

        dampingA_dim = self.matrix_A_dim
        if (self.matrix_A_dim % self.diag_block_dim) != 0 and self.matrix_A_dim > self.diag_block_dim:
            dampingA_dim = (self.matrix_A_dim // self.diag_block_dim + 1) * self.diag_block_dim
        dampingG_dim = self.matrix_G_dim
        if (self.matrix_G_dim % self.diag_block_dim) != 0 and self.matrix_G_dim > self.diag_block_dim:
            dampingG_dim = (self.matrix_G_dim // self.diag_block_dim + 1) * self.diag_block_dim

        self.dampingA = Tensor(np.identity(dampingA_dim), mstype.float32)
        self.dampingG = Tensor(np.identity(dampingG_dim), mstype.float32)
        self.fused_abs_max1 = P.CusFusedAbsMax1([self.matrix_A_dim, self.matrix_A_dim])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #24
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.thor = True
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float32)),
                                  name='matrix_A',
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            if out_channels == 1001:
                self.matrix_G = Parameter(Tensor(
                    np.zeros([1024, 1024]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
                self.pad = P.Pad(((0, 23), (0, 23)))
                self.pad1 = P.Pad(((0, 7), (0, 7)))
                self.slice = P.Slice()
                self.add = P.TensorAdd()
            else:
                self.matrix_G = Parameter(Tensor(
                    np.eye(out_channels).astype(np.float32)),
                                          name="matrix_G",
                                          requires_grad=False)
                self.abs = P.Abs()
                self.reduce_max = P.ReduceMax(keep_dims=False)
                self.neg = P.Neg()
                self.reduce_sum = P.ReduceSum()
            self.matmul = P.MatMul(transpose_b=True)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.cast = P.Cast()
            self.is_nsp_layer = (out_channels == 2)
        else:
            self.is_Ascend = False
            self.matrix_G = Parameter(Tensor(
                np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G",
                                      requires_grad=False)
            self.cube_matmul = P.MatMul(transpose_a=True)
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #25
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        self._dilation = dilation
        dilation = twice(dilation)
        super(Conv2d_Thor,
              self).__init__(in_channels, out_channels, kernel_size, stride,
                             pad_mode, padding, dilation, group, has_bias,
                             weight_init, bias_init)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self._init_depthwise_conv2d(weight_init)
        self.bias_add = P.BiasAdd()

        self.thor = True
        self.hw = kernel_size[0] * kernel_size[1]
        self.matrix_A_dim = self.in_channels * self.kernel_size[
            0] * self.kernel_size[1]
        self.matrix_G_dim = self.out_channels
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.A_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="A_normalizer",
                                      requires_grad=False)
        self.G_normalizer = Parameter(initializer(0, [1], mstype.float32),
                                      name="G_normalizer",
                                      requires_grad=False)
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            ksizes = (1, kernel_size[0], kernel_size[1], 1)
            strides = (1, stride[0], stride[1], 1)
            self.img2col = P.CusImg2Col(ksizes=ksizes, strides=strides)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.transpose02314 = P.CusTranspose02314()
            dampingA_dim = self.matrix_A_dim
            self.diag_block_dim = 128
            if (self.matrix_A_dim % self.diag_block_dim
                ) != 0 and self.matrix_A_dim > self.diag_block_dim:
                dampingA_dim = (self.matrix_A_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            dampingG_dim = self.matrix_G_dim
            if (self.matrix_G_dim % self.diag_block_dim
                ) != 0 and self.matrix_G_dim > self.diag_block_dim:
                dampingG_dim = (self.matrix_G_dim // self.diag_block_dim +
                                1) * self.diag_block_dim
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([dampingA_dim, dampingA_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([dampingG_dim, dampingG_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)

            self.channels_slice_flag = False
            self.C0 = 16
            if self.in_channels % self.C0 != 0:
                self.channels_slice_flag = True
            self.padA_flag = False
            if (self.matrix_A_dim // self.diag_block_dim) * self.diag_block_dim != self.matrix_A_dim \
                    and self.matrix_A_dim > self.diag_block_dim:
                self.padA_flag = True
                pad_dim = self.diag_block_dim - self.matrix_A_dim % self.diag_block_dim
                self.padA = P.Pad(((0, pad_dim), (0, pad_dim)))
            self.slice = P.Slice()
        else:
            self.is_Ascend = False
            self.img2col = P.Im2Col(kernel_size=kernel_size,
                                    stride=stride,
                                    pad_mode="same")
            self.matmul = P.MatMul(transpose_b=True)
            self.reduce_mean = P.ReduceMean(keep_dims=False)
            self.matrix_A_cov = Parameter(Tensor(
                np.zeros([self.matrix_A_dim,
                          self.matrix_A_dim]).astype(np.float32)),
                                          name='matrix_A',
                                          requires_grad=False)
            self.matrix_G_cov = Parameter(Tensor(
                np.zeros([self.matrix_G_dim,
                          self.matrix_G_dim]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
        self.getG = P.InsertGradientOf(self.save_gradient)
Example #26
 ('MaximumGrad', {
     'block': G.MaximumGrad(),
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
     'skip': ['backward']}),
 ('MinimumGrad', {
     'block': G.MinimumGrad(),
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
     'skip': ['backward']}),
 ('StridedSlice', {
     'block': P.StridedSlice(),
     'desc_const': [(0, 1, 2, 1),
                    (2, 3, 3, 4),
                    (1, 1, 1, 1)],
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[2, 2, 1, 3]]}),
 ('Slice_1', {
     'block': P.Slice(),
     'desc_const': [(0, 1, 2, 1),
                    (1, 1, 1, 2)],
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[1, 1, 1, 2]]}),
 ('StridedSliceGrad', {
     'block': G.StridedSliceGrad(),
     'desc_const': [(64, 1, 1024),
                    (0, 1, 0),
                    (64, 2, 1024),
                    (1, 1, 1)],
     'desc_inputs': [[64, 128, 1024]],
     'skip': ['backward']}),
 ('RandomChoiceWithMask', {
     'block': P.RandomChoiceWithMask(256),
     'desc_inputs': [Tensor(np.random.rand(24000, 4).astype(np.bool_))],
Example #27
    def multiclass_nms(self, boxes_all, scores_all, mask_all):
        """Multiscale postprocessing."""
        all_bboxes = ()
        all_labels = ()
        all_masks = ()

        for i in range(self.test_batch_size):
            bboxes = boxes_all[i]
            scores = scores_all[i]
            masks = self.cast(mask_all[i], mstype.bool_)

            res_boxes_tuple = ()
            res_labels_tuple = ()
            res_masks_tuple = ()

            for j in range(self.num_classes - 1):
                k = j + 1
                _cls_scores = scores[::, k:k + 1:1]
                _bboxes = self.squeeze(bboxes[k])
                _mask_o = self.reshape(masks, (self.rpn_max_num, 1))

                cls_mask = self.greater(_cls_scores, self.test_score_thresh)
                _mask = self.logicand(_mask_o, cls_mask)

                _reg_mask = self.cast(
                    self.tile(self.cast(_mask, mstype.int32), (1, 4)),
                    mstype.bool_)

                _bboxes = self.select(_reg_mask, _bboxes, self.test_box_zeros)
                _cls_scores = self.select(_mask, _cls_scores,
                                          self.test_score_zeros)
                __cls_scores = self.squeeze(_cls_scores)
                scores_sorted, topk_inds = self.test_topk(
                    __cls_scores, self.rpn_max_num)
                topk_inds = self.reshape(topk_inds, (self.rpn_max_num, 1))
                scores_sorted = self.reshape(scores_sorted,
                                             (self.rpn_max_num, 1))
                _bboxes_sorted = self.gather(_bboxes, topk_inds)
                _mask_sorted = self.gather(_mask, topk_inds)

                scores_sorted = self.tile(scores_sorted, (1, 4))
                cls_dets = self.concat_1((_bboxes_sorted, scores_sorted))
                cls_dets = P.Slice()(cls_dets, (0, 0), (self.rpn_max_num, 5))

                cls_dets, _index, _mask_nms = self.nms_test(cls_dets)
                _index = self.reshape(_index, (self.rpn_max_num, 1))
                _mask_nms = self.reshape(_mask_nms, (self.rpn_max_num, 1))

                _mask_n = self.gather(_mask_sorted, _index)

                _mask_n = self.logicand(_mask_n, _mask_nms)
                cls_labels = self.oneslike(_index) * j
                res_boxes_tuple += (cls_dets, )
                res_labels_tuple += (cls_labels, )
                res_masks_tuple += (_mask_n, )

            res_boxes_start = self.concat(res_boxes_tuple[:self.concat_start])
            res_labels_start = self.concat(
                res_labels_tuple[:self.concat_start])
            res_masks_start = self.concat(res_masks_tuple[:self.concat_start])

            res_boxes_end = self.concat(
                res_boxes_tuple[self.concat_start:self.concat_end])
            res_labels_end = self.concat(
                res_labels_tuple[self.concat_start:self.concat_end])
            res_masks_end = self.concat(
                res_masks_tuple[self.concat_start:self.concat_end])

            res_boxes = self.concat((res_boxes_start, res_boxes_end))
            res_labels = self.concat((res_labels_start, res_labels_end))
            res_masks = self.concat((res_masks_start, res_masks_end))

            reshape_size = (self.num_classes - 1) * self.rpn_max_num
            res_boxes = self.reshape(res_boxes, (1, reshape_size, 5))
            res_labels = self.reshape(res_labels, (1, reshape_size, 1))
            res_masks = self.reshape(res_masks, (1, reshape_size, 1))

            all_bboxes += (res_boxes, )
            all_labels += (res_labels, )
            all_masks += (res_masks, )

        all_bboxes = self.concat(all_bboxes)
        all_labels = self.concat(all_labels)
        all_masks = self.concat(all_masks)
        return all_bboxes, all_labels, all_masks
Example #28
 def __init__(self, begin, size):
     super(Slice5, self).__init__()
     self.relu = nn.ReLU()
     self.slice = P.Slice()
     self.begin = begin
     self.size = size
Example #29
    def __init__(self, block, layer_nums, in_channels, channels, out_channels,
                 strides, num_classes, is_train):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the length of layer_nums, in_channels and out_channels must all be 4!"
            )

        self.ha3 = HardAttn(2048)
        self.is_train = is_train
        self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       channel=channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       channel=channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       channel=channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       channel=channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.max = P.ReduceMax(keep_dims=True)
        self.flatten = nn.Flatten()
        self.global_bn = _bn2_kaiming(out_channels[3])
        self.partial_bn = _bn2_kaiming(out_channels[3])
        normal = Normal(0.001)
        self.global_fc = nn.Dense(out_channels[3],
                                  num_classes,
                                  has_bias=False,
                                  weight_init=normal,
                                  bias_init='zeros')
        self.partial_fc = nn.Dense(out_channels[3],
                                   num_classes,
                                   has_bias=False,
                                   weight_init=normal,
                                   bias_init='zeros')
        self.theta_0 = Tensor(np.zeros((128, 4)), mindspore.float32)
        self.theta_6 = Tensor(np.zeros((128, 4)) + 0.6, mindspore.float32)
        self.STN = STN(128, 128)
        self.concat = P.Concat(axis=1)
        self.shape = P.Shape()
        self.tanh = P.Tanh()
        self.slice = P.Slice()
        self.split = P.Split(1, 4)
Example #30
    def __init__(self, weight, vocab_size, cell, batch_size):
        super(textrcnn, self).__init__()
        self.num_hiddens = 512
        self.embed_size = 300
        self.num_classes = 2
        self.batch_size = batch_size
        k = (1 / self.num_hiddens)**0.5

        self.embedding = nn.Embedding(vocab_size,
                                      self.embed_size,
                                      embedding_table=weight)
        self.embedding.embedding_table.requires_grad = False
        self.cell = cell

        self.cast = P.Cast()

        self.h1 = Tensor(
            np.zeros(shape=(self.batch_size,
                            self.num_hiddens)).astype(np.float16))
        self.c1 = Tensor(
            np.zeros(shape=(self.batch_size,
                            self.num_hiddens)).astype(np.float16))

        if cell == "lstm":
            self.lstm = P.DynamicRNN(forget_bias=0.0)
            self.w1_fw = Parameter(np.random.uniform(
                -k, k, (self.embed_size + self.num_hiddens,
                        4 * self.num_hiddens)).astype(np.float16),
                                   name="w1_fw")
            self.b1_fw = Parameter(np.random.uniform(
                -k, k, (4 * self.num_hiddens)).astype(np.float16),
                                   name="b1_fw")
            self.w1_bw = Parameter(np.random.uniform(
                -k, k, (self.embed_size + self.num_hiddens,
                        4 * self.num_hiddens)).astype(np.float16),
                                   name="w1_bw")
            self.b1_bw = Parameter(np.random.uniform(
                -k, k, (4 * self.num_hiddens)).astype(np.float16),
                                   name="b1_bw")
            self.h1 = Tensor(
                np.zeros(shape=(1, self.batch_size,
                                self.num_hiddens)).astype(np.float16))
            self.c1 = Tensor(
                np.zeros(shape=(1, self.batch_size,
                                self.num_hiddens)).astype(np.float16))

        if cell == "vanilla":
            self.rnnW_fw = nn.Dense(self.num_hiddens, self.num_hiddens)
            self.rnnU_fw = nn.Dense(self.embed_size, self.num_hiddens)
            self.rnnW_bw = nn.Dense(self.num_hiddens, self.num_hiddens)
            self.rnnU_bw = nn.Dense(self.embed_size, self.num_hiddens)

        if cell == "gru":
            self.rnnWr_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWz_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWh_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWr_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWz_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWh_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.ones = Tensor(
                np.ones(shape=(self.batch_size,
                               self.num_hiddens)).astype(np.float16))
            self.rnnWr_fw.to_float(mstype.float16)
            self.rnnWz_fw.to_float(mstype.float16)
            self.rnnWh_fw.to_float(mstype.float16)
            self.rnnWr_bw.to_float(mstype.float16)
            self.rnnWz_bw.to_float(mstype.float16)
            self.rnnWh_bw.to_float(mstype.float16)

        self.transpose = P.Transpose()
        self.reduce_max = P.ReduceMax()
        self.expand_dims = P.ExpandDims()
        self.concat = P.Concat()

        self.reshape = P.Reshape()
        self.left_pad_tensor = Tensor(
            np.zeros(
                (1, self.batch_size, self.num_hiddens)).astype(np.float16))
        self.right_pad_tensor = Tensor(
            np.zeros(
                (1, self.batch_size, self.num_hiddens)).astype(np.float16))
        self.output_dense = nn.Dense(self.num_hiddens * 1, 2)
        self.concat0 = P.Concat(0)
        self.concat2 = P.Concat(2)
        self.concat1 = P.Concat(1)
        self.text_rep_dense = nn.Dense(2 * self.num_hiddens + self.embed_size,
                                       self.num_hiddens)
        self.mydense = nn.Dense(self.num_hiddens, 2)
        self.drop_out = nn.Dropout(keep_prob=0.7)
        self.tanh = P.Tanh()
        self.sigmoid = P.Sigmoid()
        self.slice = P.Slice()
        self.text_rep_dense.to_float(mstype.float16)
        self.mydense.to_float(mstype.float16)
        self.output_dense.to_float(mstype.float16)