Example #1
 def __init__(self,
              batch_size,
              conv_out_dim,
              encoder_hidden_size,
              decoder_hidden_size,
              decoder_output_size,
              max_length,
              dropout_p=0.1):
     super(AttentionOCR, self).__init__()
     self.encoder = Encoder(batch_size=batch_size,
                            conv_out_dim=conv_out_dim,
                            hidden_size=encoder_hidden_size)
     self.decoder = Decoder(hidden_size=decoder_hidden_size,
                            output_size=decoder_output_size,
                            max_length=max_length,
                            dropout_p=dropout_p)
     self.init_decoder_hidden = Tensor(
         np.zeros((1, batch_size, decoder_hidden_size), dtype=np.float16),
         mstype.float16)
     self.shape = P.Shape()
     self.split = P.Split(axis=1, output_num=max_length)
     self.concat = P.Concat()
     self.expand_dims = P.ExpandDims()
     self.argmax = P.Argmax()
     self.select = P.Select()
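A minimal sketch of the per-step split this cell relies on, with illustrative shapes and calling the op directly in PyNative mode (not the original forward pass):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

max_length = 5
split = P.Split(axis=1, output_num=max_length)      # same op as self.split above
text = Tensor(np.zeros((2, max_length), np.int32))  # (batch, max_length) token ids
steps = split(text)                                 # max_length tensors, each (2, 1)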
Example #2
 def __init__(self):
     super(BoundingBoxEncode, self).__init__()
     self.split = P.Split(axis=1, output_num=4)
     self.ones = 1.0
     self.half = 0.5
     self.log = P.Log()
     self.concat = P.Concat(axis=1)
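For reference, a minimal sketch of what this Split does to an (N, 4) box tensor (PyNative mode, values illustrative):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

split = P.Split(axis=1, output_num=4)
boxes = Tensor(np.array([[0., 0., 4., 2.]], dtype=np.float32))
x1, y1, x2, y2 = split(boxes)  # four (1, 1) coordinate columns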
Example #3
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              pad_mode="pad",
              pad=0,
              groups=1,
              has_bias=False):
     super(GroupConv, self).__init__()
     assert in_channels % groups == 0 and out_channels % groups == 0
     self.groups = groups
     self.convs = nn.CellList()
     self.op_split = P.Split(axis=1, output_num=self.groups)
     self.op_concat = P.Concat(axis=1)
     self.cast = P.Cast()
     for _ in range(groups):
         self.convs.append(
             nn.Conv2d(in_channels // groups,
                       out_channels // groups,
                       kernel_size=kernel_size,
                       stride=stride,
                       has_bias=has_bias,
                       padding=pad,
                       pad_mode=pad_mode,
                       group=1,
                       weight_init='xavier_uniform'))
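The forward pass is not shown in this excerpt; a plausible construct for the cell above splits the input along the channel axis, applies each per-group convolution, and concatenates the results (a sketch, not necessarily the original code):

 def construct(self, x):
     features = self.op_split(x)  # self.groups tensors, each (N, C_in // groups, H, W)
     outputs = ()
     for i in range(self.groups):
         outputs = outputs + (self.convs[i](self.cast(features[i], mstype.float32)),)
     return self.op_concat(outputs)  # back to (N, C_out, H, W)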
Example #4
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 use_sigmoid_cls,
                 target_means=(.0, .0, .0, .0),
                 target_stds=(1.0, 1.0, 1.0, 1.0)
                 ):
        super(Proposal, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = config.use_sigmoid_cls

        if self.use_sigmoid_cls:
            self.cls_out_channels = 1
            self.activation = P.Sigmoid()
            self.reshape_shape = (-1, 1)
        else:
            self.cls_out_channels = num_classes
            self.activation = P.Softmax(axis=1)
            self.reshape_shape = (-1, 2)

        if self.cls_out_channels <= 0:
            raise ValueError('num_classes={} is too small'.format(num_classes))

        self.num_pre = cfg.rpn_proposal_nms_pre
        self.min_box_size = cfg.rpn_proposal_min_bbox_size
        self.nms_thr = cfg.rpn_proposal_nms_thr
        self.nms_post = cfg.rpn_proposal_nms_post
        self.nms_across_levels = cfg.rpn_proposal_nms_across_levels
        self.max_num = cfg.rpn_proposal_max_num

        # Op Define
        self.squeeze = P.Squeeze()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.feature_shapes = cfg.feature_shapes

        self.transpose_shape = (1, 2, 0)

        self.decode = BoundingBoxDecode()

        self.nms = P.NMSWithMask(self.nms_thr)
        self.concat_axis0 = P.Concat(axis=0)
        self.concat_axis1 = P.Concat(axis=1)
        self.split = P.Split(axis=1, output_num=5)
        self.min = P.Minimum()
        self.gatherND = P.GatherNd()
        self.slice = P.Slice()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()
        self.tile = P.Tile()
        self.set_train_local(config, training=True)

        self.multi_10 = Tensor(10.0, mstype.float16)
Example #5
    def __init__(self, config, scale=1.0, layer_idx=None):
        super(Attention, self).__init__()
        self.get_attention_mask = AttentionMask(config)
        self.projection = Mapping(config.embedding_size, config.embedding_size,
                                  config.compute_dtype, scale)
        self.split = P.Split(axis=-1, output_num=3)
        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
        self.n_head = config.num_heads
        self.size_per_head = config.embedding_size // self.n_head
        self.concat_k = P.Concat(axis=3)
        self.concat_v = P.Concat(axis=2)
        self.multiply_data = Tensor([
            -10000.0,
        ], dtype=mstype.float32)
        self.batch_matmul = P.BatchMatMul()
        self.scale = scale
        if self.scale:
            self.scale_factor = Tensor(math.sqrt(self.size_per_head))
        if layer_idx is not None:
            self.coeff = math.sqrt(layer_idx * math.sqrt(self.size_per_head))
            self.coeff = Tensor(self.coeff)
        self.use_past = config.use_past
        self.dropout = nn.Dropout(1 - config.dropout_rate)
        self.prob_dropout = nn.Dropout(1 - config.dropout_rate)

        self.dense1 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
                                   config.compute_dtype)
        self.dense2 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
                                   config.compute_dtype)
        self.dense3 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
                                   config.compute_dtype)
Example #6
    def __init__(self, network, optimizer, sens=1.0, micro_batches=None, norm_clip=1.0, mech=None):
        super(_TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.network.add_flags(defer_inline=True)
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
        self.sens = sens
        self.reducer_flag = False
        self.grad_reducer = None
        parallel_mode = _get_parallel_mode()
        if parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
            self.reducer_flag = True
        if self.reducer_flag:
            mean = _get_mirror_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)

        # dp params
        self._micro_batches = micro_batches
        norm_clip = check_param_type('norm_clip', norm_clip, float)
        self._l2_norm = check_value_positive('norm_clip', norm_clip)
        self._split = P.Split(0, self._micro_batches)
        self._clip_by_global_norm = _ClipGradients()
        self._mech = mech
        self._tuple_add = _TupleAdd()
        self._hyper_map = C.HyperMap()
        self._micro_float = Tensor(micro_batches, mstype.float32)
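The differential-privacy logic hinges on P.Split(0, micro_batches): the input batch is cut along axis 0 so gradients can be computed and clipped per micro-batch. A minimal sketch of that split (the batch size must be divisible by micro_batches; shapes illustrative, PyNative mode):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

split = P.Split(0, 4)                        # same pattern as self._split above
batch = Tensor(np.ones((8, 3), np.float32))
micro = split(batch)                         # 4 tensors, each of shape (2, 3)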
Example #7
def test_out_int64():
    op = P.Split(5, 2)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(
        np.arange(192).astype(np.int64).reshape((2, 2, 2, 2, 2, 6)))
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :], [0, 1, 2])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :], [3, 4, 5])

    op = P.Split(5, 3)
    op_wrapper = OpNetWrapper(op)
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[1, 0, 0, 0, 0, :], [96, 97])
    assert np.allclose(outputs[1].asnumpy()[1, 0, 0, 0, 0, :], [98, 99])
    assert np.allclose(outputs[2].asnumpy()[1, 0, 0, 0, 0, :], [100, 101])
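The expected values follow numpy semantics: P.Split(axis, output_num) partitions evenly like np.split along the same axis, so the assertions can be cross-checked outside MindSpore:

import numpy as np

x = np.arange(192).reshape((2, 2, 2, 2, 2, 6))
halves = np.split(x, 2, axis=5)              # analogue of P.Split(5, 2)
assert list(halves[1][0, 0, 0, 0, 0, :]) == [3, 4, 5]
thirds = np.split(x, 3, axis=5)              # analogue of P.Split(5, 3)
assert list(thirds[0][1, 0, 0, 0, 0, :]) == [96, 97]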
Example #8
def test_out_float32():
    op = P.Split(5, 2)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(
        np.arange(192).astype(np.float32).reshape((2, 2, 2, 2, 2, 6)))
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :], [0., 1., 2.])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :], [3., 4., 5.])

    op = P.Split(5, 3)
    op_wrapper = OpNetWrapper(op)
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :], [0., 1.])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :], [2., 3.])
    assert np.allclose(outputs[2].asnumpy()[0, 0, 0, 0, 0, :], [4., 5.])
Example #9
File: bgcf.py  Project: yrpang/mindspore
    def __init__(self, dataset_argv, architect_argv, activation,
                 neigh_drop_rate, num_user, num_item, input_dim):
        super(BGCF, self).__init__()

        self.user_embed = Parameter(
            initializer("XavierUniform", [num_user, input_dim],
                        dtype=mstype.float32))
        self.item_embed = Parameter(
            initializer("XavierUniform", [num_item, input_dim],
                        dtype=mstype.float32))
        self.cast = P.Cast()
        self.tanh = P.Tanh()
        self.shape = P.Shape()
        self.split = P.Split(0, 2)
        self.gather = P.Gather()
        self.reshape = P.Reshape()
        self.concat_0 = P.Concat(0)
        self.concat_1 = P.Concat(1)

        (self.input_dim, self.num_user, self.num_item) = dataset_argv
        self.layer_dim = architect_argv

        self.gnew_agg_mean = MeanConv(self.input_dim,
                                      self.layer_dim,
                                      activation=activation,
                                      dropout=neigh_drop_rate[1])
        self.gnew_agg_mean.to_float(mstype.float16)

        self.gnew_agg_user = AttenConv(self.input_dim,
                                       self.layer_dim,
                                       dropout=neigh_drop_rate[2])
        self.gnew_agg_user.to_float(mstype.float16)

        self.gnew_agg_item = AttenConv(self.input_dim,
                                       self.layer_dim,
                                       dropout=neigh_drop_rate[2])
        self.gnew_agg_item.to_float(mstype.float16)

        self.user_feature_dim = self.input_dim
        self.item_feature_dim = self.input_dim

        self.final_weight = Parameter(
            initializer("XavierUniform",
                        [self.input_dim * 3, self.input_dim * 3],
                        dtype=mstype.float32))

        self.raw_agg_funcs_user = MeanConv(self.input_dim,
                                           self.layer_dim,
                                           activation=activation,
                                           dropout=neigh_drop_rate[0])
        self.raw_agg_funcs_user.to_float(mstype.float16)

        self.raw_agg_funcs_item = MeanConv(self.input_dim,
                                           self.layer_dim,
                                           activation=activation,
                                           dropout=neigh_drop_rate[0])
        self.raw_agg_funcs_item.to_float(mstype.float16)
Example #10
    def __init__(self):
        super(EAST, self).__init__()

        #param
        self.TEXT_SCALE = 512
        self.pi = mindspore.Tensor([math.pi], mindspore.float32)

        #network
        self.model = vgg.vgg16()
        
        #for i = 0
        self.split = P.Split(1, 2)
        self.unpool0 = unpool((32, 32))
        self._concat = P.Concat(axis=1)

        #for i = 1
        self.concat1 = P.Concat(axis=1)
        self.conv1_1 = ops.conv_bn_relu(1024, 128, stride=1, kernel_size=1, padding='valid')
        self.conv1_2 = ops.conv_bn_relu(128, 128, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool1 = unpool((64, 64))

        #for i = 2
        self.concat2 = P.Concat(axis=1)
        self.conv2_1 = ops.conv_bn_relu(384, 64, stride=1, kernel_size=1, padding='valid')
        self.conv2_2 = ops.conv_bn_relu(64, 64, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool2 = unpool((128, 128))


        #for i = 3
        self.concat3 = P.Concat(axis=1)
        self.conv3_1 = ops.conv_bn_relu(192, 32, stride=1, kernel_size=1, padding='valid')
        self.conv3_2 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.conv3_3 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)


        #output
        ## F_score
        self.conv_for_fscore = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_fscore = nn.Sigmoid()

        ## geo_map
        self.conv_for_geo_map = ops._conv(32, 4, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_geo_map = nn.Sigmoid()

        ## angle_map
        self.conv_for_angle_map = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_angle_map = nn.Sigmoid()

        ## F_geometry 
        self.concat_for_F_geometry  = P.Concat(axis=1)


        ## other
        self.mul = P.Mul()
        self.add = P.TensorAdd()
Example #11
    def __init__(self,
                 batch_size=512,
                 d_model=768,
                 seq_length=1024,
                 num_attention_heads=12,
                 dim_per_head=64,
                 has_attention_mask=True,
                 do_return_2d_tensor=True,
                 attention_dropout=0.0,
                 compute_type=mstype.float32):
        super(MaskedSelfAttention, self).__init__()

        self.batch_size = batch_size
        self.d_model = d_model
        self.seq_length = seq_length
        self.num_heads = num_attention_heads
        self.dim_per_head = dim_per_head
        self.has_attention_mask = has_attention_mask
        assert has_attention_mask

        self.scale = Tensor([1.0 / math.sqrt(float(self.dim_per_head))],
                            dtype=compute_type)  # attention scale
        self.mask_data = Tensor([
            -10000.0,
        ], dtype=compute_type)
        self.split_head_shape = (self.batch_size, self.seq_length,
                                 self.num_heads, self.dim_per_head)

        self.c_attn = Conv1D(d_model, d_model * 3)
        self.c_proj = Conv1D(d_model, d_model)

        self.split_for_qkv = P.Split(1, 3)  # P.Split(axis, output_num)
        # self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.trans_shape = (0, 2, 1, 3)
        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.matmul = P.BatchMatMul()
        self.multiply = P.Mul()

        if self.has_attention_mask:
            self.expand_dims = P.ExpandDims()
            self.sub = P.Sub()
            self.add = P.TensorAdd()
            self.cast = P.Cast()
            self.get_dtype = P.DType()

        if do_return_2d_tensor:
            self.shape_return = (batch_size * seq_length, d_model)
        else:
            self.shape_return = (batch_size, seq_length, d_model)

        self.softmax = nn.Softmax()
        self.softmax_cast = P.Cast()
        self.dropout = nn.Dropout(1 - attention_dropout)
        self.use_attention_dropout = attention_dropout > 0
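Here c_attn projects d_model to 3 * d_model, and split_for_qkv separates that fused projection back into query, key, and value along axis 1. A minimal sketch with illustrative shapes (PyNative mode):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

split_for_qkv = P.Split(1, 3)
fused = Tensor(np.zeros((8, 3 * 768), np.float32))  # c_attn output for batch*seq = 8
query, key, value = split_for_qkv(fused)            # each of shape (8, 768)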
Example #12
 def __init__(self, network, max_length):
     super(AttentionOCRWithLossCell, self).__init__()
     self.network = network
     self.loss = NLLLoss()
     self.shape = P.Shape()
     self.add = P.AddN()
     self.mean = P.ReduceMean()
     self.split = P.Split(axis=0, output_num=max_length)
     self.squeeze = P.Squeeze()
     self.cast = P.Cast()
Example #13
    def construct(self, input_data, target_features):
        """
        Calculate the inversion attack loss, which consists of three parts. Loss_1 is for evaluating the difference
        between the target deep representations and current representations; Loss_2 is for evaluating the continuity
        between adjacent pixels; Loss_3 is for regularization.

        Args:
            input_data (Tensor): The reconstructed image during inversion attack.
            target_features (Tensor): Deep representations of the original image.

        Returns:
            Tensor, inversion attack loss of the current iteration.
        """
        output = self._network(input_data)
        loss_1 = self._mse_loss(output, target_features) / self._mse_loss(
            target_features, self._zeros(target_features))

        data_shape = self._get_shape(input_data)
        if self._device_target == 'CPU':
            split_op_1 = P.Split(2, data_shape[2])
            split_op_2 = P.Split(3, data_shape[3])
            data_split_1 = split_op_1(input_data)
            data_split_2 = split_op_2(input_data)
            loss_2 = 0
            for i in range(1, data_shape[2]):
                loss_2 += self._mse_loss(data_split_1[i], data_split_1[i - 1])
            for j in range(1, data_shape[3]):
                loss_2 += self._mse_loss(data_split_2[j], data_split_2[j - 1])
        else:
            # shift-by-one copies along H and W, so adjacent pixels line up for the MSE
            data_copy_1 = self._zeros(input_data)
            data_copy_2 = self._zeros(input_data)
            data_copy_1[:, :, :(data_shape[2] - 1), :] = input_data[:, :, 1:, :]
            data_copy_2[:, :, :, :(data_shape[3] - 1)] = input_data[:, :, :, 1:]
            loss_2 = self._mse_loss(input_data, data_copy_1) + self._mse_loss(
                input_data, data_copy_2)

        loss_3 = self._mse_loss(input_data, self._zeros(input_data))

        loss = (loss_1 * self._weights[0] + loss_2 * self._weights[1]
                + loss_3 * self._weights[2])
        return loss
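On CPU, the Split-based branch compares adjacent rows and columns; a numpy sketch of that smoothness term (shapes illustrative):

import numpy as np

img = np.random.rand(1, 3, 4, 4).astype(np.float32)
rows = np.split(img, img.shape[2], axis=2)   # analogue of P.Split(2, H)
loss_2 = sum(((rows[i] - rows[i - 1]) ** 2).mean() for i in range(1, len(rows)))
cols = np.split(img, img.shape[3], axis=3)   # analogue of P.Split(3, W)
loss_2 += sum(((cols[j] - cols[j - 1]) ** 2).mean() for j in range(1, len(cols)))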
Example #14
 def __init__(self,
              mul_weight,
              axis=0,
              out_nums=1,
              strategy1=None,
              strategy2=None):
     super(Net2, self).__init__()
     self.split = P.Split(axis, out_nums).shard(strategy1)
     self.mul = P.Mul().shard(strategy2)
     self.weight = Parameter(mul_weight, "w1")
Example #15
def test_out1_axis0():
    op = P.Split(0, 1)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.arange(24).astype(np.int32).reshape((2, 2, 6)))
    outputs = op_wrapper(input_x)

    print(outputs)
    assert outputs[0].shape == (2, 2, 6)
    assert np.allclose(outputs[0].asnumpy()[0, 0, :], [0, 1, 2, 3, 4, 5])
Example #16
def test_out_uint32():
    op = P.Split(-1, 2)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(
        np.arange(320).astype(np.uint32).reshape((2, 2, 2, 2, 2, 10)))
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :], [0, 1, 2, 3, 4])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :], [5, 6, 7, 8, 9])

    op = P.Split(-1, 5)
    op_wrapper = OpNetWrapper(op)
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[1, 1, 1, 1, 1, :], [310, 311])
    assert np.allclose(outputs[1].asnumpy()[1, 1, 1, 1, 1, :], [312, 313])
    assert np.allclose(outputs[2].asnumpy()[1, 1, 1, 1, 1, :], [314, 315])
    assert np.allclose(outputs[3].asnumpy()[1, 1, 1, 1, 1, :], [316, 317])
    assert np.allclose(outputs[4].asnumpy()[1, 1, 1, 1, 1, :], [318, 319])

    op = P.Split(-2, 2)
    op_wrapper = OpNetWrapper(op)
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, :, 0], [0])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, :, 1], [11])
    assert np.allclose(outputs[0].asnumpy()[1, 0, 0, 0, :, 2], [162])
    assert np.allclose(outputs[1].asnumpy()[1, 0, 0, 0, :, 3], [173])
    assert np.allclose(outputs[0].asnumpy()[1, 1, 0, 0, :, 4], [244])
    assert np.allclose(outputs[1].asnumpy()[1, 1, 0, 0, :, 5], [255])
    assert np.allclose(outputs[0].asnumpy()[1, 1, 1, 0, :, 6], [286])
    assert np.allclose(outputs[1].asnumpy()[1, 1, 1, 0, :, 7], [297])
    assert np.allclose(outputs[0].asnumpy()[1, 1, 1, 1, :, 8], [308])
    assert np.allclose(outputs[1].asnumpy()[1, 1, 1, 1, :, 9], [319])

    op = P.Split(-1, 1)
    op_wrapper = OpNetWrapper(op)
    input_x = Tensor(np.arange(1).astype(np.uint32))
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy(), [0])
Example #17
 def __init__(self, batch_size, conv_out_dim, hidden_size):
     super(Encoder, self).__init__()
     self.cnn = CNN(int(conv_out_dim / 4))
     self.lstm1 = BidirectionalLSTM(batch_size, conv_out_dim, hidden_size,
                                    hidden_size).to_float(mstype.float16)
     self.lstm2 = BidirectionalLSTM(batch_size, hidden_size, hidden_size,
                                    hidden_size).to_float(mstype.float16)
     self.transpose = P.Transpose()
     self.cast = P.Cast()
     self.split = P.Split(axis=3, output_num=4)
     self.concat = P.Concat(axis=1)
Example #18
    def test_mode_init(self, config):
        self.test_batch_size = config.test_batch_size
        self.split = P.Split(axis=0, output_num=self.test_batch_size)
        self.split_shape = P.Split(axis=0, output_num=4)
        self.split_scores = P.Split(axis=1, output_num=self.num_classes)
        self.split_cls = P.Split(axis=0, output_num=self.num_classes - 1)
        self.tile = P.Tile()
        self.gather = P.GatherNd()

        self.rpn_max_num = config.rpn_max_num

        self.zeros_for_nms = Tensor(
            np.zeros((self.rpn_max_num, 3)).astype(self.dtype))
        self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(np.bool_)
        self.zeros_mask = np.zeros((self.rpn_max_num, 1)).astype(np.bool_)
        self.bbox_mask = Tensor(
            np.concatenate((self.ones_mask, self.zeros_mask, self.ones_mask,
                            self.zeros_mask),
                           axis=1))
        self.nms_pad_mask = Tensor(
            np.concatenate((self.ones_mask, self.ones_mask, self.ones_mask,
                            self.ones_mask, self.zeros_mask),
                           axis=1))

        self.test_score_thresh = Tensor(
            np.ones((self.rpn_max_num, 1)).astype(self.dtype) *
            config.test_score_thr)
        self.test_score_zeros = Tensor(
            np.ones((self.rpn_max_num, 1)).astype(self.dtype) * 0)
        self.test_box_zeros = Tensor(
            np.ones((self.rpn_max_num, 4)).astype(self.dtype) * -1)
        self.test_iou_thr = Tensor(
            np.ones((self.rpn_max_num, 1)).astype(self.dtype) *
            config.test_iou_thr)
        self.test_max_per_img = config.test_max_per_img
        self.nms_test = P.NMSWithMask(config.test_iou_thr)
        self.softmax = P.Softmax(axis=1)
        self.logicand = P.LogicalAnd()
        self.oneslike = P.OnesLike()
        self.test_topk = P.TopK(sorted=True)
        self.test_num_proposal = self.test_batch_size * self.rpn_max_num
Example #19
 def __init__(self, weight_angle=10):
     super(LossFunc, self).__init__()
     self.split = P.Split(1, 5)
     self.min = P.Minimum()
     self.log = P.Log()
     self.cos = P.Cos()
     self.mean = P.ReduceMean()
     #self.flatten = P.Flatten()
     self.sum = P.ReduceSum()
     self.weight_angle = weight_angle
     self.max = P.Maximum()
     self.print = P.Print()
Example #20
def test_out2_axis1neg():
    op = P.Split(-1, 2)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(np.arange(24).astype(np.float32).reshape((2, 2, 6)))
    outputs = op_wrapper(input_x)

    print(outputs)
    assert np.allclose(outputs[0].asnumpy()[0, :, :],
                       [[0., 1., 2.], [6., 7., 8.]])
    assert np.allclose(outputs[1].asnumpy()[0, :, :],
                       [[3., 4., 5.], [9., 10., 11.]])
Example #21
    def __init__(self, residual_channels=None, gate_channels=None, kernel_size=None, skip_out_channels=None, bias=True,
                 dropout=1 - 0.95, dilation=1, cin_channels=-1, gin_channels=-1, padding=None, causal=True):
        super(ResidualConv1dGLU, self).__init__()
        self.dropout = dropout
        self.dropout_op = nn.Dropout(keep_prob=1. - self.dropout)
        self.eval_split_op = P.Split(axis=-1, output_num=2)
        self.train_split_op = P.Split(axis=1, output_num=2)
        self.tanh = P.Tanh()
        self.sigmoid = P.Sigmoid()
        self.mul = P.Mul()
        self.add = P.TensorAdd()

        if skip_out_channels is None:
            skip_out_channels = residual_channels
        if padding is None:
            if causal:
                padding = (kernel_size - 1) * dilation
            else:
                padding = (kernel_size - 1) // 2 * dilation
        self.causal = causal

        self.conv = Conv1d(residual_channels, gate_channels, kernel_size, pad_mode='pad',
                           padding=padding, dilation=dilation, has_bias=bias)

        # local conditioning
        if cin_channels > 0:
            self.conv1x1c = Conv1d1x1(cin_channels, gate_channels, has_bias=False)
        else:
            self.conv1x1c = None

        # global conditioning
        if gin_channels > 0:
            self.conv1x1g = Conv1d(gin_channels, gate_channels, has_bias=False, kernel_size=1, dilation=1)
        else:
            self.conv1x1g = None

        gate_out_channels = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, has_bias=bias)
        self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_out_channels, has_bias=bias)
        self.factor = math.sqrt(0.5)
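The two Split ops feed the gated activation: the convolution produces gate_channels channels, which are halved into a filter part and a gate part before tanh/sigmoid gating. A sketch of that step (standard WaveNet-style GLU; shapes illustrative, PyNative mode):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

split = P.Split(axis=1, output_num=2)            # like self.train_split_op
h = Tensor(np.zeros((1, 64, 100), np.float32))   # conv output, gate_channels = 64
a, b = split(h)                                  # filter and gate halves, (1, 32, 100) each
gated = P.Mul()(P.Tanh()(a), P.Sigmoid()(b))     # tanh(a) * sigmoid(b)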
Example #22
 def __init__(self,
              mul_weight,
              axis=0,
              out_nums=1,
              strategy1=None,
              strategy2=None,
              strategy3=None):
     super(Net, self).__init__()
     self.split = P.Split(axis, out_nums).shard(strategy1)
     self.mul = P.Mul().shard(strategy2)
     self.matmul = P.MatMul(transpose_b=True).shard(strategy2)
     self.matmul2 = P.MatMul().shard(strategy3)
     self.weight = Parameter(mul_weight, "w1")
Example #23
def test_out_float16():
    op = P.Split(-1, 2)
    op_wrapper = OpNetWrapper(op)

    input_x = Tensor(
        np.arange(320).astype(np.float16).reshape((2, 2, 2, 2, 2, 10)))
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :],
                       [0., 1., 2., 3., 4.])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :],
                       [5., 6., 7., 8., 9.])

    op = P.Split(-1, 5)
    op_wrapper = OpNetWrapper(op)
    outputs = op_wrapper(input_x)

    assert np.allclose(outputs[0].asnumpy()[0, 0, 0, 0, 0, :], [0., 1.])
    assert np.allclose(outputs[1].asnumpy()[0, 0, 0, 0, 0, :], [2., 3.])
    assert np.allclose(outputs[2].asnumpy()[0, 0, 0, 0, 0, :], [4., 5.])
    assert np.allclose(outputs[3].asnumpy()[0, 0, 0, 0, 0, :], [6., 7.])
    assert np.allclose(outputs[4].asnumpy()[0, 0, 0, 0, 0, :], [8., 9.])
Example #24
 def __init__(self, network):
     super(GRUWithLossCell, self).__init__()
     self.network = network
     self.loss = NLLLoss()
     self.logits_shape = (-1, config.src_vocab_size)
     self.reshape = P.Reshape()
     self.cast = P.Cast()
     self.mean = P.ReduceMean()
     self.text_len = config.max_length
     self.split = P.Split(axis=0, output_num=config.max_length-1)
     self.squeeze = P.Squeeze()
     self.add = P.AddN()
     self.transpose = P.Transpose()
     self.shape = P.Shape()
Example #25
    def __init__(self, network, optimizer, scale_update_cell=None, micro_batches=None, norm_clip=1.0, mech=None):
        super(_TrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.network.add_flags(defer_inline=True)
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
        self.hyper_map = C.HyperMap()
        if context.get_context("device_target") == "GPU":
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.gpu_target = False
            self.alloc_status = NPUAllocFloatStatus()
            self.get_status = NPUGetFloatStatus()
            self.clear_status = NPUClearFloatStatus()
        self.reduce_sum = ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = LessEqual()
        self.depend_parameter_use = ControlDepend(depend_mode=1)
        self.allreduce = P.AllReduce()
        self.parallel_mode = _get_parallel_mode()
        self.grad_reducer = F.identity
        self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]
        if self.reducer_flag:
            mean = _get_mirror_mean()
            degree = _get_device_num()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
        self.is_distributed = self.parallel_mode != ParallelMode.STAND_ALONE

        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
        self.add_flags(has_effect=True)

        # dp params
        self._micro_batches = micro_batches
        norm_clip = check_param_type('norm_clip', norm_clip, float)
        self._l2_norm = check_value_positive('norm_clip', norm_clip)
        self._split = P.Split(0, self._micro_batches)
        self._clip_by_global_norm = _ClipGradients()
        self._mech = mech
        self._tuple_add = _TupleAdd()
        self._hyper_map = C.HyperMap()
        self._micro_float = Tensor(micro_batches, mstype.float32)
Example #26
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 low_dims,
                 training_mode=True,
                 use_MLP=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")

        self.use_MLP = use_MLP
        self.training_mode = training_mode
        self.concat = P.Concat()
        self.split = P.Split(0, 3)
        self.l2norm = P.L2Normalize(axis=1)

        self.conv1 = _conv3x3(3, 64, stride=1)
        self.bn1 = _bn(64, training_mode)
        self.relu = nn.ReLU()

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(block.expansion * 512, low_dims)
        self.mlp_layer1 = _fc(block.expansion * 512, block.expansion * 512)
        self.mlp_layer2 = _fc(block.expansion * 512, low_dims)
Example #27
File: dpn.py  Project: yrpang/mindspore
    def __init__(self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, G, key_stride):
        super(BottleBlock, self).__init__()
        self.G = G
        self.bn1 = nn.BatchNorm2d(in_chs, eps=1e-3, momentum=0.9)
        self.conv1 = nn.Conv2d(in_chs, num_1x1_a, 1, stride=1)
        self.bn2 = nn.BatchNorm2d(num_1x1_a, eps=1e-3, momentum=0.9)
        self.conv2 = nn.CellList()
        for _ in range(G):
            self.conv2.append(nn.Conv2d(num_1x1_a // G, num_3x3_b // G, 3, key_stride, pad_mode='pad', padding=1))
        self.bn3 = nn.BatchNorm2d(num_3x3_b, eps=1e-3, momentum=0.9)
        self.conv3_r = nn.Conv2d(num_3x3_b, num_1x1_c, 1, stride=1)
        self.conv3_d = nn.Conv2d(num_3x3_b, inc, 1, stride=1)

        self.relu = nn.ReLU()
        self.concat = F.Concat(axis=1)
        self.split = F.Split(axis=1, output_num=G)
Example #28
 def __init__(self):
     super(Processing, self).__init__()
     self.slice = P.Slice()
     self.reshape = P.Reshape()
     self.shape = P.Shape()
     self.batchmat = nn.MatMul()
     self.split = P.Split(1, 3)
     self.concat = P.Concat(axis=1)
     slice_64 = Tensor(np.hstack((np.identity(64), np.zeros([64, 128]))))
     slice_128 = Tensor(np.hstack((np.zeros([128, 64]), np.identity(128))))
     self.slice_0 = [slice_64, slice_128]
     slice_46 = Tensor(np.hstack((np.identity(46), np.zeros([46, 92]))))
     slice_92 = Tensor(np.hstack((np.zeros([92, 46]), np.identity(92))))
     self.slice_1 = [slice_46, slice_92]
     slice_2 = np.vstack((np.identity(1), np.zeros([3, 1])))
     self.slice_2 = Tensor(slice_2)
Example #29
    def construct(self, output, target, target_weight):
        batch_size = F.shape(output)[0]
        num_joints = F.shape(output)[1]

        split = P.Split(1, num_joints)
        heatmaps_pred = self.reshape(output, (batch_size, num_joints, -1))
        heatmaps_pred = split(heatmaps_pred)

        heatmaps_gt = self.reshape(target, (batch_size, num_joints, -1))
        heatmaps_gt = split(heatmaps_gt)
        loss = 0
        for idx in range(num_joints):
            heatmap_pred = self.squeeze(heatmaps_pred[idx])
            heatmap_gt = self.squeeze(heatmaps_gt[idx])
            if self.use_target_weight:
                loss += 0.5 * self.criterion(
                    self.mul(heatmap_pred, target_weight[:, idx]),
                    self.mul(heatmap_gt, target_weight[:, idx]))
            else:
                loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
        return loss / num_joints
Example #30
    def __init__(self, neg_item_num, l2_embed, dist_reg):
        super(BGCFLoss, self).__init__()

        self.neg_item_num = neg_item_num
        self.l2_embed = l2_embed
        self.dist_reg = dist_reg

        self.log = P.Log()
        self.pow = P.Pow()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.concat = P.Concat(1)
        self.concat2 = P.Concat(2)
        self.split = P.Split(0, 2)
        self.reduce_sum = P.ReduceSum()
        self.expand_dims = P.ExpandDims()
        self.multiply = P.Mul()
        self.matmul = P.BatchMatMul()
        self.squeeze = P.Squeeze(1)
        self.transpose = P.Transpose()
        self.l2_loss = P.L2Loss()
        self.sigmoid = P.Sigmoid()