def __init__(self,
             begin,
             end,
             stride,
             begin_mask=0,
             end_mask=0,
             ellipsis_mask=0):
    super(StridedSliceNet, self).__init__()
    self.begin = begin
    self.end = end
    self.strides = stride
    self.slice = P.StridedSlice(begin_mask, end_mask, ellipsis_mask)
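
Only the initializer is shown above; the cell's forward pass is elided. A minimal construct would presumably just apply the cached primitive with the stored tuples (a sketch under that assumption, not the project's actual code):

def construct(self, x):
    # Hypothetical forward pass: slice x using the begin/end/strides
    # values cached in __init__.
    return self.slice(x, self.begin, self.end, self.strides)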
Example #2
def __init__(self,
             embedding_size,
             embedding_shape,
             use_relative_positions=False,
             use_token_type=False,
             token_type_vocab_size=16,
             use_one_hot_embeddings=False,
             initializer_range=0.02,
             max_position_embeddings=512,
             dropout_prob=0.1):
    super(EmbeddingPostprocessor, self).__init__()
    self.use_token_type = use_token_type
    self.token_type_vocab_size = token_type_vocab_size
    self.use_one_hot_embeddings = use_one_hot_embeddings
    self.max_position_embeddings = max_position_embeddings
    self.token_type_embedding = Embedding_Thor(
        vocab_size=token_type_vocab_size,
        embedding_size=embedding_size,
        embedding_shape=embedding_shape,
        use_one_hot_embeddings=use_one_hot_embeddings,
        initializer_range=initializer_range,
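        # Note: batch_size, damping, loss_scale and frequency are not
        # parameters of this __init__; in the source project they are
        # presumably module-level values from the THOR training config.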
        batch_size=batch_size,
        damping=damping,
        loss_scale=loss_scale,
        frequency=frequency)
    self.shape_flat = (-1,)
    self.one_hot = P.OneHot()
    self.on_value = Tensor(1.0, mstype.float32)
    self.off_value = Tensor(0.1, mstype.float32)
    self.array_mul = P.MatMul()
    self.reshape = P.Reshape()
    self.shape = tuple(embedding_shape)
    self.dropout = nn.Dropout(1 - dropout_prob)
    self.gather = P.GatherV2()
    self.use_relative_positions = use_relative_positions
    self.slice = P.StridedSlice()
    _, seq, width = self.shape
    position_embedding_shape = [1, seq, width]
    self.full_position_embedding = Embedding_Thor(
        vocab_size=max_position_embeddings,
        embedding_size=embedding_size,
        embedding_shape=position_embedding_shape,
        use_one_hot_embeddings=use_one_hot_embeddings,
        initializer_range=initializer_range,
        batch_size=batch_size,
        damping=damping,
        loss_scale=loss_scale,
        frequency=frequency)
    self.position_ids = Tensor(
        np.arange(seq).reshape(-1, seq).astype(np.int32))
    self.layernorm = nn.LayerNorm((embedding_size,))
    self.add = P.TensorAdd()
Example #3
def __init__(self, weight, w2, begin, end, strides, strategy1=None, strategy2=None, is_parameter=True, mask=0):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.strided_slice = P.StridedSlice(begin_mask=mask).shard(strategy2)
    if is_parameter:
        self.weight = Parameter(weight, "w1")
    else:
        self.weight = weight
    self.mul2 = P.Mul()
    self.weight2 = Parameter(w2, "w2")
    self.begin = begin
    self.end = end
    self.strides = strides
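
As in Example #1, only __init__ is shown. For an auto-parallel test net like this, construct would presumably chain the sharded operators over the cached attributes, roughly as follows (a sketch, not the test's actual forward pass):

def construct(self, x):
    # Hypothetical forward: slice the weight, then feed the result through
    # both Mul ops so that strategy1 and strategy2 are each exercised.
    out = self.strided_slice(self.weight, self.begin, self.end, self.strides)
    out = self.mul(x, out)
    out = self.mul2(out, self.weight2)
    return out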
Example #4
def __init__(self,
             weight2,
             begin,
             end,
             strides,
             strategy1=None,
             strategy2=None):
    super().__init__()
    self.mul = P.Mul().shard(strategy1)
    self.strided_slice = P.StridedSlice().shard(strategy2)
    self.weight2 = Parameter(weight2, "w2")
    self.begin = begin
    self.end = end
    self.strides = strides
Example #5
def __init__(self, in_channel, out_channel, axis, input_shape, mul_size,
             test_size, prelu_size, transpose_b, matmul_size, num_class):
    super().__init__()
    mul_np = np.full(mul_size, 0.5, dtype=np.float32)
    self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
    bias_np = np.full((12,), 7.1, dtype=np.float32)
    self.bias = Parameter(Tensor(bias_np), name="bias")
    prelu_np = np.full(prelu_size, 0.8, dtype=np.float32)
    self.prelu_weight = Parameter(Tensor(prelu_np), name="prelu_weight")
    matmul_np = np.full(matmul_size, 1.1, dtype=np.float32)
    self.matmul_weight = Parameter(Tensor(matmul_np), name="matmul_weight")
    self.mul = P.Mul()
    self.conv = Conv2d(in_channels=in_channel,
                       out_channels=out_channel,
                       kernel_size=5,
                       has_bias=True,
                       weight_init='ones',
                       bias_init='ones',
                       pad_mode='valid')
    self.scalar = 0.5
    self.parameter = Parameter(initializer(0.5,
                                           test_size,
                                           dtype=mstype.float32),
                               name='parameter')
    self.tensor = Tensor(np.full(test_size, 0.05, dtype=np.float32))
    self.softmax = Softmax(axis=axis)
    self.relu = ReLU()
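    # Presumably pins the underlying ReLU primitive to the CPU backend for
    # heterogeneous execution (inferred from the attribute name).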
    self.relu.relu.add_prim_attr("primitive_target", "CPU")
    self.reshape = P.Reshape()
    self.input_shape = input_shape
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=1)
    self.reduce_sum = P.ReduceSum()
    self.bias_add = P.BiasAdd()
    self.cos = P.Cos()
    self.prelu = P.PReLU()
    self.matmul = P.MatMul(transpose_b=transpose_b)
    self.l2norm = P.L2Normalize(axis=(1 - axis))
    self.tensoradd = P.TensorAdd()
    self.strided_slice = P.StridedSlice()
    self.dense = Dense(in_channels=6,
                       out_channels=num_class,
                       weight_init='ones',
                       bias_init='ones',
                       has_bias=True)
Example #6
    def __init__(self,
                 embedding_size,
                 embedding_shape,
                 use_relative_positions=False,
                 use_token_type=False,
                 token_type_vocab_size=16,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 max_position_embeddings=512,
                 dropout_prob=0.1):
        super(EmbeddingPostprocessor, self).__init__()
        self.use_token_type = use_token_type
        self.token_type_vocab_size = token_type_vocab_size
        self.use_one_hot_embeddings = use_one_hot_embeddings
        self.max_position_embeddings = max_position_embeddings
        self.embedding_table = Parameter(
            initializer(TruncatedNormal(initializer_range),
                        [token_type_vocab_size, embedding_size]),
            name='embedding_table')

        self.shape_flat = (-1,)
        self.one_hot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.1, mstype.float32)
        self.array_mul = P.MatMul()
        self.reshape = P.Reshape()
        self.shape = tuple(embedding_shape)
        self.layernorm = nn.LayerNorm((embedding_size,))
        self.dropout = nn.Dropout(1 - dropout_prob)
        self.gather = P.GatherV2()
        self.use_relative_positions = use_relative_positions
        self.slice = P.StridedSlice()
        self.full_position_embeddings = Parameter(
            initializer(TruncatedNormal(initializer_range),
                        [max_position_embeddings, embedding_size]),
            name='full_position_embeddings')
Example #7
test_case_lists = [test_case_math_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain test cases
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('StridedSlice_1_Error', {
        'block': (lambda x: P.StridedSlice(begin_mask="1"), {
            'exception': TypeError
        }),
        'desc_inputs': [0]
    }),
    ('StridedSlice_2_Error', {
        'block': (lambda x: P.StridedSlice(end_mask="1"), {
            'exception': TypeError
        }),
        'desc_inputs': [0]
    }),
    ('StridedSlice_3_Error', {
        'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {
            'exception': TypeError
        }),
        'desc_inputs': [0]
    }),
    ('StridedSlice_4_Error', {
        'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {
            'exception': TypeError
        }),
        'desc_inputs': [0]
    }),
    ('AssignAdd_Error', {
        'block': (P.AssignAdd(), {
            'exception': ValueError
        }),
        'desc_inputs': [[1]]
    }),
]
Example #8
    def __init__(self,
                 config,
                 is_training,
                 use_one_hot_embeddings=False):
        super(BertModel, self).__init__()
        config = copy.deepcopy(config)
        if not is_training:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0

        self.input_mask_from_dataset = config.input_mask_from_dataset
        self.token_type_ids_from_dataset = config.token_type_ids_from_dataset
        self.batch_size = config.batch_size
        self.seq_length = config.seq_length
        self.hidden_size = config.hidden_size
        self.num_hidden_layers = config.num_hidden_layers
        self.embedding_size = config.hidden_size
        self.token_type_ids = None

        self.last_idx = self.num_hidden_layers - 1
        output_embedding_shape = [self.batch_size, self.seq_length,
                                  self.embedding_size]

        if not self.token_type_ids_from_dataset:
            self.token_type_ids = initializer(
                "zeros", [self.batch_size, self.seq_length], mstype.int32).to_tensor()

        self.bert_embedding_lookup = EmbeddingLookup(
            vocab_size=config.vocab_size,
            embedding_size=self.embedding_size,
            embedding_shape=output_embedding_shape,
            use_one_hot_embeddings=use_one_hot_embeddings,
            initializer_range=config.initializer_range)

        self.bert_embedding_postprocessor = EmbeddingPostprocessor(
            embedding_size=self.embedding_size,
            embedding_shape=output_embedding_shape,
            use_relative_positions=config.use_relative_positions,
            use_token_type=True,
            token_type_vocab_size=config.type_vocab_size,
            use_one_hot_embeddings=use_one_hot_embeddings,
            initializer_range=0.02,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)

        self.bert_encoder = BertTransformer(
            batch_size=self.batch_size,
            hidden_size=self.hidden_size,
            seq_length=self.seq_length,
            num_attention_heads=config.num_attention_heads,
            num_hidden_layers=self.num_hidden_layers,
            intermediate_size=config.intermediate_size,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            use_one_hot_embeddings=use_one_hot_embeddings,
            initializer_range=config.initializer_range,
            hidden_dropout_prob=config.hidden_dropout_prob,
            use_relative_positions=config.use_relative_positions,
            hidden_act=config.hidden_act,
            compute_type=config.compute_type,
            return_all_encoders=True)

        self.cast = P.Cast()
        self.dtype = config.dtype
        self.cast_compute_type = SaturateCast(dst_type=config.compute_type)
        self.slice = P.StridedSlice()

        self.squeeze_1 = P.Squeeze(axis=1)
        self.dense = nn.Dense(self.hidden_size, self.hidden_size,
                              activation="tanh",
                              weight_init=TruncatedNormal(config.initializer_range)).to_float(config.compute_type)
        self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)
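
Although construct is not shown here, the StridedSlice held in self.slice is presumably what extracts the first-token ([CLS]) hidden state from the final encoder output; Squeeze(axis=1) then drops the length-1 sequence axis before the tanh Dense layer acts as the usual BERT pooler.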
Example #9
def __init__(self, begin, end, stride):
    super(Net, self).__init__()
    self.stridedslice = P.StridedSlice()
    self.begin = begin
    self.end = end
    self.stride = stride
Example #10
def construct(self, x1):
    return P.StridedSlice(1, 1, 0, self.rank(x1), 0)(x1, (0, 0), (0, 0), (1, 1))
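
The five positional constructor arguments correspond to begin_mask, end_mask, ellipsis_mask, new_axis_mask and shrink_axis_mask, so this call sets begin_mask=1, end_mask=1 and uses the input's rank as new_axis_mask before slicing x1 with begin (0, 0), end (0, 0) and strides (1, 1).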
Example #11
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('Maximum_0', {
     'block': P.Maximum(),
     'desc_inputs': [[3, 5], [2, 3, 3, 5]],
     'desc_bprop': [[2, 3, 3, 5]]}),
 ('MaximumGrad', {
     'block': G.MaximumGrad(),
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
     'skip': ['backward']}),
 ('MinimumGrad', {
     'block': G.MinimumGrad(),
     'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5], [2, 3, 3, 5]],
     'skip': ['backward']}),
 ('StridedSlice', {
     'block': P.StridedSlice(),
     'desc_const': [(0, 1, 2, 1),
                    (2, 3, 3, 4),
                    (1, 1, 1, 1)],
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[2, 2, 1, 3]]}),
 ('Slice_1', {
     'block': P.Slice(),
     'desc_const': [(0, 1, 2, 1),
                    (1, 1, 1, 2)],
     'desc_inputs': [[2, 3, 3, 5]],
     'desc_bprop': [[1, 1, 1, 2]]}),
 ('StridedSliceGrad', {
     'block': G.StridedSliceGrad(),
     'desc_const': [(64, 1, 1024),
                    (0, 1, 0),
Example #12
def __init__(self):
    super(StridedSlice, self).__init__()
    self.stridedslice = P.StridedSlice()
Example #13
test_case_lists = [test_case_math_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain test cases
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('StridedSlice_1_Error', {
        'block': (lambda x: P.StridedSlice(begin_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_2_Error', {
        'block': (lambda x: P.StridedSlice(end_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_3_Error', {
        'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_4_Error', {
        'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('AssignAdd_Error', {
        'block': (P.AssignAdd(), {'exception': ValueError}),
        'desc_inputs': [[1]]}),
]
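
All four StridedSlice entries fail at primitive construction time: the mask arguments are validated as Python ints when the operator is instantiated, so a str or float value raises TypeError before any input tensor is involved. The AssignAdd case, by contrast, is expected to raise ValueError during input validation.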
Example #14
def __init__(self):
    super(Net, self).__init__()
    self.strided_slice = P.StridedSlice()
Example #15
    def __init__(self,
                 config: TransformerConfig,
                 is_training: bool,
                 use_one_hot_embeddings: bool = False,
                 use_positional_embedding: bool = True):
        super(Transformer, self).__init__()

        self.use_positional_embedding = use_positional_embedding
        config = copy.deepcopy(config)
        self.is_training = is_training
        if not is_training:
            config.hidden_dropout_prob = 0.0
            config.attention_dropout_prob = 0.0

        self.input_mask_from_dataset = config.input_mask_from_dataset
        self.batch_size = config.batch_size
        self.max_positions = config.seq_length
        self.attn_embed_dim = config.hidden_size
        self.num_layers = config.num_hidden_layers
        self.word_embed_dim = config.hidden_size

        self.last_idx = self.num_layers - 1

        self.embedding_lookup = EmbeddingLookup(
            vocab_size=config.vocab_size,
            embed_dim=self.word_embed_dim,
            use_one_hot_embeddings=use_one_hot_embeddings)

        if self.use_positional_embedding:
            self.positional_embedding = PositionalEmbedding(
                embedding_size=self.word_embed_dim,
                max_position_embeddings=config.max_position_embeddings)

        self.encoder = TransformerEncoder(
            attn_embed_dim=self.attn_embed_dim,
            encoder_layers=self.num_layers,
            num_attn_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            attention_dropout_prob=config.attention_dropout_prob,
            initializer_range=config.initializer_range,
            hidden_dropout_prob=config.hidden_dropout_prob,
            hidden_act=config.hidden_act,
            compute_type=config.compute_type)

        self.decoder = TransformerDecoder(
            attn_embed_dim=self.attn_embed_dim,
            decoder_layers=self.num_layers,
            num_attn_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            attn_dropout_prob=config.attention_dropout_prob,
            initializer_range=config.initializer_range,
            dropout_prob=config.hidden_dropout_prob,
            hidden_act=config.hidden_act,
            compute_type=config.compute_type)

        self.cast = P.Cast()
        self.dtype = config.dtype
        self.cast_compute_type = SaturateCast(dst_type=config.compute_type)
        self.slice = P.StridedSlice()
        self.dropout = nn.Dropout(keep_prob=1 - config.hidden_dropout_prob)

        self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config)

        self.scale = Tensor([math.sqrt(float(self.word_embed_dim))],
                            dtype=mstype.float32)
        self.multiply = P.Mul()
Example #16
def strided_slice(nptype):
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    x = Tensor(np.arange(0, 2 * 3 * 4 * 5).reshape(2, 3, 4, 5).astype(nptype))
    y = P.StridedSlice()(x, (1, 0, 0, 2), (2, 2, 2, 4), (1, 1, 1, 1))
    expect = np.array([[[[62, 63], [67, 68]],
                        [[82, 83], [87, 88]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

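    # Negative strides traverse an axis in reverse: here begin=5 clamps to
    # the last index of the axis, and stride=-2 picks indices 4 and 2
    # (end=1 is excluded).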
    y = P.StridedSlice()(x, (1, 0, 0, 5), (2, 2, 2, 1), (1, 1, 1, -2))
    expect = np.array([[[[64, 62], [69, 67]],
                        [[84, 82], [89, 87]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

    y = P.StridedSlice()(x, (1, 0, 0, -1), (2, 2, 2, 1), (1, 1, 1, -1))
    expect = np.array([[[[64, 63, 62], [69, 68, 67]],
                        [[84, 83, 82], [89, 88, 87]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

    y = P.StridedSlice()(x, (1, 0, -1, -2), (2, 2, 0, -5), (1, 1, -1, -2))
    expect = np.array([[[[78, 76], [73, 71], [68, 66]],
                        [[98, 96], [93, 91], [88, 86]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

    # ME Infer fault
    # y = P.StridedSlice(begin_mask=0b1000, end_mask=0b0010)(x, (1, 0, 0, 2), (2, 2, 2, 4), (1, 1, 1, 1))
    # expect = np.array([[[[62, 63],
    #                      [67, 68]],
    #                     [[82, 83],
    #                      [87, 88]],
    #                     [[102, 103],
    #                      [107, 108]]]]).astype(nptype)
    # assert np.allclose(y.asnumpy(), expect)

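    # Bit i of each mask refers to the i-th slice specification:
    # begin_mask=0b1000 ignores the given begin at position 3,
    # end_mask=0b0010 ignores the given end at position 1, and
    # ellipsis_mask=0b0100 turns position 2 into an ellipsis.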
    op = P.StridedSlice(begin_mask=0b1000,
                        end_mask=0b0010,
                        ellipsis_mask=0b0100)
    y = op(x, (1, 0, 0, 2), (2, 2, 2, 4), (1, 1, 1, 1))
    expect = np.array([[[[60, 61, 62, 63], [65, 66, 67, 68],
                         [70, 71, 72, 73], [75, 76, 77, 78]],
                        [[80, 81, 82, 83], [85, 86, 87, 88],
                         [90, 91, 92, 93], [95, 96, 97, 98]],
                        [[100, 101, 102, 103], [105, 106, 107, 108],
                         [110, 111, 112, 113], [115, 116, 117, 118]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

    x = Tensor(np.arange(0, 3 * 4 * 5).reshape(3, 4, 5).astype(nptype))
    y = P.StridedSlice()(x, (1, 0, 0), (2, -3, 3), (1, 1, 3))
    expect = np.array([[[20]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)

    x_np = np.arange(0, 4 * 5).reshape(4, 5).astype(nptype)
    y = Tensor(x_np)[:, ::-1]
    expect = x_np[:, ::-1]
    assert np.allclose(y.asnumpy(), expect)

    x = Tensor(np.arange(0, 2 * 3 * 4 * 5 * 4 * 3 * 2)
               .reshape(2, 3, 4, 5, 4, 3, 2).astype(nptype))
    y = P.StridedSlice()(x, (1, 0, 0, 2, 1, 2, 0), (2, 2, 2, 4, 2, 3, 2),
                         (1, 1, 1, 1, 1, 1, 2))
    expect = np.array([[[[[[[1498.]]], [[[1522.]]]],
                         [[[[1618.]]], [[[1642.]]]]],
                        [[[[[1978.]]], [[[2002.]]]],
                         [[[[2098.]]], [[[2122.]]]]]]]).astype(nptype)
    assert np.allclose(y.asnumpy(), expect)
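
strided_slice is parameterized by nptype so the same assertions can be reused across dtypes; the surrounding test file would presumably call it from per-dtype entry points along these lines (the wrapper name is hypothetical):

def test_strided_slice_float32():
    strided_slice(np.float32)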