Code Example #1
    def __init__(self, num_labels, activation_fn=nn.Softmax()):
        super().__init__(num_labels)

        self._perturb = RandomPerturb()
        self._num_perturbations = 10  # number of perturbations used in evaluation
        self._threshold = 0.1  # threshold to generate perturbation
        self._activation_fn = activation_fn
Code Example #2
File: eval.py  Project: BebDong/MindSeg
    def __init__(self, network):
        super(BuildEvalNetwork, self).__init__()
        self.network = network

        self.transpose = P.Transpose()
        self.softmax = nn.Softmax(
            axis=3)  # currently supports softmax only along the last axis
Code Example #3
def test_imm_target():
    """
    Test Imm pattern in the target.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    set_renorm(False)
    set_reopt(False)

    @registe_pass(run_only_once=True)
    def softmax_pass():
        x = Any()
        pattern = Call(P.Softmax(), [x])
        imm = Imm(0)
        target_0 = Call("make_tuple", [pattern])
        target = Call("tuple_getitem", [target_0, imm])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(5)
    unregiste_pass(softmax_pass)
    assert "make_tuple" in transformed_repr
    assert "tuple_getitem" in transformed_repr
    assert "Softmax" in transformed_repr
Code Example #4
def test_switch_layer_shape_join_failed():
    class AddFuncNet(nn.Cell):
        def __init__(self, funcs, new_func):
            super(AddFuncNet, self).__init__()
            self.funcs = funcs
            self.new_func = new_func

        def construct(self, i, inputs):
            final_funcs = self.funcs + (self.new_func, )
            x = final_funcs[i](inputs)
            return x

    class ReLUTuple(nn.Cell):
        def __init__(self):
            super(ReLUTuple, self).__init__()
            self.op = nn.ReLU()

        def construct(self, x):
            return self.op(x[0])

    func1 = nn.Softmax()
    func2 = nn.ReLU()
    func3 = ReLUTuple()

    funcs = (func1, func2)

    net = AddFuncNet(funcs, func3)

    inp = Tensor(np.random.randn(2, 3, 4, 5).astype(np.float32))
    i = Tensor(1, mstype.int32)
    with pytest.raises(ValueError) as err:
        net(i, inp)
Code Example #5
def test_softmax_relu():
    """
    Use a Python pass to transform Softmax into ReLU.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_relu_pass():
        softmax = P.Softmax()
        relu = P.ReLU()

        def pattern(x):
            x = softmax(x)
            return x

        def target(x):
            x = relu(x)
            return x

        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(2)
    ppm = PyPassManager()
    ppm.unregiste(softmax_relu_pass)
    assert "ReLU" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #6
 def __init__(self, value1):
     super(Net, self).__init__()
     self.relu = nn.ReLU()
     self.softmax = nn.Softmax(0)
     self.axis = 0
     self.TC = ClassTest("test_class", 1.2)
     self.value = value1
Code Example #7
def test_isnot_pattern_1():
    """
    Test the IsNot pattern, which expresses IsNot semantics.
    Case: the IsNot pattern matches the graph.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def single_bn_pass():
        """
        Substitute a Softmax whose input is NOT produced by MatMul with ReLU6.
        """
        matmul = Prim("MatMul")
        pattern_0 = NoneOf(matmul)
        softmax = P.Softmax()
        pattern = Call(softmax, [pattern_0])
        relu6 = P.ReLU6()
        target = Call(relu6, [pattern_0])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(5)
    unregiste_pass(single_bn_pass)
    assert "ReLU6" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #8
def test_imm_pattern():
    """
    Test Imm pattern in the target.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_addn_pass():
        x = AnyPattern()
        softmax = P.Softmax()
        pattern = CallWith(softmax, inputs=[x])
        imm = Imm(0)
        target_0 = CallWith("make_tuple",
                            inputs=[pattern],
                            should_replace=False)
        target = CallWith("tuple_getitem",
                          inputs=[target_0, imm],
                          should_replace=False)
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(5)
    print(transformed_repr)
    unregiste_pass(softmax_addn_pass)
    assert "make_tuple" in transformed_repr
    assert "tuple_getitem" in transformed_repr
    assert "Softmax" in transformed_repr
Code Example #9
def test_newparameter_pattern():
    """
    Test NewParameter pattern in the target
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    set_renorm(False)
    set_reopt(False)
    @registe_pass(requires_grad=False, run_only_once=True)
    def softmax_addn_pass():
        x = Any()
        pattern = Call(P.Softmax(), [x])

        default_tensor0 = Tensor(np.ones((4, 4)), mindspore.float32)
        default_tensor1 = Tensor(np.ones((4, 4)), mindspore.float32)
        new_para_0 = NewParameter("Merlin", default_tensor0)
        new_para_1 = NewParameter("Arthur", default_tensor1)
        target_0 = Call(P.MatMul(), [new_para_0, new_para_1])
        target = Call("make_tuple", [target_0])
        return pattern, target
    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(5)
    unregiste_pass(softmax_addn_pass)
    assert "MatMul" in transformed_repr
    assert "make_tuple" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #10
def test_softmax_axis_none():
    layer = nn.Softmax()
    x = Tensor(np.random.rand(1, 3, 4, 4).astype(np.float32))
    output = layer.construct(x)
    output_np = output.asnumpy()
    print(output_np)
    assert isinstance(output_np[0][0][0][0], (np.float32, np.float64))
Code Example #11
def test_softmax_axis_none():
    layer = nn.Softmax()
    x = Tensor(np.ones([3, 2]))
    assert layer.softmax.axis == (-1, )
    output = layer.construct(x)
    output_np = output.asnumpy()
    assert isinstance(output_np[0][0], (np.float32, np.float64))
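
The two tests above exercise the default axis. As a minimal usage sketch (the imports and sample values below are assumptions for illustration, not taken from any project on this page), nn.Softmax() constructed without arguments normalizes over the last axis:

import numpy as np
import mindspore
from mindspore import nn, Tensor

softmax = nn.Softmax()  # defaults to axis=-1, the last axis
x = Tensor(np.array([[1.0, 2.0, 3.0]]), mindspore.float32)
y = softmax(x)          # each row of the output sums to 1.0
print(y.asnumpy())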
Code Example #12
 def __init__(self):
     super(CaseNet, self).__init__()
     self.conv = nn.Conv2d(1, 3, 3)
     self.relu = nn.ReLU()
     self.softmax = nn.Softmax()
     self.layers1 = (self.relu, self.softmax)
     self.layers2 = (self.conv, self.relu)
Code Example #13
def test_gen_new_parameter():
    """
    Test gen_new_parameter
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    default_tensor = Tensor(np.ones((4, 4)), mindspore.float32)
    new_para = NewParameter("Merlin", default_tensor)
    set_renorm(False)
    set_reopt(False)
    gen_new_parameter(new_para)
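    # After this registration, "Merlin" should appear in the transformed graph (checked below)
    # and disappear again once cancel_new_parameter(new_para) is called.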
    @registe_pass(requires_grad=False, run_only_once=True)
    def softmax_make_tuple_pass():
        x = Any()
        softmax = P.Softmax()
        pattern = Call(softmax, [x])

        target = Call("make_tuple", [pattern, new_para])
        return pattern, target
    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(5)
    assert "Merlin" in transformed_repr
    unregiste_pass(softmax_make_tuple_pass)
    cancel_new_parameter(new_para)
    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(5)
    assert "Merlin" not in transformed_repr
Code Example #14
def test_isin_pattern_1():
    """
    Test OneOf (IsIn). The matched pattern is used as a nested input of the target in this case.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_neg_pass():
        x = Any()
        softmax_pattern = Prim(P.Softmax())
        call_softmax = Call(softmax_pattern, [x])
        relu_pattern = Prim(P.ReLU())
        call_relu = Call(relu_pattern, [x])

        pattern = OneOf([call_softmax, call_relu])
        neg_ops = Prim(P.Neg())
        target = Call(neg_ops, [pattern])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(4)
    unregiste_pass(softmax_neg_pass)
    assert "Neg" in transformed_repr
    assert "Softmax" in transformed_repr
Code Example #15
def test_newtensor_pattern():
    """
    Test NewTensor pattern in the target
    """
    set_renorm(False)
    set_reopt(False)
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_addn_pass():
        x = Any()
        pattern = Call(P.Softmax(), [x])

        weight_tensor = Tensor(np.zeros([42]), mindspore.float16)
        new_weight = NewTensor(weight_tensor)
        target = Call(P.AddN(), [x, new_weight])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(2)
    unregiste_pass(softmax_addn_pass)
    assert "AddN" in transformed_repr
    assert "Softmax" not in transformed_repr
    set_renorm(True)
Code Example #16
def test_isin_pattern_0():
    """
    Test IsIn pattern which expresses the IsIn/OneOf semantics.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_relu_pass():
        x = Any()
        softmax_pattern = Prim(P.Softmax())
        call_softmax = Call(softmax_pattern, [x])
        relu_pattern = Prim(P.ReLU())
        call_relu = Call(relu_pattern, [x])

        pattern = OneOf([call_softmax, call_relu])
        relu6_pattern = Prim(P.ReLU6())
        target = Call(relu6_pattern, [x])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(2)
    unregiste_pass(softmax_relu_pass)
    assert "ReLU6" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #17
    def __init__(self, batch_size, query_linear_bias, key_linear_bias,
                 value_linear_bias):
        """init function"""
        super(MultiHeadAttn, self).__init__()
        self.batch_size = batch_size
        self.matmul = nn.MatMul()
        self.add = P.Add()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.div = P.Div()
        self.softmax = nn.Softmax(axis=3)

        self.query_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                             name=None)
        self.query_linear_bias = query_linear_bias

        self.key_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                           name=None)
        self.key_linear_bias = key_linear_bias

        self.value_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                             name=None)
        self.value_linear_bias = value_linear_bias

        self.reshape_shape = tuple([batch_size, 512, 64, 64])

        self.w = Parameter(Tensor(
            np.random.uniform(0, 1, (64, 64, 4096)).astype(np.float32)),
                           name=None)
        self.b = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, )).astype(np.float32)),
                           name=None)
Code Example #18
def test_softmax_relu_sigmoid():
    """
    Use a Python pass to transform Softmax(x) into ReLU(Sigmoid(x)).

    NOTE:
        Sigmoid pattern only exists in the target.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_relu_pass():
        x = Any()
        softmax_pattern = Prim(P.Softmax())
        pattern = Call(softmax_pattern, [x])
        sigmoid_pattern = Prim(P.Sigmoid())
        call_sigmoid = Call(sigmoid_pattern, [x])
        relu_pattern = Prim(P.ReLU())
        target = Call(relu_pattern, [call_sigmoid])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(3)
    unregiste_pass(softmax_relu_pass)
    assert "ReLU" in transformed_repr
    assert "Sigmoid" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #19
def test_isin_pattern():
    """
    Test IsIn pattern which expresses the IsIn/OneOf semantics.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_relu_pass():
        x = AnyPattern()
        softmax_pattern = IsPrimTypeOf(P.Softmax())
        call_softmax = CallWith(softmax_pattern, inputs=[x])
        relu_pattern = IsPrimTypeOf(P.ReLU())
        call_relu = CallWith(relu_pattern, inputs=[x])

        pattern = IsIn([call_softmax, call_relu])
        relu6_pattern = IsPrimTypeOf(P.ReLU6(), should_replace=False)
        target = CallWith(relu6_pattern, inputs=[x])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model,
                                      inputs).get_return().expanded_str(2)
    ppm = PyPassManager()
    ppm.unregiste(softmax_relu_pass)
    assert "ReLU6" in transformed_repr
    assert "Softmax" not in transformed_repr
Code Example #20
File: occlusion.py  Project: WANGSSSSSSS/mindspore
    def __init__(self, network, activation_fn=nn.Softmax()):
        super().__init__(network, activation_fn)

        self._ablation = Ablation(perturb_mode='Deletion')
        self._aggregation_fn = abs_max
        self._get_replacement = Constant(base_value=0.0)
        self._num_sample_per_dim = 32  # number of perturbations along each dimension
        self._num_per_eval = 2  # number of perturbations generated for each sample per evaluation step
Code Example #21
File: twohop_bert.py  Project: yrpang/mindspore
 def __init__(self):
     super(MultiHeadAttn, self).__init__()
     self.matmul_0 = nn.MatMul()
     self.matmul_0.to_float(mstype.float16)
     self.matmul_0_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.matmul_1 = nn.MatMul()
     self.matmul_1.to_float(mstype.float16)
     self.matmul_1_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.matmul_2 = nn.MatMul()
     self.matmul_2.to_float(mstype.float16)
     self.matmul_2_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.add_3 = P.Add()
     self.add_3_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.add_4 = P.Add()
     self.add_4_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.add_5 = P.Add()
     self.add_5_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.reshape_6 = P.Reshape()
     self.reshape_6_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.reshape_7 = P.Reshape()
     self.reshape_7_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.reshape_8 = P.Reshape()
     self.reshape_8_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.transpose_9 = P.Transpose()
     self.transpose_10 = P.Transpose()
     self.transpose_11 = P.Transpose()
     self.matmul_12 = nn.MatMul()
     self.matmul_12.to_float(mstype.float16)
     self.div_13 = P.Div()
     self.div_13_w = 8.0
     self.add_14 = P.Add()
     self.softmax_15 = nn.Softmax(axis=3)
     self.matmul_16 = nn.MatMul()
     self.matmul_16.to_float(mstype.float16)
     self.transpose_17 = P.Transpose()
     self.reshape_18 = P.Reshape()
     self.reshape_18_shape = tuple([BATCH_SIZE, 448, 768])
     self.matmul_19 = nn.MatMul()
     self.matmul_19.to_float(mstype.float16)
     self.matmul_19_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                  name=None)
     self.add_20 = P.Add()
     self.add_20_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                  name=None)
Code Example #22
File: rpn.py  Project: mindspore-ai/course
    def __init__(self, config, batch_size, in_channels, feat_channels,
                 num_anchors, cls_out_channels):
        super(RPN, self).__init__()
        cfg_rpn = config
        self.cfg = config
        self.num_bboxes = cfg_rpn.num_bboxes
        self.feature_anchor_shape = cfg_rpn.feature_shapes
        self.feature_anchor_shape = self.feature_anchor_shape[0] * \
            self.feature_anchor_shape[1] * num_anchors * batch_size
        self.num_anchors = num_anchors
        self.batch_size = batch_size
        self.test_batch_size = cfg_rpn.test_batch_size
        self.num_layers = 1
        self.real_ratio = Tensor(np.ones((1, 1)).astype(np.float16))
        self.use_sigmoid_cls = config.use_sigmoid_cls
        if config.use_sigmoid_cls:
            self.reshape_shape_cls = (-1, )
            self.loss_cls = P.SigmoidCrossEntropyWithLogits()
            cls_out_channels = 1
        else:
            self.reshape_shape_cls = (-1, cls_out_channels)
            self.loss_cls = nn.SoftmaxCrossEntropyWithLogits(sparse=True,
                                                             reduction="none")
        self.rpn_convs_list = self._make_rpn_layer(self.num_layers, in_channels, feat_channels,\
            num_anchors, cls_out_channels)

        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
        self.concat = P.Concat(axis=0)
        self.fill = P.Fill()
        self.placeh1 = Tensor(np.ones((1, )).astype(np.float16))

        self.trans_shape = (0, 2, 3, 1)

        self.reshape_shape_reg = (-1, 4)
        self.softmax = nn.Softmax()
        self.rpn_loss_reg_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_reg_weight).astype(np.float16))
        self.rpn_loss_cls_weight = Tensor(
            np.array(cfg_rpn.rpn_loss_cls_weight).astype(np.float16))
        self.num_expected_total = Tensor(
            np.array(cfg_rpn.num_expected_neg * self.batch_size).astype(
                np.float16))
        self.num_bboxes = cfg_rpn.num_bboxes
        self.get_targets = BboxAssignSample(cfg_rpn, self.batch_size,
                                            self.num_bboxes, False)
        self.CheckValid = P.CheckValid()
        self.sum_loss = P.ReduceSum()
        self.loss_bbox = P.SmoothL1Loss(beta=1.0 / 9.0)
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.tile = P.Tile()
        self.zeros_like = P.ZerosLike()
        self.loss = Tensor(np.zeros((1, )).astype(np.float16))
        self.clsloss = Tensor(np.zeros((1, )).astype(np.float16))
        self.regloss = Tensor(np.zeros((1, )).astype(np.float16))
        self.print = P.Print()
Code Example #23
    def __init__(self, config, scale=1.0, layer_idx=None):
        super(Attention, self).__init__()
        self.get_attention_mask = AttentionMask(config)
        self.projection = Mapping(config, config.embedding_size,
                                  config.embedding_size, scale)
        self.transpose = P.Transpose().shard(((config.dp, 1, config.mp, 1),))
        self.merger_head_transpose = P.Transpose().shard(
            ((config.dp, config.mp, 1, 1),))
        self.reshape = P.Reshape()
        self.n_head = config.num_heads
        self.size_per_head = config.embedding_size // self.n_head
        self.concat_k = P.Concat(axis=3)
        self.concat_v = P.Concat(axis=2)
        self.multiply_data = Tensor([
            -10000.0,
        ], dtype=mstype.float32)
        self.batch_matmul = P.BatchMatMul().shard(
            ((config.dp, config.mp, 1, 1), (config.dp, config.mp, 1, 1)))
        self.scale = scale
        self.real_div = P.RealDiv().shard(((config.dp, config.mp, 1, 1), ()))
        self.sub = P.Sub().shard(((1,), (config.dp, 1, 1, 1))).add_prim_attr("_side_effect", True)
        self.mul = P.Mul().shard(((config.dp, 1, 1, 1), (1,))).add_prim_attr("_side_effect", True)
        self.add = P.TensorAdd().shard(
            ((config.dp, 1, 1, 1), (config.dp, config.mp, 1, 1)))
        if self.scale:
            self.scale_factor = Tensor(math.sqrt(self.size_per_head))
        if layer_idx is not None:
            self.coeff = math.sqrt(layer_idx * math.sqrt(self.size_per_head))
            self.coeff = Tensor(self.coeff)
        self.use_past = config.use_past
        self.dropout = nn.Dropout(1 - config.dropout_rate)
        self.dropout.dropout_gen_mask.shard(((config.dp, 1, 1),))
        self.dropout.dropout_do_mask.shard(((config.dp, 1, 1),))
        self.prob_dropout = nn.Dropout(1 - config.dropout_rate)
        self.prob_dropout.dropout_gen_mask.shard(
            ((config.dp, config.mp, 1, 1),))
        self.prob_dropout.dropout_do_mask.shard(
            ((config.dp, config.mp, 1, 1),))
        self.softmax = nn.Softmax()
        self.softmax.softmax.shard(((config.dp, config.mp, 1),))
        self.expand_dims = P.ExpandDims().shard(((config.dp, 1, 1),))

        self.dense1 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
            config.compute_dtype)
        self.dense1.matmul.shard(((config.dp, 1), (config.mp, 1)))
        self.dense1.bias_add.shard(((config.dp, config.mp), (config.mp,)))
        self.dense2 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
            config.compute_dtype)
        self.dense2.matmul.shard(((config.dp, 1), (config.mp, 1)))
        self.dense2.bias_add.shard(((config.dp, config.mp), (config.mp,)))
        self.dense3 = nn.Dense(config.embedding_size,
                               config.embedding_size).to_float(
            config.compute_dtype)
        self.dense3.matmul.shard(((config.dp, 1), (config.mp, 1)))
        self.dense3.bias_add.shard(((config.dp, config.mp), (config.mp,)))
Code Example #24
    def __init__(self,
                 batch_size=512,
                 d_model=768,
                 seq_length=1024,
                 num_attention_heads=12,
                 dim_per_head=64,
                 has_attention_mask=True,
                 do_return_2d_tensor=True,
                 attention_dropout=0.0,
                 compute_type=mstype.float32):
        super(MaskedSelfAttention, self).__init__()

        self.batch_size = batch_size
        self.d_model = d_model
        self.seq_length = seq_length
        self.num_heads = num_attention_heads
        self.dim_per_head = dim_per_head
        self.has_attention_mask = has_attention_mask
        assert has_attention_mask

        self.scale = Tensor([1.0 / math.sqrt(float(self.dim_per_head))],
                            dtype=compute_type)  # attention scale
        self.mask_data = Tensor([
            -10000.0,
        ], dtype=compute_type)
        self.split_head_shape = (self.batch_size, self.seq_length,
                                 self.num_heads, self.dim_per_head)

        self.c_attn = Conv1D(d_model, d_model * 3)
        self.c_proj = Conv1D(d_model, d_model)

        self.split_for_qkv = P.Split(1, 3)  # P.Split(axis, output_num)
        # self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.trans_shape = (0, 2, 1, 3)
        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.matmul = P.BatchMatMul()
        self.multiply = P.Mul()

        if self.has_attention_mask:
            self.expand_dims = P.ExpandDims()
            self.sub = P.Sub()
            self.add = P.TensorAdd()
            self.cast = P.Cast()
            self.get_dtype = P.DType()

        if do_return_2d_tensor:
            self.shape_return = (batch_size * seq_length, d_model)
        else:
            self.shape_return = (batch_size, seq_length, d_model)

        self.softmax = nn.Softmax()
        self.softmax_cast = P.Cast()
        self.dropout = nn.Dropout(1 - attention_dropout)
        self.use_attention_dropout = attention_dropout > 0
Code Example #25
File: naml.py  Project: yrpang/mindspore
 def __init__(self, query_vector_dim, input_vector_dim):
     super(Attention, self).__init__()
     self.dense1 = nn.Dense(input_vector_dim,
                            query_vector_dim,
                            has_bias=True,
                            activation='tanh')
     self.dense2 = nn.Dense(query_vector_dim, 1, has_bias=False)
     self.softmax = nn.Softmax()
     self.sum_keep_dims = ops.ReduceSum(keep_dims=True)
     self.sum = ops.ReduceSum(keep_dims=False)
Code Example #26
File: utils.py  Project: zhangjinrong/mindspore
def logits_to_probs(logits, is_binary=False):
    """
    Converts logits into probabilities.

    Args:
        logits (Tensor): raw, unnormalized scores.
        is_binary (bool): if True, apply an element-wise sigmoid; otherwise apply softmax over the last axis.
    """
    if is_binary:
        return nn.Sigmoid()(logits)
    return nn.Softmax(axis=-1)(logits)
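
A brief usage sketch for the helper above (the imports and sample logits are assumptions for illustration; only logits_to_probs itself comes from the snippet):

import numpy as np
import mindspore
from mindspore import Tensor

logits = Tensor(np.array([[2.0, 1.0, 0.1]]), mindspore.float32)
probs = logits_to_probs(logits)                         # softmax over the last axis
binary_probs = logits_to_probs(logits, is_binary=True)  # element-wise sigmoid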
Code Example #27
    def __init__(self,
                 is_training,
                 query_size,
                 key_size,
                 num_units,
                 normalize=False,
                 initializer_range=0.1,
                 compute_type=mstype.float16):
        super(BahdanauAttention, self).__init__()
        self.is_training = is_training
        self.mask = None
        self.query_size = query_size
        self.key_size = key_size
        self.normalize = normalize
        self.num_units = num_units
        self.linear_att = Parameter(Tensor(np.random.uniform(
            -initializer_range, initializer_range, size=[num_units]),
                                           dtype=mstype.float32),
                                    name='linear_att')
        if self.normalize:
            self.normalize_scalar = Parameter(Tensor(np.array(
                [1.0 / num_units]),
                                                     dtype=mstype.float32),
                                              name='normalize_scalar')
            self.normalize_bias = Parameter(Tensor(np.zeros(num_units),
                                                   dtype=mstype.float32),
                                            name='normalize_bias')
        self.transpose = P.Transpose()
        self.transpose_orders = (1, 0, 2)
        self.shape_op = P.Shape()

        self.linear_q = nn.Dense(
            query_size,
            num_units,
            has_bias=False,
            weight_init=Uniform(initializer_range)).to_float(compute_type)

        self.linear_k = nn.Dense(
            key_size,
            num_units,
            has_bias=False,
            weight_init=Uniform(initializer_range)).to_float(compute_type)
        self.expand = P.ExpandDims()
        self.tile = P.Tile()

        self.norm = nn.Norm(axis=-1)
        self.mul = P.Mul()
        self.matmul = P.MatMul()
        self.batchMatmul = P.BatchMatMul()
        self.tanh = nn.Tanh()

        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax(axis=-1)
        self.reshape = P.Reshape()
        self.cast = P.Cast()
Code Example #28
 def __init__(self, in_channel=3, out_channel=8, axis=1, input_shape=(32, 4, 110, -1),
              mul_size=(32, 1, 220, 220)):
     super().__init__()
     mul_np = np.full(mul_size, 0.5, dtype=np.float32)
     self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
     self.mul = P.Mul()
     self.conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                           kernel_size=5, has_bias=True, weight_init='ones',
                           bias_init='ones', pad_mode='valid')
     self.softmax = nn.Softmax(axis=axis)
     self.relu = nn.ReLU()
     self.reshape = P.Reshape()
     self.input_shape = input_shape
Code Example #29
 def __init__(self):
     super().__init__()
     self.relu = nn.ReLU()
     self.softmax = nn.Softmax()
     self.mul = P.Mul()
     self.add = P.Add()
     self.sub = P.Sub()
     self.div = P.Div()
     self.assign = P.Assign()
     param_a = np.full((1,), 5, dtype=np.float32)
     self.param_a = Parameter(Tensor(param_a), name='a')
     param_b = np.full((1,), 2, dtype=np.float32)
     self.param_b = Parameter(Tensor(param_b), name='b')
     param_c = np.full((1,), 16, dtype=np.float32)
     self.param_c = Parameter(Tensor(param_c), name='c')
Code Example #30
def test_prim():
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @registe_pass(run_only_once=True)
    def softmax_relu_pass():
        x = Any()
        sigmoid_softmax_pattern = Prim([P.Sigmoid(), P.Softmax()])
        pattern = Call(sigmoid_softmax_pattern, [x])
        target = Call(P.ReLU(), [x])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(3)
    unregiste_pass(softmax_relu_pass)
    assert "ReLU" in transformed_repr
    assert "Softmax" not in transformed_repr