Example #1
    def __init__(self):
        super(NetNeg, self).__init__()
        self.neg = P.Neg()
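
The example only shows the constructor. Below is a minimal sketch of the complete cell and how it might be called; the construct body and the driver lines are assumptions, not part of the original source:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

class NetNeg(nn.Cell):
    def __init__(self):
        super(NetNeg, self).__init__()
        self.neg = P.Neg()

    def construct(self, x):
        # P.Neg() returns the elementwise negation -x
        return self.neg(x)

net = NetNeg()
print(net(Tensor(np.array([1.0, -2.5, 0.0], np.float32))))  # expected: [-1.   2.5 -0. ]
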
Example #2
    def __init__(self, mul_weight, strategy1=None, strategy2=None):
        super().__init__()
        self.mul = P.Mul().shard(strategy1)
        self.neg = P.Neg().shard(strategy2)
        self.mul_weight = Parameter(mul_weight, "w1")
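
This constructor attaches shard strategies to Mul and Neg for MindSpore's (semi-)auto-parallel mode. A minimal sketch of how such a cell is typically wired and instantiated follows; the class name, the construct body, and the concrete strategy tuples are assumptions for illustration:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P

class MulNegNet(nn.Cell):  # hypothetical name; the original class name is not shown
    def __init__(self, mul_weight, strategy1=None, strategy2=None):
        super().__init__()
        self.mul = P.Mul().shard(strategy1)
        self.neg = P.Neg().shard(strategy2)
        self.mul_weight = Parameter(mul_weight, "w1")

    def construct(self, x):
        return self.neg(self.mul(x, self.mul_weight))

# Each strategy is a tuple with one partition tuple per operator input;
# strategies only take effect under an auto-parallel context (e.g. 2 devices here).
net = MulNegNet(Tensor(np.ones([8, 8], np.float32)),
                strategy1=((2, 1), (2, 1)), strategy2=((2, 1),))
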
Example #3
    def __init__(self,
                 batch_size,
                 seq_length,
                 vocab_size,
                 decoder,
                 beam_width=4,
                 decoder_layers_nums=4,
                 length_penalty_weight=0.6,
                 cov_penalty_factor=0.1,
                 hidden_size=1024,
                 max_decode_length=64,
                 sos_id=2,
                 eos_id=3,
                 is_using_while=True,
                 compute_type=mstype.float32):
        super(BeamSearchDecoder, self).__init__()

        self.encoder_length = seq_length
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.beam_width = beam_width
        self.decoder_layers_nums = decoder_layers_nums
        self.length_penalty_weight = length_penalty_weight
        self.cov_penalty_factor = cov_penalty_factor
        self.max_decode_length = max_decode_length
        self.decoder = decoder
        self.is_using_while = is_using_while

        self.add = P.Add()
        self.expand = P.ExpandDims()
        self.reshape = P.Reshape()
        self.shape_flat = (-1, )
        self.shape = P.Shape()

        self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]),
                                  mstype.float32)
        self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF),
                                  mstype.float32)

        self.select = P.Select()
        self.flat_shape = (batch_size, beam_width * vocab_size)
        self.topk = P.TopK(sorted=True)
        self.floor_div = P.FloorDiv()
        self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
        self.real_div = P.RealDiv()
        self.mod = Mod()
        self.equal = P.Equal()
        self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id),
                              mstype.int32)

        beam_ids = np.tile(
            np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
        self.beam_ids = Tensor(beam_ids, mstype.int32)

        batch_ids = np.arange(batch_size * beam_width).reshape(
            (batch_size, beam_width)) // beam_width
        self.batch_ids = Tensor(batch_ids, mstype.int32)

        self.concat = P.Concat(axis=-1)
        self.gather_nd = P.GatherNd()

        self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id),
                                mstype.int32)
        if self.is_using_while:
            self.start = Tensor(0, dtype=mstype.int32)
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, self.max_decode_length + 1],
                        sos_id), mstype.int32)
        else:
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, 1], sos_id), mstype.int32)

        init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]),
                              [batch_size, 1])
        self.init_scores = Tensor(init_scores, mstype.float32)
        self.init_finished = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.bool_))
        self.init_length = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))

        self.length_penalty = LengthPenalty(weight=length_penalty_weight)

        self.one = Tensor(1, mstype.int32)
        self.prob_concat = P.Concat(axis=1)
        self.cast = P.Cast()
        self.decoder_hidden_state = Tensor(
            np.zeros([
                self.decoder_layers_nums, 2, self.batch_size * self.beam_width,
                hidden_size
            ]), mstype.float32)

        self.zeros_scores = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.float32))
        self.active_index = Tensor(
            np.ones([batch_size, beam_width], dtype=np.int32))
        self.init_zeros = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))
        self.init_ones = Tensor(
            np.ones([batch_size, beam_width], dtype=np.float32))

        self.accu_attn_scores = Tensor(
            np.zeros([batch_size, beam_width, self.encoder_length],
                     dtype=np.float32))

        self.zeros = Tensor([0], mstype.int32)
        self.eos_tensor = Tensor(
            np.full([batch_size, beam_width, beam_width], eos_id),
            mstype.int32)

        self.ones_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 1),
            mstype.float32)
        self.neg_inf_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], -INF),
            mstype.float32)
        self.zeros_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 0),
            mstype.float32)
        self.zeros_2d = Tensor(
            np.full([batch_size * beam_width, self.encoder_length], 0),
            mstype.int32)
        self.argmin = P.ArgMinWithValue(axis=1)
        self.reducesum = P.ReduceSum()
        self.div = P.Div()
        self.shape_op = P.Shape()
        self.mul = P.Mul()
        self.log = P.Log()
        self.less = P.Less()
        self.tile = P.Tile()
        self.noteq = P.Neg()
        self.zeroslike = P.ZerosLike()
        self.greater_equal = P.GreaterEqual()
        self.sub = P.Sub()
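
LengthPenalty(weight=length_penalty_weight) normalizes the beam scores so longer hypotheses are not unfairly penalized; it presumably follows the GNMT-style formula lp(Y) = ((5 + |Y|) / 6) ** weight. A standalone NumPy sketch of that normalization (the exact formula used by the MindSpore class is an assumption):

import numpy as np

def length_penalty(length, weight=0.6):
    # GNMT-style length normalizer: ((5 + |Y|) / 6) ** weight
    return np.power((5.0 + length) / 6.0, weight)

# summed log-probabilities are divided by the penalty before the final beam ranking
log_probs = np.array([-3.2, -4.1])
lengths = np.array([10, 12])
normalized_scores = log_probs / length_penalty(lengths, weight=0.6)
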
Example #4
    def __init__(self):
        super(CastDownNet, self).__init__()
        self.cast = P.Cast()
        self.transpose = P.Transpose()
        self.neg = P.Neg()
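
Again only the constructor is shown. One plausible completion, assuming the cell casts its input down to float16, transposes it, and negates the result (the cast target, the permutation, and the usage comment are assumptions):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

class CastDownNet(nn.Cell):
    def __init__(self):
        super(CastDownNet, self).__init__()
        self.cast = P.Cast()
        self.transpose = P.Transpose()
        self.neg = P.Neg()

    def construct(self, x):
        y = self.cast(x, mstype.float16)            # cast down to float16
        return self.neg(self.transpose(y, (1, 0)))  # swap the two axes, then negate

# usage sketch: CastDownNet()(Tensor(np.ones([2, 3], np.float32)))
# would yield a float16 tensor of shape (3, 2)
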
Example #5

class SummaryNet(nn.Cell):
    def __init__(self):
        super(SummaryNet, self).__init__()
        self.s = P.ScalarSummary()
        self.add = P.TensorAdd()

    def construct(self, x, y):
        self.s("x1", x)
        return self.add(x, y)


test_case_math_ops = [
    ('Neg', {
        'block': P.Neg(),
        'desc_inputs': [[1, 3, 4, 4]],
        'desc_bprop': [[1, 3, 4, 4]]
    }),
    ('Sub', {
        'block': P.Sub(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('TensorAdd', {
        'block': P.TensorAdd(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('Mul0', {
        'block': P.Mul(),
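
In entries such as 'Neg' above, 'desc_inputs' and 'desc_bprop' appear to be input and gradient shapes that the test harness expands into tensors. A hand-rolled equivalent of the 'Neg' case (the random-tensor convention and PyNative execution are assumptions):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

block = P.Neg()
x = Tensor(np.random.rand(1, 3, 4, 4).astype(np.float32))  # desc_inputs: [[1, 3, 4, 4]]
out = block(x)
assert out.shape == (1, 3, 4, 4)  # an elementwise op keeps the input shape
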
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.thor = True
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float32)),
                                  name='matrix_A',
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            if out_channels == 1001:
                self.matrix_G = Parameter(Tensor(
                    np.zeros([1024, 1024]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
                self.pad = P.Pad(((0, 23), (0, 23)))
                self.pad1 = P.Pad(((0, 7), (0, 7)))
                self.slice = P.Slice()
                self.add = P.TensorAdd()
            else:
                self.matrix_G = Parameter(Tensor(
                    np.eye(out_channels).astype(np.float32)),
                                          name="matrix_G",
                                          requires_grad=False)
                self.abs = P.Abs()
                self.reduce_max = P.ReduceMax(keep_dims=False)
                self.neg = P.Neg()
                self.reduce_sum = P.ReduceSum()
            self.matmul = P.MatMul(transpose_b=True)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.cast = P.Cast()
            self.is_nsp_layer = (out_channels == 2)
        else:
            self.is_Ascend = False
            self.matrix_G = Parameter(Tensor(
                np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G",
                                      requires_grad=False)
            self.cube_matmul = P.MatMul(transpose_a=True)
        self.getG = P.InsertGradientOf(self.save_gradient)
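
Dense_Thor keeps two non-trainable buffers, matrix_A ([in_channels, in_channels]) and matrix_G ([out_channels, out_channels]), which THOR-style second-order optimizers use as Kronecker factors approximating the layer's Fisher information. A rough NumPy sketch of how such factors are usually accumulated (this is the general K-FAC idea, not the exact MindSpore computation):

import numpy as np

def kronecker_factors(x, grad_out):
    # x: [batch, in_channels] layer input
    # grad_out: [batch, out_channels] gradient w.r.t. the layer output
    batch = x.shape[0]
    matrix_a = x.T @ x / batch                 # input second-moment factor, [in, in]
    matrix_g = grad_out.T @ grad_out / batch   # gradient second-moment factor, [out, out]
    return matrix_a, matrix_g
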
Example #7
        'skip': ['backward']
    }),
    # shape not match
    ('AddN3', {
        'block': (P.AddN(), {
            'exception': ValueError,
            'error_keywords': ['AddN']
        }),
        'desc_inputs': [(Tensor(np.ones([2, 3]).astype(np.int32)),
                         Tensor(np.ones([3, 2]).astype(np.int32)))],
        'skip': ['backward']
    }),

    # input is Tensor(bool)
    ('Neg1', {
        'block': (P.Neg(), {
            'exception': TypeError,
            'error_keywords': ['Neg']
        }),
        'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
        'skip': ['backward']
    }),

    # input two tensors, their shapes do not match
    ('Sub2', {
        'block': (P.Sub(), {
            'exception': ValueError,
            'error_keywords': ['Sub']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 5]).astype(np.float32)),
Example #8
    def __init__(self, funcs):
        super(SwitchNegNet, self).__init__()
        self.funcs = funcs
        self.op = P.Neg()
Example #9
    def __init__(self):
        super(Net, self).__init__()
        self.ops = P.Neg()
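
As in Example #1, this last cell simply wraps P.Neg(). The primitive can also be exercised directly, without a Cell, which is handy for quick checks (PyNative execution assumed):

import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)
print(P.Neg()(Tensor(np.array([0.5, -3.0], np.float32))))  # expected: [-0.5  3. ]
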