Example #1
    def __init__(self, compute_type=mstype.float32):
        super(Mod, self).__init__()
        self.compute_type = compute_type

        # these primitives presumably implement x mod y as x - floor(x / y) * y
        # in the construct method, which is not part of this excerpt
        self.floor_div = P.FloorDiv()
        self.sub = P.Sub()
        self.multiply = P.Mul()
Example #2
 def __init__(self, mul_size, test_size, strategy=None, strategy2=None):
     super().__init__()
     mul_np = np.full(mul_size, 0.5, dtype=np.float32)
     floordiv_np = np.full(test_size, 0.1, dtype=np.float32)
     self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
     self.floordiv_weight = Parameter(Tensor(floordiv_np),
                                      name="floordiv_weight")
     self.mul = TwoInputBprop()
     self.floor_div = P.FloorDiv()
     if strategy is not None:
         self.mul.op.shard(strategy2)
         self.floor_div.shard(strategy)
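
For orientation, here is a minimal self-contained sketch of how such shard strategies are usually supplied: one partition tuple per operator input, each giving the number of splits per dimension. Every name, shape, and strategy below is made up for illustration rather than taken from the excerpt.

    import mindspore.nn as nn
    import mindspore.ops.operations as P

    class ShardedFloorDiv(nn.Cell):
        """Hypothetical cell; only the shard() call mirrors the example above."""
        def __init__(self, strategy=None):
            super().__init__()
            self.floor_div = P.FloorDiv()
            if strategy is not None:
                # each tuple gives the splits per dimension for one input
                self.floor_div.shard(strategy)

        def construct(self, x, y):
            return self.floor_div(x, y)

    # e.g. an 8-device layout with both inputs partitioned identically
    net = ShardedFloorDiv(strategy=((4, 2), (4, 2)))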
Example #3
    def __init__(self,
                 batch_size,
                 seq_length,
                 vocab_size,
                 decoder,
                 beam_width=4,
                 length_penalty_weight=1.0,
                 max_decode_length=128,
                 sos_id=1,
                 eos_id=2,
                 compute_type=mstype.float32):
        super(BeamSearchDecoder, self).__init__(auto_prefix=False)
        self.seq_length = seq_length
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.beam_width = beam_width
        self.length_penalty_weight = length_penalty_weight
        self.max_decode_length = max_decode_length
        self.decoder = decoder

        self.add = P.TensorAdd()
        self.expand = P.ExpandDims()
        self.reshape = P.Reshape()
        self.shape_flat = (-1, )
        self.shape = P.Shape()

        self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]),
                                  mstype.float32)
        self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF),
                                  mstype.float32)

        self.select = P.Select()
        self.flat_shape = (batch_size, beam_width * vocab_size)
        self.topk = P.TopK(sorted=True)
        self.floor_div = P.FloorDiv()
        self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
        self.real_div = P.RealDiv()
        self.mod = Mod()
        self.equal = P.Equal()
        self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id),
                              mstype.int32)

        beam_ids = np.tile(
            np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
        self.beam_ids = Tensor(beam_ids, mstype.int32)
        batch_ids = np.arange(batch_size * beam_width).reshape(
            (batch_size, beam_width)) // beam_width
        self.batch_ids = Tensor(batch_ids, mstype.int32)
        self.concat = P.Concat(axis=-1)
        self.gather_nd = P.GatherNd()

        self.greater_equal = P.GreaterEqual()
        self.sub = P.Sub()
        self.cast = P.Cast()
        self.zeroslike = P.ZerosLike()

        # init inputs and states
        self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id),
                                mstype.int32)
        self.init_seq = Tensor(np.full([batch_size, beam_width, 1], sos_id),
                               mstype.int32)
        init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]),
                              [batch_size, 1])
        self.init_scores = Tensor(init_scores, mstype.float32)
        self.init_finished = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.bool_))  # np.bool alias removed from NumPy
        self.init_length = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))
        self.length_penalty = LengthPenalty(weight=length_penalty_weight)
        self.one = Tensor(1, mstype.int32)
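
The topk, floor_div, and mod primitives above are the usual bookkeeping for beam scores flattened to [batch_size, beam_width * vocab_size]. The plain-integer sketch below (made-up sizes, not code from the decoder) shows the arithmetic that self.floor_div and self.mod perform on each top-k index.

    vocab_size = 32000                        # made-up vocabulary size
    flat_index = 97003                        # one index returned by top-k
    beam_index = flat_index // vocab_size     # 3: which beam produced the score
    word_index = flat_index % vocab_size      # 1003: which vocabulary id was chosen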
Example #4
 def __init__(self, strategy1, strategy2):
     super().__init__()
     # set_strategy is the older MindSpore spelling of shard (compare Example #2)
     self.matmul = P.MatMul().set_strategy(strategy1)
     self.floordiv = P.FloorDiv().set_strategy(strategy2)
Example #5
     'skip': ['backward']}),
 ('RealDiv_0', {
     'block': P.RealDiv(),
     'desc_const': [Tensor(2048.0), Tensor(0.0)],
     'desc_inputs': [],
     'skip': ['backward']}),
 ('RealDiv', {
     'block': P.RealDiv(),
     'desc_inputs': [[4], Tensor(np.ones(4).astype(np.float32))],
     'desc_bprop': [[4]]}),
 ('RealDiv_1', {
     'block': P.RealDiv(),
     'desc_inputs': [[512, 1024], [512, 1024]],
     'desc_bprop': [[512, 1024]]}),
 ('FloorDiv', {
     'block': P.FloorDiv(),
     'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                     Tensor(np.random.rand(4).astype(np.float16))],
     'skip': ['backward']}),
 ('FloorMod', {
     'block': P.FloorMod(),
     'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                     Tensor(np.random.rand(4).astype(np.float16))],
     'skip': ['backward']}),
 ('identity', {
     'block': ops.functional.identity,
     'desc_inputs': [[2, 2]],
     'skip': ['backward']}),
 ('MatMul_1', {
     'block': P.MatMul(transpose_a=False, transpose_b=False),
     'desc_inputs': [[1024, 160], [160, 1024]],
Example #6
    def __init__(self,
                 batch_size,
                 seq_length,
                 vocab_size,
                 decoder,
                 beam_width=4,
                 decoder_layers_nums=4,
                 length_penalty_weight=0.6,
                 cov_penalty_factor=0.1,
                 hidden_size=1024,
                 max_decode_length=64,
                 sos_id=2,
                 eos_id=3,
                 compute_type=mstype.float32):
        super(BeamSearchDecoder, self).__init__()

        self.encoder_length = seq_length
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.beam_width = beam_width
        self.decoder_layers_nums = decoder_layers_nums
        self.length_penalty_weight = length_penalty_weight
        self.cov_penalty_factor = cov_penalty_factor
        self.max_decode_length = max_decode_length
        self.decoder = decoder

        self.add = P.TensorAdd()
        self.expand = P.ExpandDims()
        self.reshape = P.Reshape()
        self.shape_flat = (-1,)
        self.shape = P.Shape()

        self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]), mstype.float32)
        self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF), mstype.float32)

        self.select = P.Select()
        self.flat_shape = (batch_size, beam_width * vocab_size)
        self.topk = P.TopK(sorted=True)
        self.floor_div = P.FloorDiv()
        self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
        self.real_div = P.RealDiv()
        self.mod = Mod()
        self.equal = P.Equal()
        self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id), mstype.int32)

        beam_ids = np.tile(np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
        self.beam_ids = Tensor(beam_ids, mstype.int32)

        batch_ids = np.arange(batch_size * beam_width).reshape((batch_size, beam_width)) // beam_width
        self.batch_ids = Tensor(batch_ids, mstype.int32)

        self.concat = P.Concat(axis=-1)
        self.gather_nd = P.GatherNd()

        self.start = Tensor(0, dtype=mstype.int32)
        self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id), mstype.int32)
        self.init_seq = Tensor(np.full([batch_size, beam_width, self.max_decode_length], sos_id), mstype.int32)

        init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]), [batch_size, 1])
        self.init_scores = Tensor(init_scores, mstype.float32)
        self.init_finished = Tensor(np.zeros([batch_size, beam_width], dtype=np.bool_))
        self.init_length = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))

        self.length_penalty = LengthPenalty(weight=length_penalty_weight)

        self.one = Tensor(1, mstype.int32)
        self.prob_concat = P.Concat(axis=1)
        self.cast = P.Cast()
        self.decoder_hidden_state = Tensor(np.zeros([self.decoder_layers_nums, 2,
                                                     self.batch_size * self.beam_width,
                                                     hidden_size]), mstype.float32)

        # was dtype=np.float (alias removed from NumPy); float32 assumed here
        self.zeros_scores = Tensor(np.zeros([batch_size, beam_width], dtype=np.float32))
        self.active_index = Tensor(np.ones([batch_size, beam_width], dtype=np.int32))
        self.init_zeros = Tensor(np.zeros([batch_size, beam_width], dtype=np.int32))
        self.init_ones = Tensor(np.ones([batch_size, beam_width], dtype=np.float32))

        self.accu_attn_scores = Tensor(np.zeros([batch_size, beam_width, self.encoder_length], dtype=np.float32))

        self.zeros = Tensor([0], mstype.int32)
        self.eos_tensor = Tensor(np.full([batch_size, beam_width, beam_width], eos_id), mstype.int32)

        self.ones_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 1), mstype.float32)
        self.neg_inf_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], -INF), mstype.float32)
        self.zeros_3d = Tensor(np.full([batch_size, beam_width, self.encoder_length], 0), mstype.float32)
        self.zeros_2d = Tensor(np.full([batch_size * beam_width, self.encoder_length], 0), mstype.int32)
        self.argmin = P.ArgMinWithValue(axis=1)
        self.reducesum = P.ReduceSum()
        self.div = P.Div()
        self.shape_op = P.Shape()
        self.mul = P.Mul()
        self.log = P.Log()
        self.less = P.Less()
        self.tile = P.Tile()
        self.noteq = P.Neg()
        self.zeroslike = P.ZerosLike()
        self.greater_equal = P.GreaterEqual()
        self.sub = P.Sub()
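
Both BeamSearchDecoder variants in this listing normalize candidate scores with a LengthPenalty cell. Its definition is not part of the excerpt; the sketch below shows the GNMT-style formula such a cell usually implements, under that assumption.

    # GNMT-style length penalty: lp(len) = ((5 + len) / 6) ** weight
    def length_penalty(length, weight=0.6):
        return ((5.0 + length) / 6.0) ** weight

    # accumulated log-probabilities are divided by this factor so that longer
    # hypotheses are not unfairly penalized
    print(length_penalty(10))   # ~1.73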
Example #7
 def __init__(self):
     super().__init__()
     self.matmul = P.MatMul()
     self.floordiv = P.FloorDiv()
Example #8
        'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input two tensors, but element types are not same
    ('Div1', {
        'block': (P.Div(), {'exception': TypeError, 'error_keywords': ['Div']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input two tensors, their shapes do not match
    ('Div2', {
        'block': (P.Div(), {'exception': ValueError, 'error_keywords': ['Div']}),
        'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),

    # one input is scalar, and another is Tensor(float32)
    ('FloorDiv0', {
        'block': (P.FloorDiv(), {'exception': TypeError, 'error_keywords': ['FloorDiv']}),
        'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input two tensors, but element types are not same
    ('FloorDiv1', {
        'block': (P.FloorDiv(), {'exception': TypeError, 'error_keywords': ['FloorDiv']}),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    # input two tensors, their shapes do not match
    ('FloorDiv2', {
        'block': (P.FloorDiv(), {'exception': ValueError, 'error_keywords': ['FloorDiv']}),
        'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
        'skip': ['backward']}),

    # input x is Tensor(int32), not Tensor(float)
    ('Floor1', {
Example #9
    # input two tensors, their shapes do not match
    ('Div2', {
        'block': (P.Div(), {
            'exception': ValueError,
            'error_keywords': ['Div']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 5]).astype(np.float32)),
            Tensor(np.ones([3, 4]).astype(np.float32))
        ],
        'skip': ['backward']
    }),

    # input two tensors, their shapes do not match
    ('FloorDiv2', {
        'block': (P.FloorDiv(), {
            'exception': ValueError,
            'error_keywords': ['FloorDiv']
        }),
        'desc_inputs': [
            Tensor(np.ones([3, 5]).astype(np.float32)),
            Tensor(np.ones([3, 4]).astype(np.float32))
        ],
        'skip': ['backward']
    }),

    # input x is Tensor(int32), not Tensor(float)
    ('Floor1', {
        'block': (P.Floor(), {
            'exception': TypeError,
            'error_keywords': ['Floor']
Example #10
     'desc_inputs': [],
     'skip': ['backward']
 }),
 ('RealDiv', {
     'block': P.RealDiv(),
     'desc_inputs': [[4], Tensor(np.ones(4).astype(np.float32))],
     'desc_bprop': [[4]]
 }),
 ('RealDiv_1', {
     'block': P.RealDiv(),
     'desc_inputs': [[512, 1024], [512, 1024]],
     'desc_bprop': [[512, 1024]]
 }),
 ('FloorDiv', {
     'block':
     P.FloorDiv(),
     'desc_inputs': [
         Tensor(np.random.rand(4).astype(np.float16)),
         Tensor(np.random.rand(4).astype(np.float16))
     ],
     'skip': ['backward']
 }),
 ('FloorMod', {
     'block':
     P.FloorMod(),
     'desc_inputs': [
         Tensor(np.random.rand(4).astype(np.float16)),
         Tensor(np.random.rand(4).astype(np.float16))
     ],
     'skip': ['backward']
 }),
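
As a quick reference for the floor semantics these two entries exercise, the NumPy stand-ins below show the behavior for a negative operand (values chosen for illustration, not taken from the test file).

    import numpy as np

    x = np.array([7.0, -7.0], dtype=np.float16)
    y = np.array([2.0, 2.0], dtype=np.float16)
    print(np.floor_divide(x, y))   # [ 3. -4.]  quotient rounded toward -inf
    print(np.mod(x, y))            # [ 1.  1.]  remainder takes the divisor's sign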
Example #11
 def __init__(self):
     super(NetFloorDiv, self).__init__()
     self.floordiv = P.FloorDiv()
Example #12
 def __init__(self):
     super(FloorDivNet, self).__init__()
     self.floor_div = P.FloorDiv()
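
Finally, a self-contained completion of the excerpt above. The construct method and the driver code are assumptions added for illustration; only the __init__ lines come from the original example.

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops.operations as P
    from mindspore import Tensor, context

    class FloorDivNet(nn.Cell):
        def __init__(self):
            super(FloorDivNet, self).__init__()
            self.floor_div = P.FloorDiv()

        def construct(self, x, y):
            # element-wise floor(x / y)
            return self.floor_div(x, y)

    context.set_context(mode=context.PYNATIVE_MODE)
    net = FloorDivNet()
    x = Tensor(np.array([7.0, -7.0, 9.0]).astype(np.float32))
    y = Tensor(np.array([2.0, 2.0, 4.0]).astype(np.float32))
    print(net(x, y))   # expected: [ 3. -4.  2.]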