def __init__(self, strategy0=None, strategy1=None):
    super(AddRelu, self).__init__()
    self.add = P.TensorAdd().set_strategy(strategy=strategy0)
    self.relu = P.ReLU().set_strategy(strategy=strategy1)
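For context, a hypothetical construct completing the cell above (the source shows only the constructor): apply the sharded add, then the sharded ReLU.

def construct(self, x, y):
    out = self.add(x, y)   # element-wise add, partitioned per strategy0
    return self.relu(out)  # ReLU, partitioned per strategy1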
Example 2
def __init__(self, strategy1, strategy2):
    super().__init__()
    self.matmul = P.MatMul().set_strategy(strategy1)
    self.add = P.TensorAdd().set_strategy(strategy2)
    self.b = Tensor(0.9, ms.float32)
Example 3
def __init__(self):
    super().__init__()
    self.op = P.TensorAdd()
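A minimal usage sketch (assumed, not part of the source): TensorAdd performs element-wise addition with NumPy-style broadcasting, and a primitive instance can be called directly in PyNative mode.

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

op = P.TensorAdd()
x = Tensor(np.ones([2, 3]).astype(np.float32))
y = Tensor(np.full([2, 3], 2.0).astype(np.float32))
print(op(x, y))  # [[3. 3. 3.] [3. 3. 3.]]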
Example 4
    def __init__(self,
                 batch_size,
                 from_tensor_width,
                 to_tensor_width,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 query_act=None,
                 key_act=None,
                 value_act=None,
                 has_attention_mask=False,
                 attention_probs_dropout_prob=0.0,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 do_return_2d_tensor=False,
                 use_relative_positions=False,
                 # THOR hyper-parameters consumed by the Dense_Thor layers
                 # below; they are referenced in the body but were missing
                 # from the excerpted signature (defaults here are assumptions).
                 damping=0.03,
                 loss_scale=1.0,
                 frequency=100,
                 compute_type=mstype.float32):

        super(BertAttention, self).__init__()
        self.batch_size = batch_size
        self.from_seq_length = from_seq_length
        self.to_seq_length = to_seq_length
        self.num_attention_heads = num_attention_heads
        self.size_per_head = size_per_head
        self.has_attention_mask = has_attention_mask
        self.use_relative_positions = use_relative_positions

        self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head))
        self.reshape = P.Reshape()
        self.shape_from_2d = (-1, from_tensor_width)
        self.shape_to_2d = (-1, to_tensor_width)
        weight = TruncatedNormal(initializer_range)
        units = num_attention_heads * size_per_head
        self.query_layer = Dense_Thor(in_channels=from_tensor_width,
                                      out_channels=units,
                                      weight_init=weight,
                                      has_bias=True,
                                      bias_init='zeros',
                                      damping=damping,
                                      loss_scale=loss_scale,
                                      frequency=frequency,
                                      activation=query_act,
                                      batch_size=batch_size).to_float(compute_type)
        self.key_layer = Dense_Thor(in_channels=to_tensor_width,
                                    out_channels=units,
                                    weight_init=weight,
                                    has_bias=True,
                                    bias_init='zeros',
                                    damping=damping,
                                    loss_scale=loss_scale,
                                    frequency=frequency,
                                    activation=key_act,
                                    batch_size=batch_size).to_float(compute_type)
        self.value_layer = Dense_Thor(in_channels=to_tensor_width,
                                      out_channels=units,
                                      weight_init=weight,
                                      has_bias=True,
                                      bias_init='zeros',
                                      damping=damping,
                                      loss_scale=loss_scale,
                                      frequency=frequency,
                                      activation=value_act,
                                      batch_size=batch_size).to_float(compute_type)
        self.shape_from = (batch_size, from_seq_length, num_attention_heads, size_per_head)
        self.shape_to = (
            batch_size, to_seq_length, num_attention_heads, size_per_head)

        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.multiply = P.Mul()
        self.transpose = P.Transpose()
        self.trans_shape = (0, 2, 1, 3)
        self.trans_shape_relative = (2, 0, 1, 3)
        self.trans_shape_position = (1, 2, 0, 3)
        self.multiply_data = -10000.0
        self.batch_num = batch_size * num_attention_heads
        self.matmul = P.BatchMatMul()

        self.softmax = nn.Softmax()
        self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)

        if self.has_attention_mask:
            self.expand_dims = P.ExpandDims()
            self.sub = P.Sub()
            self.add = P.TensorAdd()
            self.cast = P.Cast()
            self.get_dtype = P.DType()
        if do_return_2d_tensor:
            self.shape_return = (batch_size * from_seq_length, num_attention_heads * size_per_head)
        else:
            self.shape_return = (batch_size, from_seq_length, num_attention_heads * size_per_head)

        self.cast_compute_type = SaturateCast(dst_type=compute_type)
        if self.use_relative_positions:
            self._generate_relative_positions_embeddings = \
                RelaPosEmbeddingsGenerator(length=to_seq_length,
                                           depth=size_per_head,
                                           max_relative_position=16,
                                           initializer_range=initializer_range,
                                           use_one_hot_embeddings=use_one_hot_embeddings)
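To see what these operators compute in construct, here is a hedged NumPy sketch of the scaled dot-product attention the constructor prepares (shapes and values are illustrative only):

import numpy as np

batch, heads, seq, d = 2, 1, 4, 8
q = np.random.randn(batch, heads, seq, d).astype(np.float32)
k = np.random.randn(batch, heads, seq, d).astype(np.float32)
v = np.random.randn(batch, heads, seq, d).astype(np.float32)

scores = (q @ k.transpose(0, 1, 3, 2)) * (1.0 / np.sqrt(d))  # matmul_trans_b + scores_mul
probs = np.exp(scores - scores.max(-1, keepdims=True))
probs = probs / probs.sum(-1, keepdims=True)                 # softmax over the last axis
context = probs @ v                                          # self.matmul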
Example 5
def __init__(self, strategy1, strategy2, strategy3):
    super().__init__()
    self.mul = P.Mul().set_strategy(strategy1)
    self.reduce_max = P.ReduceMax(keep_dims=False).set_strategy(strategy2)
    self.add = P.TensorAdd().set_strategy(strategy3)
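For reference, a strategy is one tuple per operator input, with one integer per tensor dimension giving how many slices that dimension is cut into. A hypothetical 8-device layout for the cell above:

strategy1 = ((2, 4), (2, 4))  # both Mul inputs: dim 0 cut in 2, dim 1 cut in 4
strategy2 = ((4, 2),)         # ReduceMax has a single input
strategy3 = ((2, 4), (2, 4))  # both TensorAdd inputs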
Example 6
    def __init__(self,
                 batch_size,
                 seq_length,
                 vocab_size,
                 decoder,
                 beam_width=4,
                 decoder_layers_nums=4,
                 length_penalty_weight=0.6,
                 cov_penalty_factor=0.1,
                 hidden_size=1024,
                 max_decode_length=64,
                 sos_id=2,
                 eos_id=3,
                 is_using_while=True,
                 compute_type=mstype.float32):
        super(BeamSearchDecoder, self).__init__()

        self.encoder_length = seq_length
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.beam_width = beam_width
        self.decoder_layers_nums = decoder_layers_nums
        self.length_penalty_weight = length_penalty_weight
        self.cov_penalty_factor = cov_penalty_factor
        self.max_decode_length = max_decode_length
        self.decoder = decoder
        self.is_using_while = is_using_while

        self.add = P.TensorAdd()
        self.expand = P.ExpandDims()
        self.reshape = P.Reshape()
        self.shape_flat = (-1, )
        self.shape = P.Shape()

        self.zero_tensor = Tensor(np.zeros([batch_size, beam_width]),
                                  mstype.float32)
        self.ninf_tensor = Tensor(np.full([batch_size, beam_width], -INF),
                                  mstype.float32)

        self.select = P.Select()
        self.flat_shape = (batch_size, beam_width * vocab_size)
        self.topk = P.TopK(sorted=True)
        self.floor_div = P.FloorDiv()
        self.vocab_size_tensor = Tensor(self.vocab_size, mstype.int32)
        self.real_div = P.RealDiv()
        self.mod = Mod()
        self.equal = P.Equal()
        self.eos_ids = Tensor(np.full([batch_size, beam_width], eos_id),
                              mstype.int32)

        beam_ids = np.tile(
            np.arange(beam_width).reshape((1, beam_width)), [batch_size, 1])
        self.beam_ids = Tensor(beam_ids, mstype.int32)

        batch_ids = np.arange(batch_size * beam_width).reshape(
            (batch_size, beam_width)) // beam_width
        self.batch_ids = Tensor(batch_ids, mstype.int32)

        self.concat = P.Concat(axis=-1)
        self.gather_nd = P.GatherNd()

        self.start_ids = Tensor(np.full([batch_size * beam_width, 1], sos_id),
                                mstype.int32)
        if self.is_using_while:
            self.start = Tensor(0, dtype=mstype.int32)
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, self.max_decode_length],
                        sos_id), mstype.int32)
        else:
            self.init_seq = Tensor(
                np.full([batch_size, beam_width, 1], sos_id), mstype.int32)

        init_scores = np.tile(np.array([[0.] + [-INF] * (beam_width - 1)]),
                              [batch_size, 1])
        self.init_scores = Tensor(init_scores, mstype.float32)
        self.init_finished = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.bool_))
        self.init_length = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))

        self.length_penalty = LengthPenalty(weight=length_penalty_weight)

        self.one = Tensor(1, mstype.int32)
        self.prob_concat = P.Concat(axis=1)
        self.cast = P.Cast()
        self.decoder_hidden_state = Tensor(
            np.zeros([
                self.decoder_layers_nums, 2, self.batch_size * self.beam_width,
                hidden_size
            ]), mstype.float32)

        self.zeros_scores = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.float32))
        self.active_index = Tensor(
            np.ones([batch_size, beam_width], dtype=np.int32))
        self.init_zeros = Tensor(
            np.zeros([batch_size, beam_width], dtype=np.int32))
        self.init_ones = Tensor(
            np.ones([batch_size, beam_width], dtype=np.float32))

        self.accu_attn_scores = Tensor(
            np.zeros([batch_size, beam_width, self.encoder_length],
                     dtype=np.float32))

        self.zeros = Tensor([0], mstype.int32)
        self.eos_tensor = Tensor(
            np.full([batch_size, beam_width, beam_width], eos_id),
            mstype.int32)

        self.ones_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 1),
            mstype.float32)
        self.neg_inf_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], -INF),
            mstype.float32)
        self.zeros_3d = Tensor(
            np.full([batch_size, beam_width, self.encoder_length], 0),
            mstype.float32)
        self.zeros_2d = Tensor(
            np.full([batch_size * beam_width, self.encoder_length], 0),
            mstype.int32)
        self.argmin = P.ArgMinWithValue(axis=1)
        self.reducesum = P.ReduceSum()
        self.div = P.Div()
        self.shape_op = P.Shape()
        self.mul = P.Mul()
        self.log = P.Log()
        self.less = P.Less()
        self.tile = P.Tile()
        self.noteq = P.NotEqual()
        self.zeroslike = P.ZerosLike()
        self.greater_equal = P.GreaterEqual()
        self.sub = P.Sub()
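A hedged NumPy sketch of the core beam step these operators implement: flatten the (batch, beam, vocab) scores, take the top beam_width, then recover the originating beam and word ids with FloorDiv and Mod:

import numpy as np

batch, beam, vocab = 2, 4, 10
scores = np.random.randn(batch, beam * vocab).astype(np.float32)  # self.flat_shape
topk_idx = np.argsort(-scores, axis=-1)[:, :beam]                 # P.TopK(sorted=True)
beam_idx = topk_idx // vocab                                      # P.FloorDiv
word_idx = topk_idx % vocab                                       # Mod()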
Example 7
def __init__(self):
    super(MathBinaryNet1, self).__init__()
    self.add = P.TensorAdd()
    self.mul = P.Mul()
    self.max = P.Maximum()
    self.number = 3
Example 8
def __init__(self):
    super(Net1, self).__init__()
    self.add = P.TensorAdd()
Example 9
def construct(self, x, y):
    add = P.TensorAdd()
    result = add(x, y)
    return result
Example 10
        return self.add(x, y)


test_case_math_ops = [
    ('Neg', {
        'block': P.Neg(),
        'desc_inputs': [[1, 3, 4, 4]],
        'desc_bprop': [[1, 3, 4, 4]]
    }),
    ('Sub', {
        'block': P.Sub(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('TensorAdd', {
        'block': P.TensorAdd(),
        'desc_inputs': [[3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('Mul0', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('Mul1', {
        'block': P.Mul(),
        'desc_inputs': [[2, 3, 1, 1], [2, 3, 3, 5]],
        'desc_bprop': [[2, 3, 3, 5]]
    }),
    ('Mul2', {
        'block': P.Mul(),
Example 11
def __init__(self):
    super(SummaryNet, self).__init__()
    self.s = P.ScalarSummary()
    self.add = P.TensorAdd()
Example 12
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None):
        super(Dense_Thor, self).__init__()
        self.thor = True
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("Weight init shape error.")
        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        self.bias = None
        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("Bias init shape error.")
            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")
            self.bias_add = P.BiasAdd()

        self.matmul = P.MatMul(transpose_b=True)
        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A = Parameter(Tensor(
            np.zeros([in_channels, in_channels]).astype(np.float32)),
                                  name='matrix_A',
                                  requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.is_Ascend = True
        if context.get_context("device_target") == "Ascend":
            if out_channels == 1001:
                self.matrix_G = Parameter(Tensor(
                    np.zeros([1024, 1024]).astype(np.float32)),
                                          name='matrix_G',
                                          requires_grad=False)
                self.pad = P.Pad(((0, 23), (0, 23)))
                self.pad1 = P.Pad(((0, 7), (0, 7)))
                self.slice = P.Slice()
                self.add = P.TensorAdd()
            else:
                self.matrix_G = Parameter(Tensor(
                    np.eye(out_channels).astype(np.float32)),
                                          name="matrix_G",
                                          requires_grad=False)
                self.abs = P.Abs()
                self.reduce_max = P.ReduceMax(keep_dims=False)
                self.neg = P.Neg()
                self.reduce_sum = P.ReduceSum()
            self.matmul = P.MatMul(transpose_b=True)
            self.cube_matmul = P.CusMatMulCube(transpose_a=True)
            self.cast = P.Cast()
            self.is_nsp_layer = (out_channels == 2)
        else:
            self.is_Ascend = False
            self.matrix_G = Parameter(Tensor(
                np.eye(out_channels).astype(np.float32)),
                                      name="matrix_G",
                                      requires_grad=False)
            self.cube_matmul = P.MatMul(transpose_a=True)
        self.getG = P.InsertGradientOf(self.save_gradient)
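The constructor wires P.InsertGradientOf(self.save_gradient), but the hook itself is not shown. A hedged sketch of its contract: the hook observes the gradient flowing through the tapped tensor, may record statistics (here, the THOR matrix_G bookkeeping), and must return the gradient unchanged.

def save_gradient(self, dout):
    # dout: gradient w.r.t. the layer output (assumed (batch, out_channels)).
    # ... accumulate matrix_G statistics from dout here ...
    return dout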
Example 13
def __init__(self):
    super().__init__()
    self.matmul = P.MatMul()
    self.add = P.TensorAdd()
Example 14
    def __init__(self,
                 in_channel,
                 out_channel,
                 rate,
                 stride=1,
                 num=0,
                 index=None):
        super(ResidualBlock, self).__init__()

        channel = out_channel // self.expansion
        prune_channel = math.ceil(channel * rate)
        self.conv1 = _conv1x1(in_channel, prune_channel, stride=1)
        self.bn1 = _bn(prune_channel)

        self.conv2 = _conv3x3(prune_channel, prune_channel, stride=stride)
        self.bn2 = _bn(prune_channel)

        self.conv3 = _conv1x1(prune_channel,
                              math.ceil(channel * rate * self.expansion),
                              stride=1)
        self.bn3 = _bn_last(math.ceil(channel * rate * self.expansion))

        self.relu = nn.ReLU()

        self.down_sample = False

        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell(
                [_conv1x1(in_channel, out_channel, stride),
                 _bn(out_channel)])
        self.add = P.TensorAdd()

        self.op = P.ScatterNd()
        self.transpose = P.Transpose()
        self.idx = None
        self.shape = None
        if out_channel == 256:
            self.shape = (out_channel, 1, 56, 56)
            if num == 0:
                self.idx = index[0]
            elif num == 1:
                self.idx = index[1]
            elif num == 2:
                self.idx = index[2]
        elif out_channel == 512:
            self.shape = (out_channel, 1, 28, 28)
            if num == 0:
                self.idx = index[3]
            elif num == 1:
                self.idx = index[4]
            elif num == 2:
                self.idx = index[5]
            elif num == 3:
                self.idx = index[6]
        elif out_channel == 1024:
            self.shape = (out_channel, 1, 14, 14)
            if num == 0:
                self.idx = index[7]
            elif num == 1:
                self.idx = index[8]
            elif num == 2:
                self.idx = index[9]
            elif num == 3:
                self.idx = index[10]
            elif num == 4:
                self.idx = index[11]
            elif num == 5:
                self.idx = index[12]
        elif out_channel == 2048:
            self.shape = (out_channel, 1, 7, 7)
            if num == 0:
                self.idx = index[13]
            elif num == 1:
                self.idx = index[14]
            elif num == 2:
                self.idx = index[15]
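For reference, the if/elif ladder above maps (out_channel, num) to a flat offset into index; an equivalent table-driven sketch (hypothetical helper, not in the source):

_BASE = {256: 0, 512: 3, 1024: 7, 2048: 13}    # first index slot per out_channel
_SIDE = {256: 56, 512: 28, 1024: 14, 2048: 7}  # feature-map side length

def resolve_idx(out_channel, num, index):
    if out_channel not in _BASE:
        return None, None
    side = _SIDE[out_channel]
    return index[_BASE[out_channel] + num], (out_channel, 1, side, side)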
Example 15
def __init__(self):
    super(SecondNet, self).__init__()
    self.add = P.TensorAdd()
Example 16
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False,
                 momentum=0.1,
                 training=False,
                 weights_update=False):
        super(ResidualBlockUsing, self).__init__()

        self.affine = weights_update

        out_chls = out_channels // self.expansion
        self.conv1 = _conv(in_channels,
                           out_chls,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        self.bn1 = _BatchNorm2dInit(out_chls,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        self.conv2 = _conv(out_chls,
                           out_chls,
                           kernel_size=3,
                           stride=stride,
                           padding=1)
        self.bn2 = _BatchNorm2dInit(out_chls,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        self.conv3 = _conv(out_chls,
                           out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        self.bn3 = _BatchNorm2dInit(out_channels,
                                    momentum=momentum,
                                    affine=self.affine,
                                    use_batch_statistics=training)

        if training:
            self.bn1 = self.bn1.set_train()
            self.bn2 = self.bn2.set_train()
            self.bn3 = self.bn3.set_train()

        if not weights_update:
            self.conv1.weight.requires_grad = False
            self.conv2.weight.requires_grad = False
            self.conv3.weight.requires_grad = False

        self.relu = P.ReLU()
        self.downsample = down_sample
        if self.downsample:
            self.conv_down_sample = _conv(in_channels,
                                          out_channels,
                                          kernel_size=1,
                                          stride=stride,
                                          padding=0)
            self.bn_down_sample = _BatchNorm2dInit(
                out_channels,
                momentum=momentum,
                affine=self.affine,
                use_batch_statistics=training)
            if training:
                self.bn_down_sample = self.bn_down_sample.set_train()
            if not weights_update:
                self.conv_down_sample.weight.requires_grad = False
        self.add = P.TensorAdd()
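A hypothetical construct for the block above (the source shows only the constructor): the standard bottleneck residual path with an optional projection shortcut.

def construct(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    if self.downsample:
        identity = self.bn_down_sample(self.conv_down_sample(x))
    return self.relu(self.add(out, identity))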
Example 17
def __init__(self, dropout_prob=0.1):
    super(ResidualConnection, self).__init__()
    self.add = P.TensorAdd()
    self.dropout = nn.Dropout(1.0 - dropout_prob)
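An assumed construct for the residual connection above, following the usual Transformer pattern: dropout on the sub-layer output, then add the shortcut.

def construct(self, hidden_tensor, input_tensor):
    output = self.dropout(hidden_tensor)
    return self.add(output, input_tensor)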
Example 18
    def __init__(self, args, strategy):
        super(SemiAutoOneHotNet, self).__init__()
        self.a = args.a
        self.b = args.b
        self.c = args.c
        self.d = args.d
        self.e = args.e
        self.cast = P.Cast()
        self.cast.set_strategy(strategy=strategy.twod_strategy)
        self.cast1 = P.Cast()
        self.cast1.set_strategy(strategy=strategy.twod_strategy)
        self.cast2 = P.Cast()
        self.cast2.set_strategy(strategy=strategy.twod_strategy)
        self.cast3 = P.Cast()
        self.cast3.set_strategy(strategy=strategy.scalar_strategy)
        self.cast4 = P.Cast()
        self.cast4.set_strategy(strategy=strategy.scalar_strategy)
        self.a_const = Tensor(self.a, dtype=mstype.float32)
        self.b_const = Tensor(self.b, dtype=mstype.float32)
        self.c_const = Tensor(self.c, dtype=mstype.float32)
        self.d_const = Tensor(self.d, dtype=mstype.float32)
        self.e_const = Tensor(self.e, dtype=mstype.float32)
        self.m_const_zero = Tensor(0, dtype=mstype.float32)
        self.a_const_one = Tensor(1, dtype=mstype.float32)
        self.onehot = P.OneHot()
        self.onehot.set_strategy(strategy=strategy.onehot_strategy)
        self.exp = P.Exp()
        self.exp.set_strategy(strategy=strategy.twod_strategy)
        self.exp2 = P.Exp()
        self.exp2.set_strategy(strategy=strategy.twod_strategy)
        self.exp3 = P.Exp()
        self.exp3.set_strategy(strategy=strategy.twod_strategy)
        self.mul_const = P.Mul()
        self.mul_const.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const2 = P.TensorAdd()
        self.mul_const2.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const3 = P.Sub()
        self.mul_const3.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul_const4 = P.Sub()
        self.mul_const4.set_strategy(strategy=strategy.scalar_twod_strategy)
        self.mul_const5 = P.Mul()
        self.mul_const5.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul = P.Mul()
        self.mul.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul2 = P.Mul()
        self.mul2.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul3 = P.TensorAdd()
        self.mul3.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul4 = P.Sub()
        self.mul4.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul5 = P.RealDiv()
        self.mul5.set_strategy(strategy=strategy.twod_twodbc_strategy)
        self.mul6 = P.Mul()
        self.mul6.set_strategy(strategy=strategy.twod_twod_strategy)
        self.mul7 = P.Mul()
        self.mul7.set_strategy(strategy=strategy.twod_scalar_strategy)
        self.mul8 = P.RealDiv()
        self.mul8.set_strategy(strategy=strategy.scalar_scalar_strategy)
        self.mul9 = P.TensorAdd()
        self.mul9.set_strategy(strategy=strategy.twod_scalar_strategy)

        self.reduce_max = P.ReduceMax(keep_dims=True)
        self.reduce_max.set_strategy(strategy=strategy.twod_strategy)

        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.reduce_sum.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_2 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_2.set_strategy(strategy=strategy.twod_strategy)
        self.reduce_sum_3 = P.ReduceSum(keep_dims=False)
        self.reduce_sum_3.set_strategy(strategy=strategy.oned_strategy)

        self.reshape = P.Reshape()
        self.log = P.Log()
        self.log.set_strategy(strategy=strategy.twod_strategy)

        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.normalize = P.L2Normalize(axis=1)
        self.normalize.set_strategy(strategy=strategy.twod_strategy_m)
        self.normalize2 = P.L2Normalize(axis=1)
        self.normalize2.set_strategy(strategy=strategy.twod_strategy_m)
        self.fc = P.MatMul(transpose_b=True)
        self.fc.set_strategy(strategy=strategy.twodbc_twod_strategy)
        weight_shape = [args.num_classes, args.emb_size]
        weight_np = np.zeros(weight_shape, np.float32)
        self.weight = Parameter(Tensor(weight_np),
                                name='model_parallel_weight')
Example 19
def __init__(self):
    super(SecondNet, self).__init__()
    self.addN = P.AddN()
    self.max = P.Maximum()
    self.add = P.TensorAdd()
Example 20
def __init__(self, in_features, out_features):
    super(Net, self).__init__()
    self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
    self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
    self.matmul = P.MatMul()
    self.add = P.TensorAdd()
Example 21
def __init__(self, target_shape, strategy0=None, strategy1=None):
    super(Reshape, self).__init__()
    self.add = P.TensorAdd(strategy=strategy0)
    self.reshape = P.Reshape(strategy=strategy1)
    self.shape = tuple(target_shape)
Example 22
""" test_hypermap """
import numpy as np

from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import Primitive
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from ...ut_filter import non_graph_engine

# pylint: disable=W0613
# W0613: unused-argument

tensor_add = P.TensorAdd()
scala_add = Primitive('scalar_add')
add = C.MultitypeFuncGraph('add')


@add.register("Number", "Number")
def add_scala(x, y):
    return scala_add(x, y)


@add.register("Tensor", "Tensor")
def add_tensor(x, y):
    return tensor_add(x, y)


hyper_add = C.HyperMap(add)
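A minimal usage sketch (assumed, mirroring this file's test style): HyperMap walks two tuples element by element and dispatches add to whichever registered overload matches the element types.

@ms_function
def hyper_map_add(x, y):
    return hyper_add(x, y)

t = Tensor(np.ones([2]).astype(np.float32))
out = hyper_map_add((1, t), (2, t))  # -> (3, Tensor([2., 2.]))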
Example 23
from mindspore.ops import Primitive
from mindspore.ops import operations as P

tuple_getitem = Primitive('tuple_getitem')
add = P.TensorAdd()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
transdata = Primitive("TransData")
Transpose = P.Transpose()


class FnDict:
    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        self.fnDict[fn.__name__] = fn

    def __getitem__(self, name):
        return self.fnDict[name]
Example 24
def __init__(self, axis=0, epsilon=1e-4, strategy0=None, strategy1=None):
    super(L2normalize, self).__init__()
    self.add = P.TensorAdd(strategy=strategy0)
    self.l2norm = P.L2Normalize(axis, epsilon, strategy1)
Example 25
    def __init__(self,
                 vocab_size,
                 embedding_size,
                 field_size,
                 param_init='normal',
                 target='CPU',
                 slice_mode='batch_slice',
                 feature_num_list=None,
                 max_norm=None,
                 sparse=True,
                 operator='SUM'):
        super(MultiFieldEmbeddingLookup,
              self).__init__(vocab_size, embedding_size, param_init, target,
                             slice_mode, feature_num_list, max_norm, sparse)
        self.field_size = validator.check_positive_int(field_size,
                                                       'field_size')
        self.operator = operator

        self.mul = P.Mul()
        self.inf_mask_mul = P.Mul()
        self.bias_add = P.TensorAdd()
        self.inf_add = P.TensorAdd()
        self.merge_op = None
        self.count_op = P.UnsortedSegmentSum()
        self.abs = P.Abs()
        self.equal = P.Equal()
        self.add = P.TensorAdd()
        self.cast = P.Cast()
        self.div_no_nan = P.DivNoNan()
        self.expand = P.ExpandDims()
        self.max_mask_mul = P.Mul()
        self.max_no_equal = P.NotEqual()

        if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
            self.merge_op = P.UnsortedSegmentSum()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
            self.merge_op = P.UnsortedSegmentMax()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MEAN:
            self.merge_op = P.UnsortedSegmentSum()
        else:
            raise ValueError(
                "The operator supports ['SUM', 'MAX', 'MEAN'], but found: " +
                str(operator))

        parallel_mode = _get_parallel_mode()
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if slice_mode in ["table_row_slice", "batch_slice"
                          ] and is_auto_parallel:
            self.merge_op.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1)))
            self.expand.shard(((get_group_size(), ), ))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
            self.count_op.shard(((get_group_size(), 1), (get_group_size(), 1)))
            self.add.shard(((get_group_size(), ), (get_group_size(), )))
            self.div_no_nan.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_mask_mul.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                self.equal.shard(((get_group_size(), 1, 1), ()))
                self.inf_mask_mul.shard(((get_group_size(), 1, 1), ()))
                self.merge_op.shard(
                    ((get_group_size(), 1), (get_group_size(), )))
                self.count_op.shard(
                    ((get_group_size(), ), (get_group_size(), )))
                self.inf_add.shard(
                    ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
        elif slice_mode == "table_column_slice" and is_auto_parallel:
            self.merge_op.shard(((1, 1, get_group_size()), (1, 1)))
            self.div_no_nan.shard(((1, get_group_size()), (1, 1)))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(((1, 1, 1), (1, 1, get_group_size())))
            self.count_op.shard(((1, 1), (1, 1)))
            self.add.shard(((1, ), (1, )))
            self.max_mask_mul.shard(((1, get_group_size()), (1, 1)))
            self.expand.shard(((1, ), ))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                self.equal.shard(((1, 1, 1), ()))
                self.inf_mask_mul.shard(((1, 1, 1), ()))
                self.merge_op.shard(((1, get_group_size()), (1, )))
                self.count_op.shard(((1, ), (1, )))
                self.inf_add.shard(((1, 1, get_group_size()), (1, 1, 1)))
        else:
            if is_auto_parallel:
                raise ValueError(
                    "slice_mode should be in ['table_row_slice', 'batch_slice', "
                    "'table_column_slice'], but got " + str(slice_mode))

        # Min value for fp32
        self.negative_inf_value = -3.402823466E+38
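A hedged NumPy sketch of the 'MEAN' path set up above: segment-sum the embeddings per field, count the entries per field, then divide, with DivNoNan semantics so empty fields yield zeros.

import numpy as np

emb = np.array([[1., 1.], [2., 2.], [3., 3.]], np.float32)  # three looked-up rows
field_ids = np.array([0, 0, 1])                             # two fields
sums = np.zeros((2, 2), np.float32)
counts = np.zeros((2, 1), np.float32)
np.add.at(sums, field_ids, emb)                             # UnsortedSegmentSum
np.add.at(counts, field_ids, 1.0)
mean = np.divide(sums, counts, out=np.zeros_like(sums), where=counts != 0)  # DivNoNan
# mean -> [[1.5, 1.5], [3.0, 3.0]]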
Example 26
def __init__(self):
    super(MultiOutNet, self).__init__()
    self.add = P.TensorAdd()
    self.mul = P.Mul()
    self.sum = P.ReduceSum()
Example 27
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32.0,
                 has_bias=True,
                 activation=None):
        super(Dense_ThorNoBN, self).__init__()
        self.batch_size = batch_size
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.has_bias = check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init,
                                            [out_channels, in_channels]),
                                name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]),
                                  name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None

        self.matrix_A_inv = Parameter(Tensor(
            np.zeros([128, 128, 16, 16]).astype(np.float16)),
                                      name='matrix_A_inv',
                                      requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(
            np.zeros([63, 63, 16, 16]).astype(np.float16)),
                                      name="matrix_G_inv",
                                      requires_grad=False)
        self.fake_G = Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16))

        self.matmul = P.MatMul(transpose_b=True)
        self.cube_matmul = P.CusMatMulCube(transpose_a=True)
        self.matrix_combine = P.CusMatrixCombine()
        self.cholesky = P.CusCholeskyTrsm()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.cov_step = Parameter(initializer(0, [1], mstype.int32),
                                  name="cov_step",
                                  requires_grad=False)
        self.mul = P.Mul()
        self.cast = P.Cast()
        self.damping = Tensor(damping)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.vector_matmul = P.CusBatchMatMul()
        self.pad = P.Pad(((0, 24), (0, 24)))
        self.pad1 = P.Pad(((0, 8), (0, 8)))
        self.slice = P.Slice()
        self.gather = P.GatherV2()
        self.assignadd = P.AssignAdd()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.A_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                   name="A_inv_max",
                                   requires_grad=False)
        self.G_inv_max = Parameter(initializer(0, [1], mstype.float32),
                                   name="G_inv_max",
                                   requires_grad=False)
        self.fused_abs_max1 = P.CusFusedAbsMax1([1000, 1000])
        self.fused_abs_max2 = P.CusFusedAbsMax1()
        self.log = P.Log()
        self.exp = P.Exp()
        self.dampingA = Tensor(np.identity(2048), mstype.float32)
        self.dampingG = Tensor(np.identity(1024), mstype.float32)
        self.add = P.TensorAdd()
        self.sqrt = P.Sqrt()
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.fake_G_inv_max = Tensor(np.zeros([
            1,
        ]).astype(np.float32))

        self.norm = P.L2Normalize(axis=1)
        self.multiplier = Parameter(Tensor(np.ones([1]), dtype=mstype.float16),
                                    requires_grad=True,
                                    name='multiplier')
Example 28
def __init__(self):
    super(FirstNet, self).__init__()
    self.net = SecondNet().add_flags_recursive(fp16=True)
    self.add = P.TensorAdd()
Example 29
def __init__(self, strategy1, strategy2):
    super().__init__()
    self.matmul = P.MatMul().shard(strategy1)
    self.add = P.TensorAdd().shard(strategy2)
Example 30
def __init__(self):
    super(SummaryDemo, self).__init__()
    self.s = P.ScalarSummary()
    self.histogram_summary = P.HistogramSummary()
    self.add = P.TensorAdd()
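An assumed construct for the cell above, matching the usual summary-op test pattern: record a scalar and a histogram, then return the sum.

def construct(self, x, y):
    self.s("x1", x)                     # scalar summary tagged "x1"
    self.histogram_summary("hist", x)   # histogram summary tagged "hist"
    return self.add(x, y)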