Example #1
    def __init__(self, flat_dim, fc_dim, attri_num_list):
        super(AttriHead, self).__init__()
        self.fc1 = fc_with_initialize(flat_dim, fc_dim)
        self.fc1_relu = P.ReLU()
        self.fc1_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc1 = fc_with_initialize(fc_dim, attri_num_list[0])
        self.attri_fc1_relu = P.ReLU()
        self.attri_bn1 = nn.BatchNorm1d(attri_num_list[0], affine=False)
        self.softmax1 = P.Softmax()

        self.fc2 = fc_with_initialize(flat_dim, fc_dim)
        self.fc2_relu = P.ReLU()
        self.fc2_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc2 = fc_with_initialize(fc_dim, attri_num_list[1])
        self.attri_fc2_relu = P.ReLU()
        self.attri_bn2 = nn.BatchNorm1d(attri_num_list[1], affine=False)
        self.softmax2 = P.Softmax()

        self.fc3 = fc_with_initialize(flat_dim, fc_dim)
        self.fc3_relu = P.ReLU()
        self.fc3_bn = nn.BatchNorm1d(fc_dim, affine=False)
        self.attri_fc3 = fc_with_initialize(fc_dim, attri_num_list[2])
        self.attri_fc3_relu = P.ReLU()
        self.attri_bn3 = nn.BatchNorm1d(attri_num_list[2], affine=False)
        self.softmax3 = P.Softmax()
    def _sample_from_distribution(self, distribution: Tensor):
        """
        Sample one token per batch using self.sample_function(); the sampling function may vary with the device target.

        Args:
            distribution (Tensor): (batch_size, vocab_length) distribution or logits of the last token for each batch.

        Returns:
            word_index (Tensor): (batch_size,)
        """
        if self.device_target == "Ascend":
            # Ascend: reshape, keep the top-k rows, then sample one token per batch
            distribution = self.reshape(distribution, (self.vocab_size, self.batch_size))
            topk_distribution = distribution[:self.topk_num, ::]
            topk_distribution = self.reshape(topk_distribution, (self.batch_size, -1))

            word_index = self.sample_function(P.Softmax()(topk_distribution), 1, 1)
            word_index = self.reshape(word_index, (-1,))
        elif self.device_target == "GPU":
            # GPU: sample directly from the softmax of the full distribution
            word_index = self.sample_function(P.Softmax()(distribution), 1)
        else:
            raise ValueError("Device type {} not supported yet.".format(self.device_target))

        return word_index
    def generate_one_step(self,
                          input_ids: Tensor,
                          input_mask: Tensor,
                          beam_size=1):
        """
        generate next token for only one step, use softmax to regularize logits

        Arg:
            input_ids (Tensor): (batch_size,seq_length) ids of input text
            input_mask (Tensor): (batch_size,seq_length) mask of input text, 0 for mask, 1 for reserved
            beam_size (int): int, beam_size for each candidate text
        
        Return:
            topk_indices (Tensor): (batch_size,beam_size), topk (k = beam_size) num of the next token indices for each batch 
            topk_logits (Tensor): (batch_size,beam_size), topk (k = beam_size) num of the next token logits(distribution) for each batch 
        """
        logits = self.decoder.predict(input_ids, input_mask)
        last_token_pos_recorder = LastTokenPos(input_mask)
        last_token_pos_list = last_token_pos_recorder.get_pos(shift=0)
        return_last_logits = extract_single_token_logits(
            logits, last_token_pos_list)  #(batch_size,1,vocab_size)
        return_last_logits = P.Reshape()(
            return_last_logits,
            (self.batch_size, self.vocab_size))  #(batch_size,vocab_size)
        return_last_logits = P.Softmax()(return_last_logits)
        topk_logits, topk_indices = P.TopK(sorted=True)(
            return_last_logits, beam_size)  #(batch_size,beam_size)
        return topk_indices, topk_logits
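A minimal, self-contained usage sketch of the softmax-then-TopK step that generate_one_step performs on the last-token logits (batch size, vocabulary size and beam size here are assumed, not taken from the original model):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

batch_size, vocab_size, beam_size = 2, 8, 3
last_logits = Tensor(np.random.randn(batch_size, vocab_size).astype(np.float32))

probs = P.Softmax()(last_logits)                      # (batch_size, vocab_size), rows sum to 1
topk_probs, topk_indices = P.TopK(sorted=True)(probs, beam_size)
print(topk_indices.shape, topk_probs.shape)           # (2, 3) (2, 3)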
Example #4
    def __init__(self, dim, n_heads):
        super().__init__()

        # h: number of attention heads
        self.n_heads = n_heads

        # v = V / h: feature size per head
        self.size_per_head = dim // n_heads
        scores_mul = 1.0 / np.sqrt(float(self.size_per_head))
        self.scores_mul = ms.Tensor(scores_mul, ms.float32)

        self.exones = P.Ones()((1, 1, n_heads, 1, 1), ms.int32)

        # shape = (h, v)
        self.reshape_tail = (self.n_heads, self.size_per_head)

        self.output = Dense(dim, dim, has_bias=False)

        self.mul = P.Mul()
        self.div = P.Div()
        self.softmax = P.Softmax()
        self.bmm = P.BatchMatMul()
        self.bmmt = P.BatchMatMul(transpose_b=True)
        self.squeeze = P.Squeeze(-2)
        self.reducesum = P.ReduceSum(keep_dims=True)

        self.transpose = P.Transpose()
        self.trans_shape = (0, 1, 3, 2, 4)
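The operators above are the building blocks of multi-head scaled dot-product attention. A standalone sketch of that computation, with assumed shapes and plain BatchMatMul/Mul/Softmax in place of the module's own wiring:

import numpy as np
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P

batch, seq, d = 2, 5, 4
q = Tensor(np.random.randn(batch, seq, d).astype(np.float32))
k = Tensor(np.random.randn(batch, seq, d).astype(np.float32))
v = Tensor(np.random.randn(batch, seq, d).astype(np.float32))

scores = P.BatchMatMul(transpose_b=True)(q, k)                  # (batch, seq, seq)
scores = P.Mul()(scores, Tensor(1.0 / np.sqrt(d), ms.float32))  # scale by 1/sqrt(d)
weights = P.Softmax()(scores)                                   # attention weights per query position
out = P.BatchMatMul()(weights, v)                               # (batch, seq, d)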
Example #5
 def __init__(self,
              input_channel=1280,
              num_classes=1000,
              has_dropout=False,
              activation="None"):
     super(MobileNetV2Head, self).__init__()
     # mobilenet head
     head = ([
         GlobalAvgPooling(),
         LastQuantLayer(
             input_channel, num_classes, has_bias=True, has_bn=False)
     ] if not has_dropout else [
         GlobalAvgPooling(),
         nn.Dropout(0.2),
         LastQuantLayer(
             input_channel, num_classes, has_bias=True, has_bn=False)
     ])
     self.head = nn.SequentialCell(head)
     self.need_activation = True
     if activation == "Sigmoid":
         self.activation = P.Sigmoid()
     elif activation == "Softmax":
         self.activation = P.Softmax()
     else:
         self.need_activation = False
     self._initialize_weights()
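A standalone sketch of the head's data flow, using plain ops (ReduceMean, MatMul) in place of the GlobalAvgPooling and LastQuantLayer cells above, with assumed shapes; the final Softmax corresponds to activation == "Softmax":

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

feat = Tensor(np.random.randn(2, 1280, 7, 7).astype(np.float32))
pooled = P.ReduceMean(keep_dims=False)(feat, (2, 3))        # global average pool -> (2, 1280)
w = Tensor(np.random.randn(1280, 1000).astype(np.float32) * 0.01)
logits = P.MatMul()(pooled, w)                              # (2, 1000)
probs = P.Softmax()(logits)                                 # class probabilities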
Example #6
 def softmax_relu_pass():
     x = AnyPattern()
     softmax_pattern = IsPrimTypeOf(P.Softmax())
     pattern = CallWith(softmax_pattern, inputs=[x])
     relu_pattern = IsPrimTypeOf(P.ReLU(), should_replace=False)
     target = CallWith(relu_pattern, inputs=[x])
     return pattern, target
 def __init__(self):
     super(NetSoftmax1, self).__init__()
     self.softmax = P.Softmax(axis=-2)
     x = Tensor(
         np.array([[0.1, 0.3, 0.6], [0.2, -0.6, 0.8],
                   [0.6, 1, 0.4]]).astype(np.float32))
     self.x = Parameter(initializer(x, x.shape), name='x')
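A small standalone check (not from the source) of what axis=-2 means for the 3x3 parameter above: the softmax normalizes down each column rather than across each row:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.array([[0.1, 0.3, 0.6],
                     [0.2, -0.6, 0.8],
                     [0.6, 1.0, 0.4]]).astype(np.float32))
col_probs = P.Softmax(axis=-2)(x)       # each column sums to 1
row_probs = P.Softmax()(x)              # default axis=-1: each row sums to 1
print(P.ReduceSum()(col_probs, -2))     # ~[1. 1. 1.]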
Example #8
    def construct(self, inputs):

        f1, f2, f3 = self.base(inputs)
        f1, f2, f3 = self.fpn(f1, f2, f3)

        # SSH
        f1 = self.ssh1(f1)
        f2 = self.ssh2(f2)
        f3 = self.ssh3(f3)
        features = [f1, f2, f3]

        bbox = ()
        for i, feature in enumerate(features):
            bbox = bbox + (self.BboxHead[i](feature), )
        bbox_regressions = self.cat(bbox)

        cls = ()
        for i, feature in enumerate(features):
            cls = cls + (self.ClassHead[i](feature), )
        classifications = self.cat(cls)

        landm = ()
        for i, feature in enumerate(features):
            landm = landm + (self.LandmarkHead[i](feature), )
        ldm_regressions = self.cat(landm)

        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, P.Softmax(-1)(classifications),
                      ldm_regressions)

        return output
Example #9
    def softmax_make_tuple_pass():
        x = Any()
        softmax = P.Softmax()
        pattern = Call(softmax, [x])

        target = Call("make_tuple", [pattern, new_para])
        return pattern, target
 def __init__(self):
     super(Net, self).__init__()
     self.softmax = P.Softmax(axis=1)
     self.add = P.TensorAdd()
     self.cast = P.Cast()
     self.relu = P.ReLU()
     self.reduce_mean = P.ReduceMean()
Example #11
 def softmax_pass():
     x = Any()
     pattern = Call(P.Softmax(), [x])
     imm = Imm(0)
     target_0 = Call("make_tuple", [pattern])
     target = Call(Constants.kTupleGetItem, [target_0, imm])
     return pattern, target
Example #12
def 单步测试(单元_捆, 词_数表, 数_词表, network):
    """Single-step test: stack each unit's input array, run one forward pass of the network, and report per-unit results."""
    枝数 = len(单元_捆)
    for i in range(枝数):
        if i == 0:
            测试_捆 = 单元_捆[i]["待测数组"]
        else:
            测试_捆 = np.vstack((测试_捆, 单元_捆[i]["待测数组"]))

    MAKS1 = 创建_遮罩(测试_捆, 12)
    MAKS1 = Tensor(MAKS1)
    累加 = Tensor(测试_捆, mindspore.int32)
    结果_A = network.前向(累加, MAKS1)
    softmax = P.Softmax(-1)
    成功数 = 0
    和报 = ''
    for i in range(枝数):

        结果 = 结果_A[i:i + 1, -1 - 单元_捆[i]["标差"], :]
        结果 = softmax(结果)
        结果 = 结果.asnumpy()

        结果 = np.argmax(结果, -1)
        返回, 简报 = 生成测试简报(数_词表, 结果, 单元_捆[i])
        和报 = 和报 + 简报 + "\n"
        if 返回:
            成功数 = 成功数 + 1

    return 成功数, 枝数, 和报
Example #13
 def __init__(self, hidden_size, output_size, max_length, dropout_p=0.1):
     super(AttnDecoderRNN, self).__init__()
     self.hidden_size = hidden_size
     self.output_size = output_size
     self.dropout_p = dropout_p
     self.max_length = max_length
     self.embedding = nn.Embedding(self.output_size, self.hidden_size)
     self.attn = nn.Dense(in_channels=self.hidden_size * 2,
                          out_channels=self.max_length).to_float(
                              mstype.float16)
     self.attn_combine = nn.Dense(in_channels=self.hidden_size * 2,
                                  out_channels=self.hidden_size).to_float(
                                      mstype.float16)
     self.dropout = nn.Dropout(keep_prob=1.0 - self.dropout_p)
     self.gru = GRU(hidden_size, hidden_size).to_float(mstype.float16)
     self.out = nn.Dense(in_channels=self.hidden_size,
                         out_channels=self.output_size).to_float(
                             mstype.float16)
     self.transpose = P.Transpose()
     self.concat = P.Concat(axis=2)
     self.concat1 = P.Concat(axis=1)
     self.softmax = P.Softmax(axis=1)
     self.relu = P.ReLU()
     self.log_softmax = P.LogSoftmax(axis=1)
     self.bmm = P.BatchMatMul()
     self.unsqueeze = P.ExpandDims()
     self.squeeze = P.Squeeze(1)
     self.squeeze1 = P.Squeeze(0)
     self.cast = P.Cast()
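A standalone sketch (assumed shapes) of the attention step these operators implement: softmax the per-position scores, then use BatchMatMul to combine the encoder outputs into a context vector:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

batch, max_length, hidden = 2, 10, 16
attn_scores = Tensor(np.random.randn(batch, max_length).astype(np.float32))
encoder_outputs = Tensor(np.random.randn(batch, max_length, hidden).astype(np.float32))

attn_weights = P.Softmax(axis=1)(attn_scores)             # (batch, max_length), sums to 1 per row
attn_weights = P.ExpandDims()(attn_weights, 1)            # (batch, 1, max_length)
context = P.BatchMatMul()(attn_weights, encoder_outputs)  # (batch, 1, hidden)
context = P.Squeeze(1)(context)                           # (batch, hidden)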
Example #14
 def bn_pass():
     """
     Substitute a BatchNorm with a Softmax.
     """
     pattern = Call(P.BatchNorm())
     target = Call(P.Softmax())
     return pattern, target
 def __init__(self, backbone, generate=False):
     super(EvalNet, self).__init__(auto_prefix=False)
     self.backbone = backbone
     self.argmax = P.ArgMaxWithValue()
     self.generate = generate
     self.topk = P.TopK(sorted=True).shard(((1, 1),))
      self.log_softmax = P.Softmax(axis=-1)  # note: despite the name, this applies a plain Softmax
Example #16
 def softmax_pass():
     x = Any()
     pattern = Call(P.Softmax(), [x])
     imm = Imm(0)
     target_0 = Call("make_tuple", [pattern])
     target = Call("tuple_getitem", [target_0, imm])
     return pattern, target
Example #17
    def __init__(self,
                 config,
                 batch_size,
                 num_classes,
                 use_sigmoid_cls,
                 target_means=(.0, .0, .0, .0),
                 target_stds=(1.0, 1.0, 1.0, 1.0)
                 ):
        super(Proposal, self).__init__()
        cfg = config
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.target_means = target_means
        self.target_stds = target_stds
        self.use_sigmoid_cls = config.use_sigmoid_cls

        if self.use_sigmoid_cls:
            self.cls_out_channels = 1
            self.activation = P.Sigmoid()
            self.reshape_shape = (-1, 1)
        else:
            self.cls_out_channels = num_classes
            self.activation = P.Softmax(axis=1)
            self.reshape_shape = (-1, 2)

        if self.cls_out_channels <= 0:
            raise ValueError('num_classes={} is too small'.format(num_classes))

        self.num_pre = cfg.rpn_proposal_nms_pre
        self.min_box_size = cfg.rpn_proposal_min_bbox_size
        self.nms_thr = cfg.rpn_proposal_nms_thr
        self.nms_post = cfg.rpn_proposal_nms_post
        self.nms_across_levels = cfg.rpn_proposal_nms_across_levels
        self.max_num = cfg.rpn_proposal_max_num

        # Op Define
        self.squeeze = P.Squeeze()
        self.reshape = P.Reshape()
        self.cast = P.Cast()

        self.feature_shapes = cfg.feature_shapes

        self.transpose_shape = (1, 2, 0)

        self.decode = BoundingBoxDecode()

        self.nms = P.NMSWithMask(self.nms_thr)
        self.concat_axis0 = P.Concat(axis=0)
        self.concat_axis1 = P.Concat(axis=1)
        self.split = P.Split(axis=1, output_num=5)
        self.min = P.Minimum()
        self.gatherND = P.GatherNd()
        self.slice = P.Slice()
        self.select = P.Select()
        self.greater = P.Greater()
        self.transpose = P.Transpose()
        self.tile = P.Tile()
        self.set_train_local(config, training=True)

        self.multi_10 = Tensor(10.0, mstype.float16)
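A standalone sketch (assumed anchor count) of the classification branch configured above when use_sigmoid_cls is False: scores are reshaped to (-1, 2) and softmaxed over the two classes, and the foreground probability is what later ranks proposals before NMS:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

rpn_cls_scores = Tensor(np.random.randn(12, 2).astype(np.float32))  # per-anchor (bg, fg) scores
probs = P.Softmax(axis=1)(rpn_cls_scores)                           # per-anchor class probabilities
fg_prob = probs[:, 1]                                               # foreground probability per anchor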
Example #18
    def softmax_addn_pass():
        x = Any()
        pattern = Call(P.Softmax(), [x])

        weight_tensor = Tensor(np.zeros([42]), mindspore.float16)
        new_weight = NewTensor(weight_tensor)
        target = Call(P.AddN(), [x, new_weight])
        return pattern, target
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.matmul1 = P.MatMul().shard(strategy1)
     self.matmul2 = P.MatMul().shard(strategy2)
     self.gelu = P.Gelu().shard(strategy3)
     self.tanh = P.Tanh().shard(strategy3)
     self.softmax = P.Softmax().shard(strategy3)
     self.logsoftmax = P.LogSoftmax().shard(strategy3)
Example #20
 def __init__(self, strategy1, strategy2, strategy3, strategy4, strategy5, strategy6):
     super().__init__()
     self.matmul1 = P.MatMul().set_strategy(strategy1)
     self.matmul2 = P.MatMul().set_strategy(strategy2)
     self.gelu = P.Gelu().set_strategy(strategy3)
     self.tanh = P.Tanh().set_strategy(strategy4)
     self.softmax = P.Softmax(axis=(0, 1)).set_strategy(strategy5)
     self.logsoftmax = P.LogSoftmax().set_strategy(strategy6)
Example #21
 def __init__(self, net, need_slice=False):
     super(UnetEval, self).__init__()
     self.net = net
     self.need_slice = need_slice
     self.transpose = ops.Transpose()
     self.softmax = ops.Softmax(axis=-1)
     self.argmax = ops.Argmax(axis=-1)
     self.squeeze = ops.Squeeze(axis=0)
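A standalone sketch (assumed NCHW layout and shapes) of the post-processing these operators enable: move channels last, softmax over the class axis, then argmax per pixel:

import numpy as np
from mindspore import Tensor, ops

logits_nchw = Tensor(np.random.randn(1, 2, 4, 4).astype(np.float32))
logits_nhwc = ops.Transpose()(logits_nchw, (0, 2, 3, 1))   # (1, 4, 4, 2)
probs = ops.Softmax(axis=-1)(logits_nhwc)                  # per-pixel class probabilities
pred = ops.Argmax(axis=-1)(probs)                          # (1, 4, 4) class ids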
Example #22
 def bn_pass():
     """
     Substitute a BatchNorm with a Softmax.
     """
     bn = P.BatchNorm()
     pattern = CallWith(bn)
     softmax = P.Softmax()
     target = CallWith(softmax, should_replace=False)
     return pattern, target
Example #23
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Categorical"):
        param = dict(locals())
        param['param_dict'] = {'probs': probs}
        valid_dtype = mstype.uint_type + mstype.int_type + mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Categorical, self).__init__(seed, dtype, name, param)

        self._probs = self._add_parameter(probs, 'probs')
        if self.probs is not None:
            check_rank(self.probs)
            check_prob(self.probs)
            check_sum_equal_one(probs)

            # update is_scalar_batch and broadcast_shape
            # drop one dimension
            if self.probs.shape[:-1] == ():
                self._is_scalar_batch = True
            self._broadcast_shape = self._broadcast_shape[:-1]

        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.broadcast = broadcast_to
        self.cast = P.Cast()
        self.clip_by_value = C.clip_by_value
        self.concat = P.Concat(-1)
        self.cumsum = P.CumSum()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expand_dim = P.ExpandDims()
        self.fill = P.Fill()
        self.gather = P.GatherNd()
        self.greater = P.Greater()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.log = log_generic
        self.log_softmax = P.LogSoftmax()
        self.logicor = P.LogicalOr()
        self.logicand = P.LogicalAnd()
        self.multinomial = P.Multinomial(seed=self.seed)
        self.reshape = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softmax = P.Softmax()
        self.squeeze = P.Squeeze()
        self.squeeze_first_axis = P.Squeeze(0)
        self.squeeze_last_axis = P.Squeeze(-1)
        self.square = P.Square()
        self.transpose = P.Transpose()
        self.is_nan = P.IsNan()

        self.index_type = mstype.int32
        self.nan = np.nan
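A minimal usage sketch of the Categorical distribution built above (the exact values and calls here are illustrative assumptions): construct it from class probabilities, then sample and evaluate a log-probability:

import mindspore.common.dtype as mstype
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor

ca = msd.Categorical(probs=[0.2, 0.3, 0.5], dtype=mstype.int32)
samples = ca.sample((4,))                      # 4 draws over the 3 classes
logp = ca.log_prob(Tensor(1, mstype.int32))    # log-probability of class 1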
Example #24
 def softmax_relu_pass():
     x = Any()
     softmax_pattern = Prim(P.Softmax())
     pattern = Call(softmax_pattern, [x])
     sigmoid_pattern = Prim(P.Sigmoid())
     call_sigmoid = Call(sigmoid_pattern, [x])
     relu_pattern = Prim(P.ReLU())
     target = Call(relu_pattern, [call_sigmoid])
     return pattern, target
Example #25
 def __init__(self):
     super(Net, self).__init__()
     self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)),
                             name="weight")
     self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)),
                           name="bias")
     self.matmul = P.MatMul()
     self.biasAdd = P.BiasAdd()
     self.softmax = P.Softmax()
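A standalone functional sketch (assumed batch size) of the classifier this Net wires up: logits = x @ weight + bias, then Softmax turns each row into a probability distribution over the 10 classes:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.random.randn(4, 64).astype(np.float32))
w = Tensor(np.ones((64, 10)).astype(np.float32))
b = Tensor(np.ones(10).astype(np.float32))

logits = P.BiasAdd()(P.MatMul()(x, w), b)   # (4, 10)
probs = P.Softmax()(logits)                 # each row sums to 1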
Example #26
    def softmax_make_tuple_pass():
        x = AnyPattern()
        softmax = P.Softmax()
        pattern = CallWith(softmax, inputs=[x])

        target = CallWith("make_tuple",
                          inputs=[pattern, new_para],
                          should_replace=False)
        return pattern, target
Example #27
 def single_bn_pass():
     """
     Substitute a Softmax which does NOT take MatMul as input with a ReLU6.
     """
     matmul = Prim("MatMul")
     pattern_0 = NoneOf(matmul)
     softmax = P.Softmax()
     pattern = Call(softmax, [pattern_0])
     relu6 = P.ReLU6()
     target = Call(relu6, [pattern_0])
     return pattern, target
Example #28
    def softmax_neg_pass():
        x = Any()
        softmax_pattern = Prim(P.Softmax())
        call_softmax = Call(softmax_pattern, [x])
        relu_pattern = Prim(P.ReLU())
        call_relu = Call(relu_pattern, [x])

        pattern = OneOf([call_softmax, call_relu])
        neg_ops = Prim(P.Neg())
        target = Call(neg_ops, [pattern])
        return pattern, target
Example #29
    def softmax_addn_pass():
        x = Any()
        pattern = Call(P.Softmax(), [x])

        default_tensor0 = Tensor(np.ones((4, 4)), mindspore.float32)
        default_tensor1 = Tensor(np.ones((4, 4)), mindspore.float32)
        new_para_0 = NewParameter("Merlin", default_tensor0)
        new_para_1 = NewParameter("Arthur", default_tensor1)
        target_0 = Call(P.MatMul(), [new_para_0, new_para_1])
        target = Call("make_tuple", [target_0])
        return pattern, target
Example #30
    def _fspecial_gauss(self, filter_size, filter_sigma):
        """get gauss kernel"""
        filter_size, g = _gauss_kernel_helper(filter_size)

        square_sigma_scale = -0.5 / (filter_sigma * filter_sigma)
        g = g * square_sigma_scale
        g = F.reshape(g, (1, -1)) + F.reshape(g, (-1, 1))
        g = F.reshape(g, (1, -1))
        g = P.Softmax()(g)
        ret = F.reshape(g, (1, 1, filter_size, filter_size))
        return ret
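Applying Softmax to the flattened log-weights is what normalizes the Gaussian kernel: it computes exp(g) / sum(exp(g)), so all filter taps sum to 1. A standalone sketch with an assumed filter size and sigma:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F

size, sigma = 5, 1.5
coords = np.arange(size, dtype=np.float32) - (size - 1) / 2.0
g = Tensor(-0.5 / (sigma * sigma) * coords * coords)     # 1-D Gaussian log-weights
g = F.reshape(g, (1, -1)) + F.reshape(g, (-1, 1))        # separable 2-D log-weights
kernel = P.Softmax()(F.reshape(g, (1, -1)))              # normalized: taps sum to 1
kernel = F.reshape(kernel, (1, 1, size, size))           # (1, 1, size, size) conv kernel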