Example No. 1
def __init__(self):
    super(FlipLROff, self).__init__()
    self.gather_flip_feat = GatherFlipFeature()
    # Permutation that swaps the left/right COCO keypoint channels
    # (eyes, ears, shoulders, elbows, wrists, hips, knees, ankles).
    self.flip_index = Tensor(
        np.array(
            [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15],
            np.int32))
    self.half = ops.Split(axis=0, output_num=2)   # split the batch into its original and flipped halves
    self.split = ops.Split(axis=1, output_num=2)  # split the keypoint offset map into two channel halves
    self.flip = ops.ReverseV2(axis=[3])           # reverse along the width axis (NCHW)
    self.concat = ops.Concat(axis=1)
Example No. 2
    def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, has_bias=False, modulation=True):
        super(DeformConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        self.zero_padding = nn.Pad(((0, 0), (0, 0), (padding, padding), (padding, padding)))
        # The final conv runs on the regenerated (ks-times enlarged) feature map,
        # so stride == kernel_size collapses each ks x ks block to one output pixel.
        self.conv = nn.Conv2d(inc, outc, kernel_size=kernel_size, pad_mode='valid', padding=0,
                              stride=kernel_size, has_bias=has_bias)

        # p_conv predicts the 2*ks*ks (x, y) sampling offsets per output location.
        self.p_conv = nn.Conv2d(inc, 2*kernel_size*kernel_size, kernel_size=self.kernel_size,
                                pad_mode='pad', padding=self.padding, stride=self.stride)

        self.modulation = modulation
        if modulation:
            # DCNv2-style modulation: m_conv predicts a scalar weight for each of the ks*ks sampling points.
            self.m_conv = nn.Conv2d(inc, kernel_size*kernel_size, kernel_size=self.kernel_size,
                                    pad_mode='valid', padding=0, stride=self.stride)
        if kernel_size % 2 == 0:
            raise ValueError("Only odd kernel sizes are supported, but the current kernel size is {}".format(kernel_size))
        self.N = kernel_size * kernel_size
        self.begin = kernel_size // 2
        self.sigmoid = ops.Sigmoid()
        self.dtype = ops.DType()
        self.perm_list = (0, 2, 3, 1)
        self.transpose = ops.Transpose()
        self.floor = ops.Floor()
        self.half = ops.Split(axis=-1, output_num=2)
        self.clip_value = ClipByValue()
        self.expand_dims = ops.ExpandDims()
        self.shape = ops.Shape()
        self.cast = ops.Cast()
        self._get_offset = GetOffsetPosition(self.begin, self.stride)
        self._get_surround = GetSurroundFeature()
        self._generate_fm = RegenerateFeatureMap(self.kernel_size)
Example No. 3
def __init__(self, ks):
    super(RegenerateFeatureMap, self).__init__()
    self.ks = ks
    self.shape = ops.Shape()
    self.reshape = ops.Reshape()
    self.split = ops.Split(axis=-1, output_num=ks)  # one slice per kernel column
    self.concat = ops.Concat(axis=2)
Example No. 4
def gru_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    """One GRU step; the weights are stored as (3*hidden_size, in_features) and used transposed."""
    if b_ih is None:
        gi = P.MatMul(False, True)(input, w_ih)   # input @ w_ih.T
        gh = P.MatMul(False, True)(hidden, w_hh)  # hidden @ w_hh.T
    else:
        gi = P.MatMul(False, True)(input, w_ih) + b_ih
        gh = P.MatMul(False, True)(hidden, w_hh) + b_hh
    i_r, i_i, i_n = P.Split(1, 3)(gi)  # reset / update / new-gate slices
    h_r, h_i, h_n = P.Split(1, 3)(gh)

    resetgate = P.Sigmoid()(i_r + h_r)
    inputgate = P.Sigmoid()(i_i + h_i)
    newgate = P.Tanh()(i_n + resetgate * h_n)
    hy = newgate + inputgate * (hidden - newgate)  # interpolate between new and previous state

    return hy
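
A minimal smoke test for the cell above (a sketch, assuming gru_cell's module imports mindspore.ops.operations as P; the weight layout (3*hidden_size, in_features) follows from the transposed MatMul):

import numpy as np
from mindspore import Tensor

batch, input_size, hidden_size = 2, 8, 16
x = Tensor(np.random.randn(batch, input_size).astype(np.float32))
h = Tensor(np.random.randn(batch, hidden_size).astype(np.float32))
w_ih = Tensor(np.random.randn(3 * hidden_size, input_size).astype(np.float32))
w_hh = Tensor(np.random.randn(3 * hidden_size, hidden_size).astype(np.float32))
hy = gru_cell(x, h, w_ih, w_hh, None, None)  # -> shape (2, 16)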
Example No. 5
class Scalar_mix(nn.Cell):
    """
    Computes a parameterised scalar mixture of N tensors: ``mixture = gamma * sum(s_k * tensor_k)``,
    where ``s = softmax(w)`` and ``w`` and ``gamma`` are scalar parameters.
    """

    def __init__(self, mixture_size: int, do_layer_norm: bool = False) -> None:
        super(Scalar_mix, self).__init__()
        self.mixture_size = mixture_size
        self.do_layer_norm = do_layer_norm
        self.scalar_parameters = ParameterTuple([Parameter(Tensor(np.array([0.0]), mindspore.float32)) \
                                                 for _ in range(mixture_size)])
        self.gamma = Parameter(Tensor(np.array([0.0]), mindspore.float32))
        self.sum = P.ReduceSum()
        self.sqrt = P.Sqrt()
        self.cat = P.Concat()
        self.unsqueeze = P.ExpandDims()  # the axis is supplied at call time, not in the constructor

    def construct(self, tensors, mask):
        """
        Compute a weighted average of the ``tensors``
        Args:
        tensors: The input tensors can be any shape
        with at least two dimensions, but must all be the same shape.
        mask: When ``do_layer_norm=True``, the ``mask`` is required input.
        for example with ``tensors`` of shape``(batch_size, timesteps, dim)``
        and ``mask`` of shape ``(batch_size, timesteps)``.dtype=mindspore.float32
        """
        if len(tensors) != self.mixture_size:
            raise ValueError("{} tensors were passed, but the module was initialized to "
                             "mix {} tensors.".format(len(tensors), self.mixture_size))

        def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):
            tensor_masked = tensor * broadcast_mask
            mean = self.sum(tensor_masked) / num_elements_not_masked
            variance = (self.sum(((tensor_masked - mean) * broadcast_mask) ** 2)
                        / num_elements_not_masked)
            return (tensor - mean) / self.sqrt(variance + 1E-12)

        normed_weights = P.Softmax(axis=0)(self.cat([parameter for parameter
                                                     in self.scalar_parameters]))
        normed_weights = P.Split(0, normed_weights.shape[0])(normed_weights)  # TODO: verify against torch.split with split_size=1

        if not self.do_layer_norm:
            pieces = []
            for weight, tensor in zip(normed_weights, tensors):
                pieces.append(weight * tensor)
            return self.gamma * sum(pieces)
        else:
            # Add a trailing axis so the mask broadcasts over the feature dim:
            # (batch_size, timesteps) -> (batch_size, timesteps, 1)
            broadcast_mask = self.unsqueeze(mask, -1)
            input_dim = tensors[0].shape[-1]
            num_elements_not_masked = self.sum(mask) * input_dim

            pieces = []
            for weight, tensor in zip(normed_weights, tensors):
                pieces.append(weight * _do_layer_norm(tensor,
                                                      broadcast_mask, num_elements_not_masked))
            return self.gamma * sum(pieces)
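
A quick usage sketch (shapes here are illustrative assumptions; the mask is only consulted when do_layer_norm=True):

import numpy as np
from mindspore import Tensor

mix = Scalar_mix(mixture_size=2)
t1 = Tensor(np.random.randn(2, 3, 4).astype(np.float32))
t2 = Tensor(np.random.randn(2, 3, 4).astype(np.float32))
mask = Tensor(np.ones((2, 3), np.float32))
out = mix((t1, t2), mask)  # same shape as t1/t2, here (2, 3, 4)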
Example No. 6
def __init__(self):
    super(GetSurroundFeature, self).__init__()
    self.shape = ops.Shape()
    self.concat = ops.Concat(axis=1)
    self.reshape = ops.Reshape()
    self.half = ops.Split(axis=-1, output_num=2)  # split sampling coordinates into x and y halves
    self.tile = ops.Tile()
    self.gather_nd = ops.GatherNd()
    self.transpose = ops.Transpose()
    self.perm_list = (0, 2, 3, 1)   # NCHW -> NHWC
    self.order_list = (0, 3, 1, 2)  # NHWC -> NCHW
    self.expand_dims = ops.ExpandDims()
Example No. 7
def __init__(self, net_config, K=100, enable_nms_fp16=True):
    super(MultiPoseDecode, self).__init__()
    self.K = K  # number of top-scoring detections to keep
    self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
    self.shape = ops.Shape()
    self.gather_topk = GatherTopK()
    self.gather_topk_channel = GatherTopKChannel()
    self.gather_by_ind = GatherFeatureByInd()
    self.half = ops.Split(axis=-1, output_num=2)
    self.half_first = ops.Split(axis=0, output_num=2)
    self.split = ops.Split(axis=-1, output_num=4)
    self.flip_lr = FlipLR()
    self.flip_lr_off = FlipLROff()
    self.flip_tensor = FlipTensor()
    self.concat = ops.Concat(axis=1)
    self.concat_a2 = ops.Concat(axis=2)
    self.concat_a3 = ops.Concat(axis=3)
    self.trans_gather_feature = TransposeGatherFeature()
    self.expand_dims = ops.ExpandDims()
    self.reshape = ops.Reshape()
    self.add = ops.TensorAdd()
    self.dtype = ops.DType()
    self.cast = ops.Cast()
    self.thresh = 0.1  # keypoint confidence threshold
    self.transpose = ops.Transpose()
    self.perm_list = (0, 2, 1, 3)
    self.tile = ops.Tile()
    self.greater = ops.Greater()
    self.square = ops.Square()
    self.sqrt = ops.Sqrt()
    self.reduce_sum = ops.ReduceSum()
    self.min = ops.ArgMinWithValue(axis=3)
    self.max = ops.Maximum()
    self.hm_hp = net_config.hm_hp
    self.dense_hp = net_config.dense_hp
    self.reg_offset = net_config.reg_offset
    self.reg_hp_offset = net_config.reg_hp_offset
    # Positions of the optional output heads depend on which heads are enabled.
    self.hm_hp_ind = 3 if self.hm_hp else 2
    self.reg_ind = self.hm_hp_ind + 1 if self.reg_offset else self.hm_hp_ind
    self.reg_hp_ind = self.reg_ind + 1 if self.reg_hp_offset else self.reg_ind
Example No. 8
def __init__(self, net_config, K=100, enable_nms_fp16=True):
    super(DetectionDecode, self).__init__()
    self.K = K
    self.nms = NMS(enable_nms_fp16=enable_nms_fp16)
    self.shape = ops.Shape()
    self.gather_topk = GatherTopK()
    self.half = ops.Split(axis=-1, output_num=2)
    self.add = ops.TensorAdd()
    self.concat_a2 = ops.Concat(axis=2)
    self.trans_gather_feature = TransposeGatherFeature()
    self.expand_dims = ops.ExpandDims()
    self.reshape = ops.Reshape()
    self.reg_offset = net_config.reg_offset
    self.Sigmoid = nn.Sigmoid()
Example No. 9
def lstm_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    """One LSTM step; the weights are stored as (4*hidden_size, in_features) and used transposed."""
    hx, cx = hidden
    if b_ih is None:
        gates = P.MatMul(False, True)(input, w_ih) + P.MatMul(False, True)(hx, w_hh)
    else:
        gates = P.MatMul(False, True)(input, w_ih) + P.MatMul(False, True)(hx, w_hh) + b_ih + b_hh
    ingate, forgetgate, cellgate, outgate = P.Split(1, 4)(gates)

    ingate = P.Sigmoid()(ingate)
    forgetgate = P.Sigmoid()(forgetgate)
    cellgate = P.Tanh()(cellgate)
    outgate = P.Sigmoid()(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)  # new cell state
    hy = outgate * P.Tanh()(cy)                   # new hidden state

    return hy, cy
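
The same kind of smoke test as for gru_cell works here (again a sketch; the weight layout (4*hidden_size, in_features) follows from the transposed MatMul):

import numpy as np
from mindspore import Tensor

batch, input_size, hidden_size = 2, 8, 16
x = Tensor(np.random.randn(batch, input_size).astype(np.float32))
hx = Tensor(np.random.randn(batch, hidden_size).astype(np.float32))
cx = Tensor(np.random.randn(batch, hidden_size).astype(np.float32))
w_ih = Tensor(np.random.randn(4 * hidden_size, input_size).astype(np.float32))
w_hh = Tensor(np.random.randn(4 * hidden_size, hidden_size).astype(np.float32))
hy, cy = lstm_cell(x, (hx, cx), w_ih, w_hh, None, None)  # each -> shape (2, 16)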
Example No. 10
    def construct(self, inputs, inputs_backward, next_ids_forward,
                  next_ids_backward):
        """
            args:
                inputs: (batch_size, sequence_length, max_chars)
                next_ids_forward: (batch_size, sequence_length)
                next_ids_backward: (batch_size, sequence_length)
        """
        # (batch_size, sequence_length, embedding_dim)
        token_embedding = self.char_embedding(inputs)
        token_embedding_backward = self.char_embedding(inputs_backward)
        # (num_layers, batch_size, sequence_length, embedding_dim)
        encoder_output, _ = self.bilstm(token_embedding,
                                        token_embedding_backward)
        # (batch_size, sequence_length, embedding_dim * num_directions)
        encoder_output = encoder_output[1]
        # (batch_size, sequence_length, embedding_dim)
        forward, backward = P.Split(2, 2)(encoder_output)

        loss = self.loss((forward, backward),
                         (next_ids_forward, next_ids_backward))
        return loss
Example No. 11
def __init__(self):
    super(FlipTensor, self).__init__()
    self.half = ops.Split(axis=0, output_num=2)  # split the batch into its original and flipped halves
    self.flip = ops.ReverseV2(axis=[3])          # reverse along the width axis (NCHW)
    self.gather_nd = ops.GatherNd()
Example No. 12
def glu(input, dim=-1):
    """Gated linear unit: halves `dim` and gates one half with the sigmoid of the other."""
    a, b = ops.Split(dim, 2)(input)
    return a * ops.Sigmoid()(b)
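
A quick check (assuming a standard MindSpore install); note that the size of the split dimension must be even for Split(dim, 2) to succeed:

import numpy as np
from mindspore import Tensor
import mindspore.ops as ops

x = Tensor(np.random.randn(4, 6).astype(np.float32))
y = glu(x)  # halves the last dim: shape (4, 3)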
Example No. 13
def __init__(self, dim: int = -1):
    super().__init__()
    self.split = ops.Split(dim, 2)
    self.sigmoid = ops.Sigmoid()
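
The snippet ends at __init__; a plausible construct (an assumption, not shown in the source, mirroring the functional glu() in Example No. 12) would be:

def construct(self, x):
    a, b = self.split(x)        # halve the chosen dimension
    return a * self.sigmoid(b)  # gate one half with the sigmoid of the other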