Example #1
    def p_matrix_norm(input, porder=1., axis=None, keepdim=False, name=None):
        block = LayerHelper('norm', **locals())
        out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        abs_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(type='abs',
                        inputs={'X': input},
                        outputs={'Out': abs_out})
        pow_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())

        block.append_op(type='pow',
                        inputs={'X': abs_out},
                        outputs={'Out': pow_out},
                        attrs={'factor': porder})
        sum_out = block.create_variable_for_type_inference(
            dtype=block.input_dtype())
        block.append_op(type='reduce_sum',
                        inputs={'X': pow_out},
                        outputs={'Out': sum_out},
                        attrs={
                            'dim': axis,
                            'keep_dim': keepdim,
                            'reduce_all': True if axis is None else False
                        })
        block.append_op(type='pow',
                        inputs={'X': sum_out},
                        outputs={'Out': out},
                        attrs={'factor': float(1. / porder)})
        return out
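The same abs → pow → reduce_sum → pow chain can be expressed with public paddle APIs in dynamic mode. A minimal sketch for reference (the function name p_matrix_norm_eager and the sample tensor are illustrative, not part of the original code):

import paddle

def p_matrix_norm_eager(x, porder=2., axis=None, keepdim=False):
    # abs -> pow -> sum -> pow(1/p), mirroring the op sequence above
    s = paddle.sum(paddle.abs(x) ** porder, axis=axis, keepdim=keepdim)
    return s ** (1. / porder)

x = paddle.to_tensor([[3., 4.], [0., 0.]])
print(p_matrix_norm_eager(x, porder=2., axis=1))  # [5., 0.]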
Example #2
def segment_pool(data, segment_ids, pool_type, name=None):
    """
    Segment Operator.
    """
    pool_type = pool_type.upper()
    if in_dygraph_mode():
        out, tmp = core.ops.segment_pool(data, segment_ids, 'pooltype',
                                         pool_type)
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"), "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_pool", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    pool_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data,
                "SegmentIds": segment_ids},
        outputs={"Out": out,
                 "SummedIds": pool_ids},
        attrs={"pooltype": pool_type})
    return out
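A minimal usage sketch in dynamic graph mode, assuming the module's own imports are in scope (the sample tensors are illustrative):

import paddle

data = paddle.to_tensor([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
# Rows 0 and 1 fall into segment 0, row 2 into segment 1.
out = segment_pool(data, segment_ids, pool_type="sum")
# Expected: [[4., 6.], [5., 6.]]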
Example #3
    def inf_norm(input,
                 porder=None,
                 axis=None,
                 keepdim=False,
                 asvector=False,
                 name=None):
        helper = LayerHelper('frobenius_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
        helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
        reduce_out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        reduce_all = axis is None or axis == [] or asvector
        axis = axis if axis is not None and axis != [] else [0]

        reduce_type = 'reduce_max' if porder == float('inf') else 'reduce_min'
        helper.append_op(type=reduce_type,
                         inputs={'X': out},
                         outputs={'Out': reduce_out},
                         attrs={
                             'dim': axis,
                             'keep_dim': keepdim,
                             'reduce_all': reduce_all
                         })

        return reduce_out
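Semantically, inf_norm reduces the absolute values with max (porder == +inf) or min (porder == -inf). A plain-paddle sketch of the same computation (sample tensor is illustrative):

import paddle

x = paddle.to_tensor([[1., -5.], [2., 3.]])
pos_inf_norm = paddle.max(paddle.abs(x), axis=-1)  # reduce_max branch: [5., 3.]
neg_inf_norm = paddle.min(paddle.abs(x), axis=-1)  # reduce_min branch: [1., 2.]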
Example #4
def rank_attention(input,
                   rank_offset,
                   rank_param_shape,
                   rank_param_attr,
                   max_rank=3):
    """
    **Rank Attention layer**
    This Op computes rank attention between input and rank_param, where
    rank_param gives the organization of the data. Note: it currently
    only supports GPU devices.
    This Op exists in contrib, which means that it is not shown to the public.
    Args:
        input: Tensor with data type float32, float64.
        rank_offset: Tensor with data type int32.
        rank_param_shape: The shape of rank_param.
        rank_param_attr: Attribute initializer of rank_param.
        max_rank: The max rank of input's ranks.
    Returns:
        Variable: A Tensor with the same data type as input's.
    Examples:
        .. code-block:: python

           import paddle.fluid as fluid
           import numpy as np
           
           input = fluid.data(name="input", shape=[None, 2], dtype="float32")
           rank_offset = fluid.data(name="rank_offset", shape=[None, 7], dtype="int32")
           out = fluid.contrib.layers.rank_attention(
               input=input,
               rank_offset=rank_offset,
               rank_param_shape=[18, 3],
               rank_param_attr=fluid.ParamAttr(
                   learning_rate=1.0,
                   name="ubm_rank_param.w_0",
                   initializer=fluid.initializer.Xavier(uniform=False)),
               max_rank=3)
    """
    helper = LayerHelper('rank_attention', **locals())
    dtype = helper.input_dtype(input_param_name='input')
    input_shape = input.shape
    assert input_shape[1] * max_rank * max_rank == rank_param_shape[0]

    rank_param = helper.create_parameter(attr=rank_param_attr,
                                         shape=rank_param_shape,
                                         dtype=dtype)
    rank_param.stop_gradient = False

    output = helper.create_variable_for_type_inference(dtype)
    ins_rank = helper.create_variable_for_type_inference(dtype=dtype,
                                                         stop_gradient=True)

    helper.append_op(type="rank_attention",
                     inputs={
                         "X": input,
                         "RankOffset": rank_offset,
                         "RankParam": rank_param
                     },
                     outputs={"Out": output},
                     attrs={"MaxRank": max_rank})

    return output
Example #5
def segment_max(data, segment_ids, name=None):
    r"""
    Segment max operator.

    This operator calculates the maximum of the elements of input `data`
    that share the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\max_{j} data_{j}$
    where max is over j such that `segment_ids[j] == i`.

    Args:
        data (tensor): a tensor, available data type float32, float64, int32, int64.
        segment_ids (tensor): a 1-D tensor whose size equals the first
                            dimension of the input data.
                            Available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is None). 
                            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
       output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_max(data, segment_ids)
            #Outputs: [[3., 2., 3.], [4., 5., 6.]]

    """

    if in_dygraph_mode():
        out, tmp = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")
        return out

    if _non_static_mode():
        out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
        return out

    check_variable_and_dtype(data, "X",
                             ("float32", "float64", "int32", "int64"),
                             "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_max", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={
                         "X": data,
                         "SegmentIds": segment_ids
                     },
                     outputs={
                         "Out": out,
                         "SummedIds": summed_ids
                     },
                     attrs={"pooltype": "MAX"})
    return out
Example #6
def cornerpool_op(layer_type, input, name):
    helper = LayerHelper(layer_type, input=input, name=name)
    dtype = helper.input_dtype()
    output = helper.create_variable_for_type_inference(dtype)
    max_map = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type=layer_type,
                     inputs={"X": input},
                     outputs={
                         "Output": output,
                         "MaxMap": max_map
                     })
    return output
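For intuition, a CornerNet-style "left pool" replaces each position with the running maximum taken toward the right edge; which direction a given layer_type pools is a property of the specific op. A numpy sketch of that one direction (assumed semantics, for illustration only):

import numpy as np

def left_pool_ref(x):
    # x: (H, W); out[i, j] = max(x[i, j:]) via a reversed cumulative max
    return np.maximum.accumulate(x[:, ::-1], axis=1)[:, ::-1]

x = np.array([[1., 3., 2.],
              [4., 1., 5.]])
print(left_pool_ref(x))
# [[3. 3. 2.]
#  [5. 5. 5.]]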
Example #7
def segment_mean(data, segment_ids, name=None):
    """
    Segment mean Operator.

    This operator calculates the mean of the elements of input `data` that
    share the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\frac{1}{n_i}  \\sum_{j} data_{j}$
    where sum is over j such that `segment_ids[j] == i` and $n_i$ is the
    number of indices j for which `segment_ids[j] == i`.

    Args:
        data (tensor): a tensor, available data type float32, float64.
        segment_ids (tensor): a 1-D tensor whose size equals the first
                            dimension of the input data.
                            Available data type is int32, int64.

    Returns:
       output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle
            import pgl
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = pgl.math.segment_mean(data, segment_ids)
            #Outputs: [[2., 2., 2.], [4., 5., 6.]]

    """
    if in_dygraph_mode():
        out, tmp = core.ops.segment_pool(data, segment_ids, 'pooltype', "MEAN")
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"), "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_mean", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={
                         "X": data,
                         "SegmentIds": segment_ids
                     },
                     outputs={
                         "Out": out,
                         "SummedIds": summed_ids
                     },
                     attrs={"pooltype": "MEAN"})
    return out
Example #8
    def forward(self,
                text,
                text_pair=None,
                max_seq_len=0,
                pad_to_max_seq_len=False):
        if in_dygraph_mode():
            if isinstance(text, list) or isinstance(text, tuple):
                text = to_tensor(list(text))
            if text_pair is not None:
                if isinstance(text_pair, list) or isinstance(text_pair, tuple):
                    text_pair = to_tensor(list(text_pair))
            input_ids, seg_ids = self.mod.faster_tokenizer(
                self.vocab, text, text_pair, "do_lower_case",
                self.do_lower_case, "max_seq_len", max_seq_len,
                "pad_to_max_seq_len", pad_to_max_seq_len,
                "is_split_into_words", self.is_split_into_words)

            return input_ids, seg_ids

        attrs = {
            "do_lower_case": self.do_lower_case,
            "max_seq_len": max_seq_len,
            "pad_to_max_seq_len": pad_to_max_seq_len,
            "is_split_into_words": self.is_split_into_words,
        }
        helper = LayerHelper("faster_tokenizer")
        input_ids = helper.create_variable_for_type_inference(dtype="int64")
        seg_ids = helper.create_variable_for_type_inference(dtype="int64")
        if text_pair is None:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        else:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text,
                                 'TextPair': text_pair
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        return input_ids, seg_ids
Example #9
def segment_sum(data, segment_ids, name=None):
    """
    Segment Sum Operator.

    This operator sums the elements of input `data` that share
    the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\sum_{j} data_{j}$
    where sum is over j such that `segment_ids[j] == i`.

    Args:
        data (Tensor): A tensor, available data type float32, float64.
        segment_ids (Tensor): A 1-D tensor whose size equals the first
                            dimension of the input data.
                            Available data type is int32, int64.
    Returns:
       output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle
            import pgl
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = pgl.math.segment_sum(data, segment_ids)
            #Outputs: [[4., 4., 4.], [4., 5., 6.]]

    """
    if in_dygraph_mode():
        out, tmp = core.ops.segment_pool(data, segment_ids, 'pooltype', "SUM")
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"), "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_sum", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={
                         "X": data,
                         "SegmentIds": segment_ids
                     },
                     outputs={
                         "Out": out,
                         "SummedIds": summed_ids
                     },
                     attrs={"pooltype": "SUM"})
    return out
Example #10
def three_nn(input, known, eps=1e-10, name=None):
    """
    **Three Nearest Neighbor Layer**

    This operator finds the top-3 nearest neighbors of each point
    in Input(X) among the known point coordinates specified by
    Input(Known) and calculates the distances to these
    nearest neighbors.

    Args:
        input (Variable): The input tensor of three_nn operator. This
                          is a 3-D tensor with shape of [B, N, 3].
        known (Variable): The input tensor of known points of three_nn
                          operator. This is a 3-D tensor with shape of
                          [B, M, 3].
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.

    Returns:
        distance (Variable): The output distance tensor of three_nn operator.
                             This is a 3-D tensor with shape of [B, N, 3].
        idx (Variable): The output index tensor of three_nn operator.
                             This is a 3-D tensor with shape of [B, N, 3].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[16, 3], dtype='float32')
            known = fluid.layers.data(name='known', shape=[32, 3], dtype='float32')
            distance, idx = fluid.layers.three_nn(x, known)
    """
    helper = LayerHelper('three_nn', **locals())
    dtype = helper.input_dtype()
    dist = helper.create_variable_for_type_inference(dtype)
    idx = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="three_nn",
                     inputs={
                         "X": input,
                         "Known": known
                     },
                     outputs={
                         "Distance": dist,
                         "Idx": idx
                     },
                     attrs={'eps': eps})
    return (dist, idx)
Example #11
def yolo_box_post(box0,
                  box1,
                  box2,
                  im_shape,
                  im_scale,
                  anchors0=[116, 90, 156, 198, 373, 326],
                  anchors1=[30, 61, 62, 45, 59, 119],
                  anchors2=[10, 13, 16, 30, 33, 23],
                  class_num=80,
                  conf_thresh=0.005,
                  downsample_ratio0=32,
                  downsample_ratio1=16,
                  downsample_ratio2=8,
                  clip_bbox=True,
                  scale_x_y=1.,
                  nms_threshold=0.45):
    helper = LayerHelper('yolo_box_post', **locals())
    output = helper.create_variable_for_type_inference(dtype=box0.dtype)
    nms_rois_num = helper.create_variable_for_type_inference(dtype='int32')
    inputs = {
        'Boxes0': box0,
        'Boxes1': box1,
        'Boxes2': box2,
        "ImageShape": im_shape,
        "ImageScale": im_scale
    }
    outputs = {'Out': output, 'NmsRoisNum': nms_rois_num}

    helper.append_op(
        type="yolo_box_post",
        inputs=inputs,
        attrs={
            'anchors0': anchors0,
            'anchors1': anchors1,
            'anchors2': anchors2,
            'class_num': class_num,
            'conf_thresh': conf_thresh,
            'downsample_ratio0': downsample_ratio0,
            'downsample_ratio1': downsample_ratio1,
            'downsample_ratio2': downsample_ratio2,
            'clip_bbox': clip_bbox,
            'scale_x_y': scale_x_y,
            'nms_threshold': nms_threshold
        },
        outputs=outputs)
    output.stop_gradient = True
    nms_rois_num.stop_gradient = True
    return output, nms_rois_num
Example #12
    def frobenius_norm(input, dim=None, keepdim=False, name=None):
        """
        The frobenius norm OP computes the Frobenius norm over certain two dimensions of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          dim (list, optional): None for last two dimensions.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
        """
        if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
            raise ValueError(
                "The dim of frobenius norm op should be None or two elements list!"
            )
        if in_dygraph_mode():
            if dim is None:
                return core.ops.frobenius_norm(input, 'keep_dim', keepdim,
                                               'reduce_all', True)
            return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
                                           keepdim, 'reduce_all', False)
        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
        if dim is None:
            attrs['reduce_all'] = True
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'frobenius_norm')

        helper = LayerHelper('frobenius_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(type='frobenius_norm',
                         inputs={'X': input},
                         outputs={'Out': out},
                         attrs=attrs)
        return out
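Equivalently, in eager mode the Frobenius norm over the reduced dimensions is the square root of the sum of squares; a minimal sketch (sample tensor is illustrative):

import paddle

x = paddle.to_tensor([[3., 0.], [0., 4.]])
fro = paddle.sqrt(paddle.sum(x * x))  # reduce over the last two dims
print(fro.numpy())  # 5.0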
Example #13
def check_finite_and_unscale(x, scale, name=None):
    """
    Check if input X contains all finite data, if yes, scale it by input Scale.

    $$Out = X / scale$$

    If any tensor in X contains Inf or Nan, FoundInfinite will be 1 (True)
    and Out will not be scaled. In this case, the data of
    Out should not be used, and its data may not be deterministic.
    Otherwise, FoundInfinite will be 0 (False).
    Args:
        x(list|tuple): The input tensors of check_finite_and_unscale operator.
        scale: The scale of check_finite_and_unscale operator.
    """
    #check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
    #for e in x:
    #    check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'],
    #                             'check_finite_and_unscale')

    helper = LayerHelper("check_finite_and_unscale", **locals())
    found_inf = helper.create_variable_for_type_inference(dtype='bool')

    inputs = {'X': x, 'Scale': scale}
    outputs = {'Out': x, 'FoundInfinite': found_inf}
    helper.append_op(type='check_finite_and_unscale',
                     inputs=inputs,
                     outputs=outputs)

    return x, found_inf
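The op's contract can be summarized in plain numpy; a sketch of the semantics (not of the in-place kernel the real op runs):

import numpy as np

def check_finite_and_unscale_ref(xs, scale):
    # FoundInfinite is True if any tensor contains inf/nan; otherwise
    # every tensor is divided by scale.
    found_inf = any(not np.isfinite(x).all() for x in xs)
    if not found_inf:
        xs = [x / scale for x in xs]
    return xs, found_inf

xs, found_inf = check_finite_and_unscale_ref([np.array([2., 4.])], 2.0)
print(xs, found_inf)  # [array([1., 2.])] False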
Example #14
def farthest_point_sampling(input, sampled_point_num):
    '''
    Sample points based on their maximum Euclidean distance to the points
    already selected.

    Args:
        input (Variable): input point cloud dataset with shape (B, N, 3),
            where B is the batch size, N is the number of points and 3 is
            the (x, y, z) coordinate
        sampled_point_num (int): the number of sampled points

    Returns:
        output (Variable): the sampled points with shape (B, M),
            where B is the batch size and M is the number of sampled points

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='data', shape=(2, 100, 3), dtype='float32')
            sampled_points = fluid.layers.farthest_point_sampling(x, 50)
    '''

    helper = LayerHelper('farthest_point_sampling', **locals())
    dtype = input.dtype
    op_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='farthest_point_sampling',
                     inputs={'X': input},
                     outputs={'Output': op_out},
                     attrs={'sampled_point_num': sampled_point_num})
    return op_out
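For intuition, a reference implementation of farthest point sampling for a single batch in plain numpy (a sketch; the layer itself runs a dedicated kernel):

import numpy as np

def farthest_point_sampling_ref(points, m):
    # points: (N, 3); greedily pick the point farthest from the set
    # selected so far, m times, and return the chosen indices.
    n = points.shape[0]
    selected = np.zeros(m, dtype=np.int64)
    dist = np.full(n, np.inf)
    for i in range(1, m):
        d = np.sum((points - points[selected[i - 1]]) ** 2, axis=1)
        dist = np.minimum(dist, d)
        selected[i] = np.argmax(dist)
    return selected

pts = np.random.rand(100, 3).astype('float32')
print(farthest_point_sampling_ref(pts, 5))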
Example #15
def partial_concat(input, start_index=0, length=-1):
    """
    **Partial Concat**
    This OP concatenates the inputs according to the start index and length. This
    OP exists in contrib, which means that it is not shown to the public.
    Only 2-D Tensor or LodTensor input is supported. Slice and concat can only be
    performed along the second dimension.

    .. code-block:: text

        Given:
            x = [[0, 1, 2],
                 [3, 4, 5]]
            y = [[6, 7, 8],
                 [9, 10, 11]]
            output = partial_concat([x, y], start_index=0, length=2)

          we get:

            output = [[0, 1, 6, 7],
                      [3, 4, 9, 10]]

    Args:
        input(list): List of input Tensors with data type float32, float64, int32,
            int64.
        start_index(int32): The start index of each instance for partial concatenation.
            Default is 0.
        length(int32): The length of each instance for partial concatenation. Default is -1.
            Negative values for all elements after start_index.
    Returns:
        Variable: A Tensor with the same data type as input's.
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name="x", shape=[None,3], dtype="float32")
            y = fluid.data(name="y", shape=[None,3], dtype="float32")
            concat = fluid.contrib.layers.partial_concat(
                [x, y], start_index=0, length=2)
    """
    if not isinstance(input, list):
        warnings.warn(
            "The type of input in partial_concat should be list, but received %s."
            % (type(input)))
        input = [input]
    for id, x in enumerate(input):
        check_variable_and_dtype(
            x, 'input[' + str(id) + ']',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'partial_concat')
    check_type(start_index, 'start_index', (int), 'partial_concat')
    check_type(length, 'length', (int), 'partial_concat')
    inputs = {'X': input}
    attrs = {'start_index': start_index, 'length': length}
    helper = LayerHelper('partial_concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(type='partial_concat',
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs)
    return out
Example #16
def lookup_table(input, embedding_table, dtype='float32'):
    """
    lookup table support for paddle.
    :param input: the ids tensor to look up
    :param embedding_table: the embedding parameter to gather rows from
    :param dtype: the output data type
    :return: the gathered embeddings
    """
    is_sparse = False
    is_distributed = False
    helper = LayerHelper('embedding', **locals())
    remote_prefetch = is_sparse and (not is_distributed)
    if remote_prefetch:
        assert is_sparse is True and is_distributed is False
    tmp = helper.create_variable_for_type_inference(dtype)
    padding_idx = -1
    helper.append_op(type='lookup_table',
                     inputs={
                         'Ids': input,
                         'W': embedding_table
                     },
                     outputs={'Out': tmp},
                     attrs={
                         'is_sparse': is_sparse,
                         'is_distributed': is_distributed,
                         'remote_prefetch': remote_prefetch,
                         'padding_idx': padding_idx
                     })
    return tmp
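A minimal static-graph usage sketch (tensor names and shapes are illustrative; the embedding table is an ordinary parameter):

import paddle
paddle.enable_static()

ids = paddle.static.data(name='ids', shape=[None, 1], dtype='int64')
table = paddle.static.create_parameter(shape=[1000, 64], dtype='float32')
emb = lookup_table(ids, table)  # gathers rows of `table` indexed by `ids`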
Example #17
def correlation(input1,
                input2,
                pad_size,
                kernel_size,
                max_displacement,
                stride1,
                stride2,
                corr_type_multiply=1):
    helper = LayerHelper("correlation", **locals())
    output = helper.create_variable_for_type_inference(dtype=input1.dtype)
    helper.append_op(type="correlation",
                     inputs={
                         "Input1": input1,
                         "Input2": input2
                     },
                     attrs={
                         "pad_size": pad_size,
                         "kernel_size": kernel_size,
                         "max_displacement": max_displacement,
                         "stride1": stride1,
                         "stride2": stride2,
                         "corr_type_multiply": corr_type_multiply
                     },
                     outputs={"Output": output})
    return output
Example #18
def scatter_nd_add_fix_bug(ref, index, updates, name=None):
    """fix bug of paddle.fluid.layers.scatter_nd_add

    Args:
        ref (TYPE): NULL
        index (TYPE): NULL
        updates (TYPE): NULL
        name (TYPE): Default is None

    Returns: TODO

    Raises: NULL
    """
    if ref.dtype != updates.dtype:
        raise ValueError("ref and updates must have same data type.")

    helper = LayerHelper('scatter_nd_add', **locals())
    dtype = helper.input_dtype(input_param_name='ref')
    if name is None:
        output = helper.create_variable_for_type_inference(dtype)
    else:
        output = helper.create_variable(
            name=name, dtype=dtype, persistable=False)
    helper.append_op(
        type="scatter_nd_add",
        inputs={"X": ref,
                "Index": index,
                "Updates": updates},
        outputs={"Out": output})
    return output
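The accumulation semantics match numpy's unbuffered np.add.at; a sketch for 1-D indexing (sample data is illustrative):

import numpy as np

ref = np.zeros(5, dtype='float32')
index = np.array([[1], [1], [3]])     # shape (3, 1): last dim indexes into ref
updates = np.array([1., 2., 3.], dtype='float32')

out = ref.copy()
np.add.at(out, index[:, 0], updates)  # repeated indices accumulate
print(out)  # [0. 3. 0. 3. 0.]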
Example #19
def relu2(x, name=None):
    # The type of relu2 matches the type registered for the OP
    helper = LayerHelper("relu2", **locals())
    # Create the output Variable
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="relu2", inputs={"X0": [x]}, outputs={"Out": out})
    return out
Example #20
def vmat(x, name=None):
    # The type of vmat matches the type registered for the OP
    helper = LayerHelper("vmat", **locals())
    # Create the output Variable
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="vmat", inputs={"X": x}, outputs={"Y": out})
    return out
Example #21
def slice_select(x, axis, starts, ends, strides, out=None):
    if not isinstance(axis, (list, tuple)):
        raise TypeError(
            f'Argument type error. `axis` is supposed to be list or'
            f' tuple but found {type(axis)}.')
    if not isinstance(starts, (list, tuple)):
        raise TypeError(
            f'Argument type error. `starts` is supposed to be list or'
            f' tuple but found {type(starts)}.')
    if not isinstance(ends, (list, tuple)):
        raise TypeError(
            f'Argument type error. `ends` is supposed to be list or'
            f' tuple but found {type(ends)}.')
    assert len(axis) == len(starts) == len(ends) == len(strides), (
        f'len(axis), len(starts), len(ends) and len(strides) should be equal, '
        f'but len(axis)={len(axis)}, len(starts)={len(starts)}, '
        f'len(ends)={len(ends)} and len(strides)={len(strides)}')

    attrs = {'axis': axis, 'starts': starts, 'ends': ends, 'strides': strides}
    helper = LayerHelper('slice_select_p', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type=helper.layer_type,
                     inputs={'X': x},
                     outputs={'Y': out},
                     attrs=attrs)
    return out
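On a concrete tensor the op behaves like ordinary strided slicing; a numpy sketch of the same selection (sample data is illustrative):

import numpy as np

x = np.arange(12).reshape(3, 4)
# slice_select(x, axis=[1], starts=[0], ends=[4], strides=[2])
# keeps every second column:
print(x[:, 0:4:2])
# [[ 0  2]
#  [ 4  6]
#  [ 8 10]]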
Example #22
def fill_const(value, shape, dtype, out=None):
    attrs = {'value': value, 'shape': shape, 'dtype': dtype}
    helper = LayerHelper('fill_constant_p', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type=helper.layer_type, outputs={'Y': out}, attrs=attrs)
    return out
Example #23
        def op2(x):
            value = paddle.fluid.layers.fill_constant([1, 3, 2], "float32", 1)
            # test stop_gradient
            value.stop_gradient = False
            x.stop_gradient = False
            attrs = {
                'axes': [0],
                'starts': [6],
                'ends': [0],
                'steps': [-4],
                'decrease_axes': [],
                'none_axes': [],
                'dtype': paddle.float32
            }
            inputs = {'Input': x, 'ValueTensor': value}

            helper = LayerHelper("set_value")
            y = helper.create_variable_for_type_inference(dtype=x.dtype)

            helper.append_op(type="set_value",
                             inputs=inputs,
                             outputs={'Out': y},
                             attrs=attrs)

            return y, value
Example #24
    def forward(self,
                text,
                text_pair=None,
                do_lower_case=True,
                max_seq_len=-1,
                is_split_into_words=False,
                pad_to_max_seq_len=False):
        if in_dygraph_mode():
            input_ids, seg_ids = _C_ops.faster_tokenizer(
                self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
            return input_ids, seg_ids

        attrs = {
            "do_lower_case": do_lower_case,
            "max_seq_len": max_seq_len,
            "pad_to_max_seq_len": pad_to_max_seq_len,
            "is_split_into_words": is_split_into_words,
        }
        helper = LayerHelper("faster_tokenizer")
        input_ids = helper.create_variable_for_type_inference(dtype="int64")
        seg_ids = helper.create_variable_for_type_inference(dtype="int64")
        if text_pair is None:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        else:
            helper.append_op(type='faster_tokenizer',
                             inputs={
                                 'Vocab': self.vocab,
                                 'Text': text,
                                 'TextPair': text_pair
                             },
                             outputs={
                                 'InputIds': input_ids,
                                 'SegmentIds': seg_ids
                             },
                             attrs=attrs)
        return input_ids, seg_ids
Example #25
def histogram(input, bins=100, min=0, max=0):
    """
    Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
    If min and max are both zero, the minimum and maximum values of the data are used.

    Args:
        input (Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor
            should be float32, float64, int32, int64.
        bins (int): number of histogram bins
        min (int): lower end of the range (inclusive)
        max (int): upper end of the range (inclusive)

    Returns:
        Variable: Tensor or LoDTensor calculated by histogram layer. The data type is int64.

    Code Example 1:
        .. code-block:: python

            import paddle
            import numpy as np
            startup_program = paddle.static.Program()
            train_program = paddle.static.Program()
            with paddle.static.program_guard(train_program, startup_program):
                inputs = paddle.data(name='input', dtype='int32', shape=[2,3])
                output = paddle.histogram(inputs, bins=5, min=1, max=5)
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                exe.run(startup_program)
                img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
                res = exe.run(train_program,
                              feed={'input': img},
                              fetch_list=[output])
                print(np.array(res[0])) # [0,3,0,2,1]

    Code Example 2:
        .. code-block:: python

            import paddle
            paddle.disable_static(paddle.CPUPlace())
            inputs = paddle.to_tensor([1, 2, 1])
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result) # [0, 2, 1, 0]
            paddle.enable_static()
    """
    if in_dygraph_mode():
        return core.ops.histogram(input, "bins", bins, "min", min, "max", max)

    helper = LayerHelper('histogram', **locals())
    check_variable_and_dtype(input, 'X',
                             ['int32', 'int64', 'float32', 'float64'],
                             'histogram')
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(type='histogram',
                     inputs={'X': input},
                     outputs={'Out': out},
                     attrs={
                         'bins': bins,
                         'min': min,
                         'max': max
                     })
    return out
Example #26
def rrpn_box_coder(prior_box, prior_box_var, target_box, name=None):
    """
    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape 
            [M, 5] holds M boxes and data type is float32 or float64. Each box
            is represented as [x, y, w, h, angle], [x, y] is the 
            center coordinate of the anchor box, [w, h] is the width and height
            of the anchor box, angle is rotated angle of prior_box.
        prior_box_var(List|Variable|None): prior_box_var is a 2-D Tensor with
             shape [M, 5] that holds M groups of variances.
        target_box(Variable): This input can be a 2-D LoDTensor with shape 
            [M, 5]. Each box is represented as [x, y, w, h, angle]. The data
            type is float32 or float64.
        name(str): Name of this layer. None by default. 
    Returns:
        Variable:
        output_box(Variable): The output tensor of rrpn_box_coder_op with shape [N, 5], representing the 
        result of N target boxes encoded with N prior boxes and variances. 
        N is the number of boxes and 5 represents [x, y, w, h, angle].
    Examples:
 
        .. code-block:: python
 
            import paddle.fluid as fluid
            prior_box_decode = fluid.data(name='prior_box_decode',
                                          shape=[512, 5],
                                          dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                           shape=[512, 5],
                                           dtype='float32')
            output_decode = rrpn_box_coder(prior_box=prior_box_decode,
                                           prior_box_var=[10, 10, 5, 5, 1],
                                           target_box=target_box_decode)
    """

    helper = LayerHelper("rrpn_box_coder", **locals())

    if name is None:
        output_box = helper.create_variable_for_type_inference(
            dtype=prior_box.dtype)
    else:
        output_box = helper.create_variable(
            name=name, dtype=prior_box.dtype, persistable=False)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {}
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError(
            "Input variance of rrpn_box_coder must be Variable or list")
    helper.append_op(
        type="rrpn_box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box
Example #27
def rotated_roi_align(input,
                      rois,
                      pooled_height=1,
                      pooled_width=1,
                      spatial_scale=1.0,
                      name=None):
    """
    **RotatedRoIAlign Operator**

    Rotated Region of Interest align (also known as Rotated RoI align)
    performs bilinear interpolation on inputs of nonuniform sizes to obtain
    fixed-size feature maps (e.g. 7*7).

    Each region proposal is divided into equal-sized sections using
    pooled_width and pooled_height, keeping the location of the original
    result.

    Each RoI bin is made horizontal by a perspective transformation, and
    the values in each bin are computed directly through bilinear
    interpolation. The output is the mean of all values, which avoids the
    misalignment problem.
    """
    helper = LayerHelper('rrpn_rotated_roi_align', **locals())
    dtype = helper.input_dtype()
    align_out = helper.create_variable_for_type_inference(dtype)
    cx = helper.create_variable_for_type_inference('float32')
    cy = helper.create_variable_for_type_inference('float32')
    helper.append_op(type="rrpn_rotated_roi_align",
                     inputs={
                         "X": input,
                         "ROIs": rois
                     },
                     outputs={
                         "Out": align_out,
                         "ConIdX": cx,
                         "ConIdY": cy
                     },
                     attrs={
                         "pooled_height": pooled_height,
                         "pooled_width": pooled_width,
                         "spatial_scale": spatial_scale,
                     })
    return align_out
Example #28
def l2_norm(x, axis, epsilon=1e-12, name=None):
    if len(x.shape) == 1:
        axis = 0
    check_variable_and_dtype(x, "X", ("float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="norm",
                     inputs={"X": x},
                     outputs={
                         "Out": out,
                         "Norm": norm
                     },
                     attrs={
                         "axis": 1 if axis is None else axis,
                         "epsilon": epsilon,
                     })
    return F.squeeze(norm, axes=[axis])
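The value returned is the L2 norm along `axis` (the normalized tensor `out` is created but discarded). A numpy sketch, assuming the norm op adds epsilon under the square root:

import numpy as np

x = np.array([[3., 4.], [0., 2.]])
axis, epsilon = 1, 1e-12
norm = np.sqrt(np.sum(x * x, axis=axis) + epsilon)
print(norm)  # [5. 2.]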
Example #29
def iou_similarity(x, y, box_normalized=True, name=None):
    """
    Computes intersection-over-union (IOU) between two box lists.
    Box list 'X' should be a LoDTensor and 'Y' is a common Tensor,
    boxes in 'Y' are shared by all instance of the batched inputs of X.
    Given two boxes A and B, the calculation of IOU is as follows:
    $$
    IOU(A, B) =
    \\frac{area(A\\cap B)}{area(A)+area(B)-area(A\\cap B)}
    $$
    Args:
        x (Tensor): Box list X is a 2-D Tensor with shape [N, 4] holds N
             boxes, each box is represented as [xmin, ymin, xmax, ymax],
             the shape of X is [N, 4]. [xmin, ymin] is the left top
             coordinate of the box if the input is image feature map, they
             are close to the origin of the coordinate system.
             [xmax, ymax] is the right bottom coordinate of the box.
             The data type is float32 or float64.
        y (Tensor): Box list Y holds M boxes, each box is represented as
             [xmin, ymin, xmax, ymax], the shape of Y is [M, 4].
             [xmin, ymin] is the left top coordinate of the box if the
             input is image feature map, and [xmax, ymax] is the right
             bottom coordinate of the box. The data type is float32 or float64.
        box_normalized(bool): Whether treat the priorbox as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Tensor: The output of iou_similarity op, a tensor with shape [N, M]
              representing pairwise iou scores. The data type is same with x.
    Examples:
        .. code-block:: python

            import paddle
            from ppdet.modeling import ops
            paddle.enable_static()
            x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
            y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
            iou = ops.iou_similarity(x=x, y=y)
    """

    if in_dygraph_mode():
        out = core.ops.iou_similarity(x, y, 'box_normalized', box_normalized)
        return out
    else:
        helper = LayerHelper("iou_similarity", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type="iou_similarity",
            inputs={"X": x,
                    "Y": y},
            attrs={"box_normalized": box_normalized},
            outputs={"Out": out})
        return out
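A numpy reference for the pairwise IoU matrix the op computes, using the normalized-box convention where width = xmax - xmin (sample boxes are illustrative):

import numpy as np

def iou_similarity_ref(x, y):
    # x: (N, 4), y: (M, 4) as [xmin, ymin, xmax, ymax] -> (N, M) IoU matrix
    lt = np.maximum(x[:, None, :2], y[None, :, :2])  # intersection top-left
    rb = np.minimum(x[:, None, 2:], y[None, :, 2:])  # intersection bottom-right
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    area_x = (x[:, 2] - x[:, 0]) * (x[:, 3] - x[:, 1])
    area_y = (y[:, 2] - y[:, 0]) * (y[:, 3] - y[:, 1])
    return inter / (area_x[:, None] + area_y[None, :] - inter)

x = np.array([[0., 0., 2., 2.]])
y = np.array([[1., 1., 3., 3.]])
print(iou_similarity_ref(x, y))  # [[0.14285714]]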
Example #30
def bmm(x, y, name=None):
    """
	:alias_main: paddle.bmm
	:alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm

    Applies batched matrix multiplication to two tensors.

    Both of the two input tensors must be three-dimensional and share the same batch size.

    If x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            import paddle

            paddle.disable_static()

            # In imperative mode:
            # size x: (2, 2, 3) and y: (2, 3, 2)
            x = paddle.to_tensor([[[1.0, 1.0, 1.0],
                                   [2.0, 2.0, 2.0]],
                                  [[3.0, 3.0, 3.0],
                                   [4.0, 4.0, 4.0]]])
            y = paddle.to_tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                                  [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]])
            out = paddle.bmm(x, y)
            # output size: (2, 2, 2)
            # output value:
            # [[[6.0, 6.0], [12.0, 12.0]], [[45.0, 45.0], [60.0, 60.0]]]
            out_np = out.numpy()
    """
    x_shape = x.shape
    y_shape = y.shape
    if not len(x_shape) == len(y_shape) == 3:
        raise ValueError(
            "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}"
            .format(x_shape, y_shape))
    if x_shape[2] != y_shape[1]:
        raise ValueError(
            "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}"
            .format(x_shape, y_shape))
    if in_dygraph_mode():
        return core.ops.bmm(x, y)
    helper = LayerHelper('bmm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out