Example #1
def pact(x):
    """
    Process a variable using the PACT method you define.
    Args:
        x(Tensor): Paddle Tensor that needs to be preprocessed before quantization.
    Returns:
        The processed Tensor x.
    """
    helper = LayerHelper("pact", **locals())
    dtype = 'float32'
    init_thres = 20
    u_param_attr = fluid.ParamAttr(
        name=x.name + '_pact',
        initializer=fluid.initializer.ConstantInitializer(value=init_thres),
        regularizer=fluid.regularizer.L2Decay(0.0001),
        learning_rate=1)
    u_param = helper.create_parameter(attr=u_param_attr,
                                      shape=[1],
                                      dtype=dtype)
    x = fluid.layers.elementwise_sub(
        x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)))
    x = fluid.layers.elementwise_add(
        x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x)))
    return x
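A minimal usage sketch (not from the original source): it assumes the pact function above is in scope, builds a small static-graph program, and shows the clipping at the learnable threshold (initialized to 20). The placeholder name "x" is illustrative.

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.data(name="x", shape=[None, 4], dtype="float32")
    y = pact(x)  # clips x into [-u_param, u_param]; u_param starts at 20

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
feed_x = np.array([[-30.0, -5.0, 5.0, 30.0]], dtype="float32")
out, = exe.run(main_prog, feed={"x": feed_x}, fetch_list=[y])
print(out)  # roughly [[-20., -5., 5., 20.]]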
Example #2
def check_finite_and_unscale(x, scale, name=None, float_status=None):
    """
    Check whether the input X contains only finite data; if so, scale it by the input Scale.

    $$Out = X / scale$$

    If any tensor in X contains Inf or NaN, FoundInfinite will be 1 (True) and Out
    will not be scaled. In this case the data of Out should not be used, since it
    may not be deterministic.
    Otherwise, FoundInfinite will be 0 (False).

    Args:
        x(list|tuple): The input tensors of check_finite_and_unscale operator.
        scale(Tensor): The scale of check_finite_and_unscale operator.
        float_status(Tensor): (Only used on NPU) The float status to check overflow.
    """
    check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
    for e in x:
        check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'],
                                 'check_finite_and_unscale')

    helper = LayerHelper("check_finite_and_unscale", **locals())
    found_inf = helper.create_variable_for_type_inference(dtype='bool')

    inputs = {'X': x, 'Scale': scale}
    if core.is_compiled_with_npu():
        check_variable_and_dtype(float_status, "float_status",
                                 ['float16', 'float32'],
                                 'check_finite_and_unscale')
        inputs['FloatStatus'] = float_status
    outputs = {'Out': x, 'FoundInfinite': found_inf}
    helper.append_op(type='check_finite_and_unscale',
                     inputs=inputs,
                     outputs=outputs)

    return x, found_inf
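A minimal usage sketch (not from the original source), assuming a CPU static-graph program; placeholder names are illustrative.

import numpy as np
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    g0 = paddle.static.data(name="g0", shape=[2], dtype="float32")
    g1 = paddle.static.data(name="g1", shape=[2], dtype="float32")
    scale = paddle.static.data(name="scale", shape=[1], dtype="float32")
    outs, found_inf = check_finite_and_unscale([g0, g1], scale)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
res = exe.run(main_prog,
              feed={"g0": np.array([2.0, 4.0], dtype="float32"),
                    "g1": np.array([8.0, np.inf], dtype="float32"),
                    "scale": np.array([0.5], dtype="float32")},
              fetch_list=[found_inf])
print(res[0])  # [True], because g1 contains Inf, so the outputs were not scaled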
Example #3
File: nn.py Project: iducn/Paddle
def partial_sum(input, start_index=0, length=-1):
    """
    **PartialSum**
    This Op sums the input Tensors over the slice specified by an initial position (start_index)
    and a length (length).
    This Op lives in contrib, which means it is not exposed in the public API.
    Only 2-D Tensor or LoDTensor input is supported. Slicing and concatenation can only be
    performed along the second dimension.
    .. code-block:: text

        Given:
            x = [[0, 1, 2],
                 [3, 4, 5]]
            y = [[6, 7 ,8],
                 [9, 10, 11]]
            output = partial_sum([x, y], start_index=0, length=2)
          we get:

            output = [[6, 8],
                      [12, 14]]
    Args:
        input(list): List of input Tensors with data type float32, float64, int32,
            int64.
        start_index(int, optional): The start index of the slice along the second dimension. Default is 0.
        length(int, optional): The length of the slice; -1 means all remaining columns. Default is -1.
    Returns:
        Variable: A Tensor with the same data type as input's.
    Examples:
        .. code-block:: python

            import paddle.fluid.layers as layers
            import paddle.fluid as fluid
            import numpy as np
            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            sum = layers.partial_sum([x, y], start_index=0, length=2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            xx = np.array([1, 2, 3, 4, 5, 6]).reshape((2, 3)).astype("float32")
            yy = np.array([6, 5, 4, 4, 5, 6]).reshape((2, 3)).astype("float32")
            out = exe.run(feed={"x": xx, "y": yy}, fetch_list=[sum])
    """
    for id, x in enumerate(input):
        check_variable_and_dtype(x, 'input[' + str(id) + ']',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'partial_sum')

    inputs = {'X': input}
    attrs = {}
    attrs['start_index'] = start_index
    attrs['length'] = length
    helper = LayerHelper('partial_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(type='partial_sum',
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs)
    return out
Example #4
def segment_mean(data, segment_ids, name=None):
    r"""
    Segment mean Operator.

    This operator calculates the mean value of the elements of the input `data`
    that share the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\frac{1}{n_i}  \\sum_{j} data[j]$
    where the sum is over all j such that `segment_ids[j] == i` and $n_i$ is the
    number of indices j with `segment_ids[j] == i`.

    Args:
        data (tensor): a tensor, available data type float32, float64.
        segment_ids (tensor): a 1-d tensor, which has the same size
                            as the first dimension of the input data.
                            available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is None). 
                            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
       output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_mean(data, segment_ids)
            #Outputs: [[2., 2., 2.], [4., 5., 6.]]

    """
    if in_dygraph_mode():
        out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MEAN")
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"), "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_mean", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={
                         "X": data,
                         "SegmentIds": segment_ids
                     },
                     outputs={
                         "Out": out,
                         "SummedIds": summed_ids
                     },
                     attrs={"pooltype": "MEAN"})
    return out
Example #5
    def forward(self,
                text,
                text_pair=None,
                do_lower_case=True,
                max_seq_len=-1,
                is_split_into_words=False,
                pad_to_max_seq_len=False):
        if _non_static_mode():
            input_ids, seg_ids = _C_ops.faster_tokenizer(
                self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
            return input_ids, seg_ids

        attrs = {
            "do_lower_case": do_lower_case,
            "max_seq_len": max_seq_len,
            "pad_to_max_seq_len": pad_to_max_seq_len,
            "is_split_into_words": is_split_into_words,
        }
        helper = LayerHelper("faster_tokenizer")
        input_ids = helper.create_variable_for_type_inference(dtype="int64")
        seg_ids = helper.create_variable_for_type_inference(dtype="int64")
        if text_pair is None:
            helper.append_op(
                type='faster_tokenizer',
                inputs={'Vocab': self.vocab,
                        'Text': text},
                outputs={'InputIds': input_ids,
                         'SegmentIds': seg_ids},
                attrs=attrs)
        else:
            helper.append_op(
                type='faster_tokenizer',
                inputs={
                    'Vocab': self.vocab,
                    'Text': text,
                    'TextPair': text_pair
                },
                outputs={'InputIds': input_ids,
                         'SegmentIds': seg_ids},
                attrs=attrs)
        return input_ids, seg_ids
Example #6
def three_nn(input, known, eps=1e-10, name=None):
    """
    **Three Nearest Neighbor Layer**

    This operator finds the top-3 nearest neighbors of each point
    specified by Input(X) among the known points specified by Input(Known)
    and calculates the distances to these nearest neighbors.

    Args:
        input (Variable): The input tensor of three_nn operator. This
                          is a 3-D tensor with shape of [B, N, 3].
        known (Variable): The input tensor of known points of three_nn
                          operator. This is a 3-D tensor with shape of
                          [B, M, 3].
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.

    Returns:
        distance (Variable): The output distance tensor of three_nn operator.
                             This is a 3-D tensor with shape of [B, N, 3].
        idx (Variable): The output index tensor of three_nn operator.
                             This is a 3-D tensor with shape of [B, N, 3].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[16, 3], dtype='float32')
            known = fluid.layers.data(name='known', shape=[32, 3], dtype='float32')
            distance, idx = fluid.layers.three_nn(x, known)
    """
    helper = LayerHelper('three_nn', **locals())
    dtype = helper.input_dtype()
    dist = helper.create_variable_for_type_inference(dtype)
    idx = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="three_nn",
                     inputs={
                         "X": input,
                         "Known": known
                     },
                     outputs={
                         "Distance": dist,
                         "Idx": idx
                     },
                     attrs={'eps': eps})
    return (dist, idx)
Example #7
def segment_max(data, segment_ids, name=None):
    """
    Segment max operator.

    This operator calculates the maximum elements of the input `data` that share
    the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\max_{j} data_{j}$
    where the max is over all j such that `segment_ids[j] == i`.

    Args:
        data (tensor): a tensor, available data type float32, float64.
        segment_ids (tensor): a 1-d tensor, which has the same size
                            as the first dimension of the input data.
                            available data type is int32, int64.

    Returns:
       output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle
            import pgl
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = pgl.math.segment_max(data, segment_ids)
            #Outputs: [[3., 2., 3.], [4., 5., 6.]]

    """
    if in_dygraph_mode():
        out, tmp = core.ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"), "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_max", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={
                         "X": data,
                         "SegmentIds": segment_ids
                     },
                     outputs={
                         "Out": out,
                         "SummedIds": summed_ids
                     },
                     attrs={"pooltype": "MAX"})
    return out
Example #8
def _insert_slice_op(block, idx, tensor, starts, ends, axes, new_var_name):
    """Insert slice op into block at the given block."""
    inputs = {'Input': tensor}
    infer_flags = list(1 for i in range(len(axes)))
    attrs = {
        "axes": axes,
        "starts": starts,
        "ends": ends,
        "infer_flags": infer_flags
    }
    helper = LayerHelper('slice', **locals())
    out = block.create_var(name=new_var_name,
                           dtype=tensor.dtype,
                           type=core.VarDesc.VarType.LOD_TENSOR)
    block._insert_op(idx,
                     type="slice",
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs)
    return out
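A minimal usage sketch (not from the original source), assuming a static-graph program; the tensor and variable names are illustrative.

import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name="x", shape=[4, 6], dtype="float32")
    y = paddle.mean(x)  # an existing op, so there is an index to insert before

block = main_prog.global_block()
# Insert a slice producing x[:, 0:3] right before the op at index 0 (the mean op).
sliced = _insert_slice_op(block, idx=0, tensor=x, starts=[0], ends=[3],
                          axes=[1], new_var_name="x_sliced")
print(sliced.name)  # x_sliced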
Example #9
def yolo_box_post(box0,
                  box1,
                  box2,
                  im_shape,
                  im_scale,
                  anchors0=[116, 90, 156, 198, 373, 326],
                  anchors1=[30, 61, 62, 45, 59, 119],
                  anchors2=[10, 13, 16, 30, 33, 23],
                  class_num=80,
                  conf_thresh=0.005,
                  downsample_ratio0=32,
                  downsample_ratio1=16,
                  downsample_ratio2=8,
                  clip_bbox=True,
                  scale_x_y=1.,
                  nms_threshold=0.45):
    helper = LayerHelper('yolo_box_post', **locals())
    output = helper.create_variable_for_type_inference(dtype=box0.dtype)
    nms_rois_num = helper.create_variable_for_type_inference(dtype='int32')
    inputs = {
        'Boxes0': box0,
        'Boxes1': box1,
        'Boxes2': box2,
        "ImageShape": im_shape,
        "ImageScale": im_scale
    }
    outputs = {'Out': output, 'NmsRoisNum': nms_rois_num}

    helper.append_op(
        type="yolo_box_post",
        inputs=inputs,
        attrs={
            'anchors0': anchors0,
            'anchors1': anchors1,
            'anchors2': anchors2,
            'class_num': class_num,
            'conf_thresh': conf_thresh,
            'downsample_ratio0': downsample_ratio0,
            'downsample_ratio1': downsample_ratio1,
            'downsample_ratio2': downsample_ratio2,
            'clip_bbox': clip_bbox,
            'scale_x_y': scale_x_y,
            'nms_threshold': nms_threshold
        },
        outputs=outputs)
    output.stop_gradient = True
    nms_rois_num.stop_gradient = True
    return output, nms_rois_num
Example #10
def rotated_roi_align(input,
                      rois,
                      pooled_height=1,
                      pooled_width=1,
                      spatial_scale=1.0,
                      name=None):
    """
    **RotatedRoIAlign Operator**

    Rotated Region of Interest align (also known as Rotated RoI align) performs
    bilinear interpolation on inputs of nonuniform sizes to obtain
    fixed-size feature maps (e.g. 7*7).

    It divides each region proposal into equal-sized sections according to
    pooled_width and pooled_height, keeping the location of the original
    result.

    Each RoI bin is transformed to become horizontal by a perspective transformation, and
    the values in each RoI bin are computed directly through bilinear interpolation. The output
    is the mean of all values.
    This avoids the misalignment problem.
    """
    helper = LayerHelper('rrpn_rotated_roi_align', **locals())
    dtype = helper.input_dtype()
    align_out = helper.create_variable_for_type_inference(dtype)
    cx = helper.create_variable_for_type_inference('float32')
    cy = helper.create_variable_for_type_inference('float32')
    helper.append_op(type="rrpn_rotated_roi_align",
                     inputs={
                         "X": input,
                         "ROIs": rois
                     },
                     outputs={
                         "Out": align_out,
                         "ConIdX": cx,
                         "ConIdY": cy
                     },
                     attrs={
                         "pooled_height": pooled_height,
                         "pooled_width": pooled_width,
                         "spatial_scale": spatial_scale,
                     })
    return align_out
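A minimal usage sketch (not from the original source). Assumptions: a static-graph program, a feature map of shape [N, C, H, W], and each rotated RoI encoded as 5 numbers (e.g. center x/y, width, height, angle) grouped per image through the LoD level; shapes and names are illustrative.

import paddle
import paddle.fluid as fluid

paddle.enable_static()
feat = fluid.data(name="feat", shape=[None, 256, 28, 28], dtype="float32")
rois = fluid.data(name="rois", shape=[None, 5], dtype="float32", lod_level=1)
align = rotated_roi_align(feat, rois,
                          pooled_height=7,
                          pooled_width=7,
                          spatial_scale=1.0 / 16.0)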
Example #11
def top_pool(input, is_test=False, name=None):
    """
    This layer calculates the top pooling output based on the input,
    scanning the input from bottom to top for the vertical max-pooling.
    The output has the same shape as the input.
    Args:
        input(Variable): This input is a Tensor with shape [N, C, H, W].
            The data type is float32 or float64.
    Returns:
        Variable(Tensor): The output of top_pool, with shape [N, C, H, W].
        The data type is float32 or float64.
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import cornerpool_lib
            input = fluid.data(
                name='input', shape=[2, 64, 10, 10], dtype='float32')
            output = cornerpool_lib.top_pool(input)
    """
    if is_test:
        helper = LayerHelper('top_pool', **locals())
        dtype = helper.input_dtype()
        output = helper.create_variable_for_type_inference(dtype)
        max_map = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="top_pool",
            inputs={"X": input},
            outputs={"Output": output,
                     "MaxMap": max_map})
        return output

    H = input.shape[2]
    i = 1
    output = input
    while i < H:
        cur = output[:, :, :H - i, :]
        next = output[:, :, i:, :]
        max_v = fluid.layers.elementwise_max(cur, next)
        output = fluid.layers.concat([max_v, output[:, :, H - i:, :]], axis=2)
        i *= 2

    return output
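For reference, a NumPy sketch (not from the original source) of what the training-time loop above computes: each output row h is the maximum of the input over rows h..H-1, i.e. a bottom-to-top running max, which the loop realizes in O(log H) max/concat steps.

import numpy as np

def top_pool_reference(x):
    # x: [N, C, H, W]; out[:, :, h, :] == x[:, :, h:, :].max(axis=2)
    out = x.copy()
    H = x.shape[2]
    for h in range(H - 2, -1, -1):
        out[:, :, h, :] = np.maximum(out[:, :, h, :], out[:, :, h + 1, :])
    return out

x = np.random.rand(1, 2, 8, 8).astype("float32")
ref = top_pool_reference(x)  # same result as the doubling-stride loop in top_pool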
Example #12
    def __init__(self,
                 average_window_rate,
                 parameters=None,
                 min_average_window=10000,
                 max_average_window=10000,
                 name=None):
        super(ModelAverage, self).__init__(
            learning_rate=0.0,
            parameters=parameters,
            weight_decay=None,
            grad_clip=None,
            name=name)

        self.helper = LayerHelper(self.__class__.__name__)
        self.average_window = average_window_rate
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window
        self.type = "average_accumulates"

        if not framework.in_dygraph_mode():
            global_block = framework.default_main_program().global_block()
            all_parameters = parameters if parameters else global_block.all_parameters(
            )

            self._create_accumulators(global_block, all_parameters)
            for param in all_parameters:
                self._append_optimize_op(global_block, [param, None])
            self.apply_program = Program()
            block = self.apply_program.global_block()
            with framework.program_guard(main_program=self.apply_program):
                for param in all_parameters:
                    self._add_average_apply_op(block, param)
            self.restore_program = Program()
            block = self.restore_program.global_block()
            with framework.program_guard(main_program=self.restore_program):
                for param in all_parameters:
                    self._add_average_restore_op(block, param)
Example #13
def three_interp(input, weight, idx, name=None):
    """
    **Three Interpolate Layer**

    This operator calculates interpolation results from the input, weight and
    index.

    Args:
        input (Variable): The input tensor of three_interp operator. This
                          is a 3-D tensor with shape of [B, M, C].
        weight (Variable): The weight tensor of three_interp operator. This
                          is a 3-D tensor with shape of [B, N, 3].
        idx (Variable): The index tensor of three_interp operator. This
                          is a 3-D tensor with shape of [B, N, 3].
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.

    Returns:
        output (Variable): The output tensor of three_interp operator.
                             This is a 3-D tensor with shape of [B, N, C].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[16, 3], dtype='float32')
            weight = fluid.layers.data(name='weight', shape=[32, 3], dtype='float32')
            index = fluid.layers.data(name='index', shape=[32, 3], dtype='int32')
            out = fluid.layers.three_interp(x, weight, index)
    """
    helper = LayerHelper('three_interp', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="three_interp",
                     inputs={
                         "X": input,
                         "Weight": weight,
                         "Idx": idx
                     },
                     outputs={
                         "Out": out,
                     })
    return out
Example #14
class AuxiliaryHead(fluid.dygraph.Layer):
    def __init__(self, name_scope, num_classes):
        super(AuxiliaryHead, self).__init__(name_scope)

        self.pool1 = fluid.dygraph.Pool2D(5,
                                          'avg',
                                          pool_stride=3,
                                          pool_padding=0)
        self.conv1 = fluid.dygraph.Conv2D(128, 1, bias_attr=False)
        self.bn1 = fluid.dygraph.BatchNorm(128, act='relu6')
        self.conv2 = fluid.dygraph.Conv2D(768, 2, bias_attr=False)
        self.bn2 = fluid.dygraph.BatchNorm(768, act='relu6')
        self.classifier = fluid.dygraph.FC(num_classes, act='softmax')
        self.layer_helper = LayerHelper(self.full_name(), act='relu6')

    def forward(self, inputs):  #pylint: disable=arguments-differ
        inputs = self.layer_helper.append_activation(inputs)
        inputs = self.pool1(inputs)
        inputs = self.conv1(inputs)
        inputs = self.bn1(inputs)
        inputs = self.conv2(inputs)
        inputs = self.bn2(inputs)
        inputs = self.classifier(inputs)
        return inputs
Example #15
def l2_norm(x, axis, epsilon=1e-12, name=None):
    if len(x.shape) == 1:
        axis = 0
    check_variable_and_dtype(x, "X", ("float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="norm",
                     inputs={"X": x},
                     outputs={
                         "Out": out,
                         "Norm": norm
                     },
                     attrs={
                         "axis": 1 if axis is None else axis,
                         "epsilon": epsilon,
                     })
    return F.squeeze(norm, axes=[axis])
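A minimal usage sketch (not from the original source). Assumption: `F` in the snippet above refers to `paddle.fluid.layers`, so l2_norm returns the per-row L2 norms with the reduced axis squeezed away; names are illustrative.

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()
x = fluid.data(name="x", shape=[3, 4], dtype="float32")
norm = l2_norm(x, axis=1)  # shape [3]: L2 norm of each row

exe = fluid.Executor(fluid.CPUPlace())
feed_x = np.arange(12, dtype="float32").reshape(3, 4)
out, = exe.run(fluid.default_main_program(),
               feed={"x": feed_x}, fetch_list=[norm])
print(out)  # approximately [ 3.742 11.225 19.131]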
Example #16
def gather_point(input, index):
    """
    **Gather Point Layer**
    Output is obtained by gathering entries of X indexed by `index`
    and concatenating them together.
    .. math::
        Out = X[Index]
    .. code-block:: text

        Given:
        X = [[1, 2, 3],
             [3, 4, 5],
             [5, 6, 7]]
        Index = [1, 2]
        Then:
        Out = [[3, 4, 5],
               [5, 6, 7]]
    Args:
        input (Variable): The source input with rank>=1, This
                          is a 3-D tensor with shape of [B, N, 3].
        index (Variable): The index input with shape of [B, M].
      
    Returns:
        output (Variable): The output is a tensor with shape of [B, M, 3].
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[-1, 5, 3], dtype='float32')
            index = fluid.layers.data(name='index', shape=[-1, 1], dtype='int32')
            output = fluid.layers.gather_point(x, index)
    """

    helper = LayerHelper('gather_point', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="gather_point",
                     inputs={
                         "X": input,
                         "Index": index
                     },
                     outputs={"Output": out})
    return out
Example #17
    def vector_norm(input,
                    porder=None,
                    axis=None,
                    keepdim=False,
                    asvector=False,
                    name=None):
        """
        Calculate the p-order vector norm along a certain dimension of Tensor `input`.
        Args:
          input (Variable): Tensor, data type float32, float64.
          porder (float, optional): None for porder=2.0.
          axis (int, optional): None for last dimension.
          keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
        """
        if in_dygraph_mode():
            if axis is None: axis = -1
            return core.ops.p_norm(input, 'porder', porder, 'axis', axis,
                                   'keepdim', keepdim, 'asvector', asvector)
        if porder is not None:
            check_type(porder, 'porder', (float, int), 'p_norm')
        if axis is not None:
            check_type(axis, 'axis', (int), 'p_norm')
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'p_norm')

        attrs = {
            'axis': axis if axis is not None else -1,
            'porder': float(porder) if porder is not None else 2.0,
            'keepdim': keepdim,
            'asvector': asvector,
            'epsilon': 1e-12,
        }
        helper = LayerHelper('p_norm', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())

        helper.append_op(type='p_norm',
                         inputs={'X': input},
                         outputs={'Out': out},
                         attrs=attrs)
        return out
Example #18
def group_points(input, idx, name=None):
    """
    **Group Points Layer**

    This operator groups input points by index.

    Args:
        input (Variable): The input tensor of group_points operator. This
                          is a 3-D tensor with shape of [B, N, C].
        idx (Variable): The index tensor of group_points operator. This
                          is a 3-D tensor with shape of [B, M, S].
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.

    Returns:
        output (Variable): The output tensor of group_points operator.
                             This is a 4-D tensor with shape of [B, M, S, C].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[16, 3], dtype='float32')
            index = fluid.layers.data(name='index', shape=[32, 3], dtype='int32')
            out  = fluid.layers.group_points(x, index)
    """
    helper = LayerHelper('group_points', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="group_points",
                     inputs={
                         "X": input,
                         "Idx": idx
                     },
                     outputs={
                         "Out": out,
                     })
    return out
Example #19
def query_ball(input, new_points, radius, n_sample):
    """
    **Query Ball Layer**

    Output is a tensor with the indices of the features that form the query balls.

    Args:
        input(Variable): XYZ coordinates of features with shape of [B,N,3].
        new_points(Variable): Centers coordinates of the ball query with shape of [B,M,3].
        radius(float|Variable): Radius of the balls.
        n_sample(int|Variable): Maximum number of features in the balls.
    Returns:
        output(Variable): Tensor with the indices of the features that form the query balls, with shape of [B, M, n_sample].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='points',shape=[-1,5,3],dtype='float32')
            new_points = fluid.layers.data(name='new_points', shape=[-1,2,3], dtype='float32')
            output = fluid.layers.query_ball(x,new_points,radius=4.0,n_sample=5)



    """
    helper = LayerHelper('query_ball', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="query_ball",
                     inputs={
                         "Points": input,
                         "New_Points": new_points
                     },
                     attrs={
                         "N_sample": n_sample,
                         "Radius": radius
                     },
                     outputs={"Output": out})
    return out
Example #20
def create_parameter(layers, shape, dtype):
    # use layerhelper to init bias, scale, mean, variance
    helper = LayerHelper("batch_norm", **locals())
    param_name = "batch_norm_" + str(layers)
    scale = helper.create_parameter(
        attr=fluid.ParamAttr(name=param_name + '.w' + '_0'),
        shape=[shape],
        dtype=dtype,
        default_initializer=Constant(1.0))
    scale.stop_gradient = True

    bias = helper.create_parameter(
        attr=fluid.ParamAttr(name=param_name + '.b' + '_0'),
        shape=[shape],
        dtype=dtype,
        is_bias=True)
    bias.stop_gradient = True

    mean = helper.create_parameter(
        attr=ParamAttr(
            name=param_name + '.w' + '_1',
            initializer=Constant(0.0),
            trainable=False),
        shape=[shape],
        dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(
        attr=ParamAttr(
            name=param_name + '.w' + '_2',
            initializer=Constant(1.0),
            trainable=False),
        shape=[shape],
        dtype=dtype)
    variance.stop_gradient = True

    return scale, bias, mean, variance
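A minimal usage sketch (not from the original source), assuming the names used above (fluid, ParamAttr, Constant, LayerHelper) are in scope and a static-graph program; the channel count 64 is illustrative.

import paddle
import paddle.fluid as fluid

paddle.enable_static()
scale, bias, mean, variance = create_parameter(layers=0, shape=64, dtype="float32")

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())  # runs the Constant initializers
print(scale.name, list(scale.shape))  # batch_norm_0.w_0 [64]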
Example #21
def fluid_batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               mean_attr=None,
               var_attr=None,
               data_layout='NCHW',
               in_place=False,
               name=None,
               moving_mean_name=None,
               moving_variance_name=None,
               do_model_average_for_mean_and_var=False,
               fuse_with_relu=False):
    """
    **Batch Normalization Layer**
    Edited by Lihang Liu to expose mean_attr and var_attr.

    Can be used as a normalizer function for conv2d and fully_connected operations.
    The required data format for this layer is one of the following:

    1. NHWC `[batch, in_height, in_width, in_channels]`

    2. NCHW `[batch, in_channels, in_height, in_width]`

    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Args:
        input(variable): The input variable which is a LoDTensor.
        act(string, Default None): Activation type, linear|relu|prelu|...
        is_test(bool, Default False): A flag indicating whether the layer is in test phase.
        momentum(float, Default 0.9): The value used for the moving_mean and moving_var computation.
        epsilon(float, Default 1e-05): A small value added to the variance to avoid division by zero.
        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as param_attr. If the Initializer of the param_attr
             is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as bias_attr. If the Initializer of the bias_attr
             is not set, the bias is initialized zero. Default: None.
        mean_attr(ParamAttr|None): The parameter attribute for the moving mean. If None,
             a non-trainable parameter named by moving_mean_name is created. Default: None.
        var_attr(ParamAttr|None): The parameter attribute for the moving variance. If None,
             a non-trainable parameter named by moving_variance_name is created. Default: None.
        data_layout(string, default NCHW): NCHW|NHWC
        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.
        moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
        moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
        do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not.
        fuse_with_relu (bool): if True, this OP performs relu after batch norm.

    Returns:
        Variable: A tensor variable which is the result after applying batch normalization on the input.

    Examples:

        .. code-block:: python

            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.batch_norm(input=hidden1)
    """
    assert bias_attr is not False, "bias_attr should not be False in batch_norm."
    helper = LayerHelper('batch_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    elif data_layout == 'NHWC':
        channel_num = input_shape[-1]
    else:
        raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))

    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    if mean_attr is None:
        mean = helper.create_parameter(
            attr=ParamAttr(
                name=moving_mean_name,
                initializer=Constant(0.0),
                trainable=False,
                do_model_average=do_model_average_for_mean_and_var),
            shape=param_shape,
            dtype=input.dtype)
    else:
        mean = helper.create_parameter(
            attr=mean_attr,
            shape=param_shape,
            dtype=input.dtype)
    mean.stop_gradient = True

    if var_attr is None:
        variance = helper.create_parameter(
            attr=ParamAttr(
                name=moving_variance_name,
                initializer=Constant(1.0),
                trainable=False,
                do_model_average=do_model_average_for_mean_and_var),
            shape=param_shape,
            dtype=input.dtype)
    else:
        variance = helper.create_parameter(
            attr=var_attr,
            shape=param_shape,
            dtype=input.dtype)
    variance.stop_gradient = True

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance out share the same memory
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    batch_norm_out = input if in_place else helper.create_variable_for_type_inference(
        dtype)

    helper.append_op(
        type="batch_norm",
        inputs={
            "X": input,
            "Scale": scale,
            "Bias": bias,
            "Mean": mean,
            "Variance": variance
        },
        outputs={
            "Y": batch_norm_out,
            "MeanOut": mean_out,
            "VarianceOut": variance_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={
            "momentum": momentum,
            "epsilon": epsilon,
            "is_test": is_test,
            "use_mkldnn": False,
            "fuse_with_relu": fuse_with_relu
        })

    return helper.append_activation(batch_norm_out)
Example #22
class SimpleRNNCell(fluid.imperative.Layer):
    def __init__(self, name_scope, step_input_size, hidden_size, output_size,
                 param_attr):
        super(SimpleRNNCell, self).__init__(name_scope)
        self.step_input_size = step_input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self._dtype = core.VarDesc.VarType.FP32
        from paddle.fluid.layer_helper import LayerHelper
        self._helper = LayerHelper(
            'SimpleRNNCell', act="tanh", param_attr=param_attr)

    def _build_once(self, inputs, pre_hidden):
        i2h_param_shape = [self.step_input_size, self.hidden_size]
        h2h_param_shape = [self.hidden_size, self.hidden_size]
        h2o_param_shape = [self.output_size, self.hidden_size]
        self._i2h_w = self._helper.create_parameter(
            attr=self._helper.param_attr,
            shape=i2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2h_w = self._helper.create_parameter(
            attr=self._helper.param_attr,
            shape=h2h_param_shape,
            dtype=self._dtype,
            is_bias=False)
        self._h2o_w = self._helper.create_parameter(
            attr=self._helper.param_attr,
            shape=h2o_param_shape,
            dtype=self._dtype,
            is_bias=False)

    def forward(self, input, pre_hidden):

        tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype)
        tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype)
        hidden = self._helper.create_variable_for_type_inference(self._dtype)
        out = self._helper.create_variable_for_type_inference(self._dtype)
        softmax_out = self._helper.create_variable_for_type_inference(
            self._dtype)
        reduce_out = self._helper.create_variable_for_type_inference(
            self._dtype)
        self._helper.append_op(
            type="mul",
            inputs={"X": input,
                    "Y": self._i2h_w},
            outputs={"Out": tmp_i2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="mul",
            inputs={"X": pre_hidden,
                    "Y": self._h2h_w},
            outputs={"Out": tmp_h2h},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="elementwise_add",
            inputs={'X': tmp_h2h,
                    'Y': tmp_i2h},
            outputs={'Out': hidden},
            attrs={'axis': -1,
                   'use_mkldnn': False})
        hidden = self._helper.append_activation(hidden)

        self._helper.append_op(
            type="mul",
            inputs={"X": hidden,
                    "Y": self._h2o_w},
            outputs={"Out": out},
            attrs={"x_num_col_dims": 1,
                   "y_num_col_dims": 1})

        self._helper.append_op(
            type="softmax",
            inputs={"X": out},
            outputs={"Out": softmax_out},
            attrs={"use_cudnn": False})

        self._helper.append_op(
            type='reduce_sum',
            inputs={'X': softmax_out},
            outputs={'Out': reduce_out},
            attrs={'dim': None,
                   'keep_dim': False,
                   'reduce_all': True})

        return reduce_out, hidden
Example #23
def data(name, shape, dtype='float32', lod_level=0):
    """
    **Data Layer**

    This function creates a variable on the global block. The global variable
    can be accessed by all the following operators in the graph. The variable
    is a placeholder that could be fed with input, such as Executor can feed
    input into the variable.

    Note: 
        `paddle.fluid.layers.data` is deprecated. It will be removed in a
        future version. Please use `paddle.fluid.data` instead.
       
        The `paddle.fluid.layers.data` set shape and dtype at compile time but
        does NOT check the shape or the dtype of fed data, this
        `paddle.fluid.data` checks the shape and the dtype of data fed by
        Executor or ParallelExecutor during run time.

        To feed variable size inputs, users can set None or -1 on the variable
        dimension when using :code:`paddle.fluid.data`, or feed variable size
        inputs directly to :code:`paddle.fluid.layers.data` and PaddlePaddle
        will fit the size accordingly.

        The default :code:`stop_gradient` attribute of the Variable created by
        this API is true, which means the gradient won't be passed backward
        through the data Variable. Set :code:`var.stop_gradient = False` If
        user would like to pass backward gradient.

    Args:
       name (str): The name/alias of the variable, see :ref:`api_guide_Name`
           for more details.
       shape (list|tuple): List|Tuple of integers declaring the shape. You can
           set "None" or -1 at a dimension to indicate the dimension can be of any
           size. For example, it is useful to set changeable batch size as "None" or -1.
       dtype (np.dtype|VarType|str, optional): The type of the data. Supported
           dtype: bool, float16, float32, float64, int8, int16, int32, int64,
           uint8. Default: float32.
       lod_level (int, optional): The LoD level of the LoDTensor. Usually users
           don't have to set this value. For more details about when and how to
           use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0.

    Returns:
        Variable: The global variable that gives access to the data.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np

          # Creates a variable with fixed size [3, 2, 1]
          # User can only feed data of the same shape to x
          x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')

          # Creates a variable with changeable batch size -1.
          # Users can feed data of any batch size into y,
          # but size of each data sample has to be [2, 1]
          y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')

          z = x + y

          # In this example, we will feed x and y with np-ndarray "1"
          # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
          feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)

          exe = fluid.Executor(fluid.CPUPlace())
          out = exe.run(fluid.default_main_program(),
                        feed={
                            'x': feed_data,
                            'y': feed_data
                        },
                        fetch_list=[z.name])

          # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
          print(out)

    """
    helper = LayerHelper('data', **locals())

    check_type(name, 'name', (six.binary_type, six.text_type), 'data')
    check_type(shape, 'shape', (list, tuple), 'data')

    shape = list(shape)
    for i in six.moves.range(len(shape)):
        if shape[i] is None:
            shape[i] = -1

    return helper.create_global_variable(name=name,
                                         shape=shape,
                                         dtype=dtype,
                                         type=core.VarDesc.VarType.LOD_TENSOR,
                                         stop_gradient=True,
                                         lod_level=lod_level,
                                         is_data=True,
                                         need_check_feed=True)
Example #24
def data(name, shape, dtype=None, lod_level=0):
    """
    **Data Layer**

    This function creates a variable on the global block. The global variable
    can be accessed by all the following operators in the graph. The variable
    is a placeholder that could be fed with input, such as Executor can feed
    input into the variable. When `dtype` is None, the dtype
    will get from the global dtype by `paddle.get_default_dtype()`.

    Args:
       name (str): The name/alias of the variable, see :ref:`api_guide_Name`
           for more details.
       shape (list|tuple): List|Tuple of integers declaring the shape. You can
           set "None" or -1 at a dimension to indicate the dimension can be of any
           size. For example, it is useful to set changeable batch size as "None" or -1.
       dtype (np.dtype|str, optional): The type of the data. Supported
           dtype: bool, float16, float32, float64, int8, int16, int32, int64,
           uint8. Default: None. When `dtype` is not set, the dtype will get
           from the global dtype by `paddle.get_default_dtype()`.
       lod_level (int, optional): The LoD level of the LoDTensor. Usually users
           don't have to set this value. For more details about when and how to
           use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0.

    Returns:
        Variable: The global variable that gives access to the data.

    Examples:
        .. code-block:: python

          import numpy as np
          import paddle

          # Creates a variable with fixed size [3, 2, 1]
          # User can only feed data of the same shape to x
          # the dtype is not set, so it will be set to "float32" by
          # paddle.get_default_dtype(). You can use paddle.set_default_dtype() to
          # change the global dtype
          x = paddle.static.data(name='x', shape=[3, 2, 1])

          # Creates a variable with changeable batch size -1.
          # Users can feed data of any batch size into y,
          # but size of each data sample has to be [2, 1]
          y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32')

          z = x + y

          # In this example, we will feed x and y with np-ndarray "1"
          # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
          feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)

          exe = paddle.static.Executor(paddle.framework.CPUPlace())
          out = exe.run(paddle.static.default_main_program(),
                        feed={
                            'x': feed_data,
                            'y': feed_data
                        },
                        fetch_list=[z.name])

          # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
          print(out)

    """
    helper = LayerHelper('data', **locals())
    check_type(name, 'name', (six.binary_type, six.text_type), 'data')
    check_type(shape, 'shape', (list, tuple), 'data')

    shape = list(shape)
    for i in six.moves.range(len(shape)):
        if shape[i] is None:
            shape[i] = -1

    if dtype:
        return helper.create_global_variable(
            name=name,
            shape=shape,
            dtype=dtype,
            type=core.VarDesc.VarType.LOD_TENSOR,
            stop_gradient=True,
            lod_level=lod_level,
            is_data=True,
            need_check_feed=True)
    else:
        return helper.create_global_variable(
            name=name,
            shape=shape,
            dtype=paddle.get_default_dtype(),
            type=core.VarDesc.VarType.LOD_TENSOR,
            stop_gradient=True,
            lod_level=lod_level,
            is_data=True,
            need_check_feed=True)
Example #25
def graph_khop_sampler(row,
                       colptr,
                       input_nodes,
                       sample_sizes,
                       sorted_eids=None,
                       return_eids=False,
                       name=None):
    """
    Graph Khop Sampler API.

    This API is mainly used in the Graph Learning domain, and its main purpose is to
    provide a high-performance graph k-hop sampling method with a subgraph reindex step.
    For example, we take the CSC (Compressed Sparse Column) format of the input graph
    edges as `row` and `colptr`, so as to convert the graph data into a suitable format
    for sampling. `input_nodes` are the nodes for which we need to sample neighbors,
    and `sample_sizes` gives the number of neighbors and the number of layers we want
    to sample.

    Args:
        row (Tensor): One of the components of the CSC format of the input graph, and 
                      the shape should be [num_edges, 1] or [num_edges]. The available
                      data type is int32, int64.
        colptr (Tensor): One of the components of the CSC format of the input graph,
                         and the shape should be [num_nodes + 1, 1] or [num_nodes]. 
                         The data type should be the same with `row`.
        input_nodes (Tensor): The input nodes we need to sample neighbors for, and the 
                              data type should be the same with `row`.
        sample_sizes (list|tuple): The number of neighbors and number of layers we want
                                   to sample. The data type should be int, and the shape
                                   should only have one dimension.
        sorted_eids (Tensor): The sorted edge ids, should not be None when `return_eids`
                              is True. The shape should be [num_edges, 1], and the data
                              type should be the same with `row`.
        return_eids (bool): Whether to return the id of the sample edges. Default is False.
        name (str, optional): Name for the operation (optional, default is None).
                              For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        edge_src (Tensor): The src index of the output edges, also means the first column of 
                           the edges. The shape is [num_sample_edges, 1] currently.
        edge_dst (Tensor): The dst index of the output edges, also means the second column
                           of the edges. The shape is [num_sample_edges, 1] currently.
        sample_index (Tensor): The original id of the input nodes and sampled neighbor nodes.
        reindex_nodes (Tensor): The reindex id of the input nodes.
        edge_eids (Tensor): Return the id of the sample edges if `return_eids` is True.

    Examples:
        
        .. code-block:: python

            import paddle

            row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
            colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
            nodes = [0, 8, 1, 2]
            sample_sizes = [2, 2]
            row = paddle.to_tensor(row, dtype="int64")
            colptr = paddle.to_tensor(colptr, dtype="int64")
            nodes = paddle.to_tensor(nodes, dtype="int64")

            edge_src, edge_dst, sample_index, reindex_nodes = \
                paddle.incubate.graph_khop_sampler(row, colptr, nodes,
                                                   sample_sizes, return_eids=False)

    """

    if _non_static_mode():
        if return_eids:
            if sorted_eids is None:
                raise ValueError(f"`sorted_eid` should not be None "
                                 f"if return_eids is True.")
            edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \
                _C_ops.graph_khop_sampler(row, sorted_eids,
                                              colptr, input_nodes,
                                              "sample_sizes", sample_sizes,
                                              "return_eids", True)
            return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids
        else:
            edge_src, edge_dst, sample_index, reindex_nodes, _ = \
                _C_ops.graph_khop_sampler(row, None,
                                              colptr, input_nodes,
                                              "sample_sizes", sample_sizes,
                                              "return_eids", False)
            return edge_src, edge_dst, sample_index, reindex_nodes

    check_variable_and_dtype(row, "Row", ("int32", "int64"),
                             "graph_khop_sampler")

    if return_eids:
        if sorted_eids is None:
            raise ValueError(f"`sorted_eid` should not be None "
                             f"if return_eids is True.")
        check_variable_and_dtype(sorted_eids, "Eids", ("int32", "int64"),
                                 "graph_khop_sampler")

    check_variable_and_dtype(colptr, "Col_Ptr", ("int32", "int64"),
                             "graph_khop_sampler")
    check_variable_and_dtype(input_nodes, "X", ("int32", "int64"),
                             "graph_khop_sampler")

    helper = LayerHelper("graph_khop_sampler", **locals())
    edge_src = helper.create_variable_for_type_inference(dtype=row.dtype)
    edge_dst = helper.create_variable_for_type_inference(dtype=row.dtype)
    sample_index = helper.create_variable_for_type_inference(dtype=row.dtype)
    reindex_nodes = helper.create_variable_for_type_inference(dtype=row.dtype)
    edge_eids = helper.create_variable_for_type_inference(dtype=row.dtype)
    helper.append_op(type="graph_khop_sampler",
                     inputs={
                         "Row": row,
                         "Eids": sorted_eids,
                         "Col_Ptr": colptr,
                         "X": input_nodes
                     },
                     outputs={
                         "Out_Src": edge_src,
                         "Out_Dst": edge_dst,
                         "Sample_Index": sample_index,
                         "Reindex_X": reindex_nodes,
                         "Out_Eids": edge_eids
                     },
                     attrs={
                         "sample_sizes": sample_sizes,
                         "return_eids": return_eids
                     })
    if return_eids:
        return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids
    else:
        return edge_src, edge_dst, sample_index, reindex_nodes
Example #26
def run_momentum_op(params,
                    grads,
                    velocitys,
                    master_params,
                    learning_rate,
                    place,
                    multi_precision,
                    mu=0.9,
                    rescale_grad=0.01,
                    use_merged=False):
    assert len(params) == len(grads)
    assert len(params) == len(velocitys)
    if multi_precision:
        assert len(params) == len(master_params)
    op_type = 'merged_momentum' if use_merged else 'momentum'
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        helper = LayerHelper(op_type, **locals())
        attrs = {
            'mu': mu,
            'multi_precision': multi_precision,
            'rescale_grad': rescale_grad,
        }

        param_vars = [
            helper.create_variable(persistable=True,
                                   shape=p.shape,
                                   dtype=p.dtype) for p in params
        ]
        grad_vars = [
            helper.create_variable(shape=g.shape, dtype=g.dtype) for g in grads
        ]
        velocity_vars = [
            helper.create_variable(persistable=True,
                                   shape=v.shape,
                                   dtype=v.dtype) for v in velocitys
        ]
        lr_var = helper.create_variable(persistable=True,
                                        shape=learning_rate.shape,
                                        dtype=learning_rate.dtype)

        feed_dict = OrderedDict()

        feed_dict.update(
            OrderedDict([(p_var.name, p_val)
                         for p_var, p_val in zip(param_vars, params)]))
        feed_dict.update(
            OrderedDict([(v_var.name, v_val)
                         for v_var, v_val in zip(velocity_vars, velocitys)]))
        fetch_list = list(feed_dict.keys())

        feed_dict.update(
            OrderedDict([(g_var.name, g_val)
                         for g_var, g_val in zip(grad_vars, grads)]))
        feed_dict.update({lr_var.name: learning_rate})

        if multi_precision:
            master_param_vars = [
                helper.create_variable(persistable=True,
                                       shape=p.shape,
                                       dtype=p.dtype) for p in master_params
            ]
            feed_dict.update(
                OrderedDict([
                    (mp_var.name, mp_val)
                    for mp_var, mp_val in zip(master_param_vars, master_params)
                ]))
            # CPUPlace does not use MasterParam
            if isinstance(place, paddle.CUDAPlace):
                fetch_list = fetch_list + [
                    mp_var.name for mp_var in master_param_vars
                ]
        else:
            master_param_vars = None

        if not use_merged:
            for i, (p, g,
                    v) in enumerate(zip(param_vars, grad_vars, velocity_vars)):
                inputs = {
                    'Param': p,
                    'Grad': g,
                    'Velocity': v,
                    'LearningRate': lr_var,
                }
                outputs = {'ParamOut': p, 'VelocityOut': v}
                if multi_precision:
                    inputs['MasterParam'] = master_param_vars[i]
                    outputs['MasterParamOut'] = master_param_vars[i]
                helper.append_op(type=op_type,
                                 inputs=inputs,
                                 outputs=outputs,
                                 attrs=attrs)
        else:
            inputs = {
                'Param': param_vars,
                'Grad': grad_vars,
                'Velocity': velocity_vars,
                'LearningRate': lr_var,
            }
            outputs = {'ParamOut': param_vars, 'VelocityOut': velocity_vars}
            if multi_precision:
                inputs['MasterParam'] = master_param_vars
                outputs['MasterParamOut'] = master_param_vars
            helper.append_op(type=op_type,
                             inputs=inputs,
                             outputs=outputs,
                             attrs=attrs)

    exe = paddle.static.Executor(place)
    with paddle.static.scope_guard(paddle.static.Scope()):
        exe.run(startup)
        return exe.run(main, feed=feed_dict, fetch_list=fetch_list)
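
A hedged usage sketch for the helper above, checking that the plain and merged momentum paths give the same update on a couple of small numpy tensors; the shapes and the equality check are illustrative assumptions, and the merged_momentum kernel must be available on the chosen place.

import numpy as np
import paddle

paddle.enable_static()
place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace()

shapes = [[2, 3], [5]]
params = [np.random.rand(*s).astype('float32') for s in shapes]
grads = [np.random.rand(*s).astype('float32') for s in shapes]
velocitys = [np.zeros(s, dtype='float32') for s in shapes]
learning_rate = np.array([0.01], dtype='float32')

out_plain = run_momentum_op(params, grads, velocitys, None, learning_rate,
                            place, multi_precision=False, use_merged=False)
out_merged = run_momentum_op(params, grads, velocitys, None, learning_rate,
                             place, multi_precision=False, use_merged=True)

# Both paths should produce the same updated params and velocitys.
for a, b in zip(out_plain, out_merged):
    np.testing.assert_allclose(a, b, rtol=1e-6)
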
Exemplo n.º 27
0
def scatter(x, index, updates, overwrite=True, name=None):
    """
    **Scatter Layer**
    Output is obtained by updating the input on selected indices based on updates.
    
    .. code-block:: python
    
        import numpy as np
        #input:
        x = np.array([[1, 1], [2, 2], [3, 3]])
        index = np.array([2, 1, 0, 1])
        # the first dim of updates should equal the length of index;
        # the remaining dims of updates should be the same as those of x
        updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
        overwrite = False
        # calculation:
        if not overwrite:
            for i in range(len(index)):
                x[index[i]] = np.zeros((2))
        for i in range(len(index)):
            if (overwrite):
                x[index[i]] = updates[i]
            else:
                x[index[i]] += updates[i]
        # output:
        out = np.array([[3, 3], [6, 6], [1, 1]])
        out.shape # [3, 2]
    **NOTICE**: The order in which updates are applied is nondeterministic, 
    so the output will be nondeterministic if index contains duplicates.
    Args:
        x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
        index (Tensor): The 1-D index Tensor. Data type can be int32, int64. The length of index cannot exceed the length of updates, and the values in index cannot exceed the length of input.
        updates (Tensor): The Tensor used to update the input based on index. Its first dimension should equal the length of index, and its remaining dimensions should match those of x.
        overwrite (bool): The mode for updating the output when there are duplicate indices.
          If True, use the overwrite mode to update the output at the same index;
          if False, use the accumulate mode to update the output at the same index. Default value is True.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .
 
    Returns:
        Tensor: The output is a Tensor with the same shape as x.
    Examples:
        .. code-block:: python
            
            import paddle
            x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
            index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
            updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
  
            output1 = paddle.scatter(x, index, updates, overwrite=False)
            # [[3., 3.],
            #  [6., 6.],
            #  [1., 1.]]
            output2 = paddle.scatter(x, index, updates, overwrite=True)
            # CPU device:
            # [[3., 3.],
            #  [4., 4.],
            #  [1., 1.]]
            # GPU device maybe have two results because of the repeated numbers in index
            # result 1:
            # [[3., 3.],
            #  [4., 4.],
            #  [1., 1.]]
            # result 2:
            # [[3., 3.],
            #  [2., 2.],
            #  [1., 1.]]
    """
    if in_dygraph_mode():
        return core.ops.scatter(x, index, updates, 'overwrite', overwrite)

    check_variable_and_dtype(
        x, 'dtype', ['float32', 'int32', 'int64', 'float64'], 'scatter')
    check_type(overwrite, 'overwrite', bool, 'scatter')
    helper = LayerHelper('scatter', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type="scatter",
        inputs={"X": x,
                "Ids": index,
                "Updates": updates},
        attrs={'overwrite': overwrite},
        outputs={"Out": out})
    return out
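
A small numpy sketch mirroring the accumulate-mode calculation spelled out in the docstring above; ``scatter_numpy_ref`` is a hypothetical reference of the documented semantics for duplicated indices, not part of the API.

import numpy as np

def scatter_numpy_ref(x, index, updates, overwrite=True):
    # With overwrite=False, rows hit by index are first zeroed, then the
    # corresponding updates are accumulated; with overwrite=True the last write wins.
    out = x.copy()
    if not overwrite:
        out[index] = 0
        for i in range(len(index)):
            out[index[i]] += updates[i]
    else:
        for i in range(len(index)):
            out[index[i]] = updates[i]
    return out

x = np.array([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = np.array([2, 1, 0, 1])
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
print(scatter_numpy_ref(x, index, updates, overwrite=False))
# [[3. 3.]
#  [6. 6.]
#  [1. 1.]]
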
Exemplo n.º 28
0
def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
    """
    This operator changes the shape of ``x`` without changing its data.

    The target shape can be given by ``shape`` or ``actual_shape``.
    When ``shape`` and ``actual_shape`` are set at the same time,
    ``actual_shape`` has a higher priority than ``shape``, but in this case ``shape`` can only be an
    integer list or tuple, and it still should be set correctly to
    guarantee shape inference at compile time.

    Some tricks exist when specifying the target shape.

    1. -1 means the value of this dimension is inferred from the total element
    number of x and the remaining dimensions. Thus one and only one dimension can
    be set to -1.

    2. 0 means the actual dimension value is going to be copied from the
    corresponding dimension of x. The indices of 0s in shape cannot exceed
    the number of dimensions of x.

    Args:
        x(Variable): A ``Tensor`` or ``LoDTensor``. The data type is ``int64``.
        shape(list|tuple|Variable): Define the target shape. At most one dimension of the target shape can be -1.
                        The data type is ``int32``. If ``shape`` is a list or tuple, its elements should be integers or Tensors with shape [1].
                        If ``shape`` is a Variable, it should be a 1-D Tensor.
        actual_shape(Variable, optional): A 1-D ``Tensor`` or ``LoDTensor``. The data type is ``int32``. If provided, reshape
                                according to this given shape rather than the shape specified by ``shape``.
                                That is to say, ``actual_shape`` has a higher priority
                                than ``shape(list|tuple)`` but not ``shape(Variable)``.
                                This argument ``actual_shape`` will be removed in a future version.
        act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
        inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
                       are the same variable. Otherwise, the input and output of
                       ``layers.reshape`` are different variable. Default False. Note that if ``x``
                       is more than one OPs' input, ``inplace`` must be False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
                            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is the same as that of ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, the reshaped tensor variable is returned, otherwise the activated tensor variable is returned.


    Examples:
        .. code-block:: python
            import paddle_fl.mpc as pfl_mpc

            pfl_mpc.init("aby3", int(args.role), "localhost", args.server, int(args.port))
            data_1 = pfl_mpc.data(name='x', shape=[3, 3], dtype='int64')
            op_reshape = pfl_mpc.layers.reshape(data_1, [2, 1, 9])
    """

    check_mpc_variable_and_dtype(x, 'x', ['int64'], 'reshape')
    check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
    check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')

    helper = MpcLayerHelper("reshape2", **locals())
    _helper = LayerHelper("reshape2", **locals())

    def get_new_shape_tensor(list_shape):
        new_shape_tensor = []
        for dim in list_shape:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_shape_tensor.append(dim)
            else:
                assert (isinstance(dim, int))
                temp_out = _helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
        return new_shape_tensor

    def get_attr_shape(list_shape):
        unk_dim_idx = -1
        attrs_shape = []
        for dim_idx, dim_size in enumerate(list_shape):
            if isinstance(dim_size, Variable):
                attrs_shape.append(-1)
            else:
                attrs_shape.append(dim_size)
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one dimension value of 'shape' in reshape can "
                        "be -1. But received shape[%d] is also -1." % dim_idx)
                    unk_dim_idx = dim_idx
                elif dim_size == 0:
                    assert dim_idx < len(x.shape), (
                        "The index of 0 in `shape` must be less than "
                        "the input tensor X's dimensions. "
                        "But received shape[%d] = 0, X's dimensions = %d." %
                        (dim_idx, len(x.shape)))
                else:
                    assert dim_size > 0, (
                        "Each dimension value of 'shape' in reshape must not "
                        "be negative except one unknown dimension. "
                        "But received shape[%d] = %s." %
                        (dim_idx, str(dim_size)))
        return attrs_shape

    inputs = {"X": x}
    attrs = {}
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        inputs["Shape"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, (
            "The size of 'shape' in reshape can't be zero, "
            "but received %s." % len(shape))
        attrs["shape"] = get_attr_shape(shape)

        if utils._contain_var(shape):
            inputs['ShapeTensor'] = get_new_shape_tensor(shape)
        elif isinstance(actual_shape, Variable):
            actual_shape.stop_gradient = True
            inputs["Shape"] = actual_shape

    out = x if inplace else helper.create_mpc_variable_for_type_inference(
        dtype=x.dtype)
    x_shape = helper.create_mpc_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="reshape2",
                     inputs=inputs,
                     attrs=attrs,
                     outputs={
                         "Out": out,
                         "XShape": x_shape
                     })

    return helper.append_activation(out)
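
A hedged, pure-Python sketch of the -1/0 shape rules that ``get_attr_shape`` validates above; ``infer_shape`` is a hypothetical helper used only to illustrate how a target shape containing 0 and -1 would be resolved.

import numpy as np

def infer_shape(x_shape, target):
    # 0 copies the corresponding dim from x; a single -1 is inferred from the rest.
    resolved = [x_shape[i] if d == 0 else d for i, d in enumerate(target)]
    if -1 in resolved:
        known = int(np.prod([d for d in resolved if d != -1]))
        resolved[resolved.index(-1)] = int(np.prod(x_shape)) // known
    return resolved

print(infer_shape([2, 3, 4], [0, -1]))   # [2, 12]
print(infer_shape([2, 3, 4], [-1, 6]))   # [4, 6]
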
Exemplo n.º 29
0
Arquivo: nn.py Projeto: iducn/Paddle
def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'):
    """
    **Tdm Child**
     According to the input node_id on the given tree, return the corresponding child node_ids and
      indicate whether each child is a leaf node through the leaf_mask value.
    .. code-block:: text

        Given:
            tree[[0], [1, 2], [3, 4], [5, 6]] # A binary tree with seven nodes
            x = [[2], [3]]
            node_nums = 7
            child_nums = 2

          we get:
            child = [[5, 6],
                     [0, 0]]
            leaf_mask = [[1, 1],
                         [0, 0]]
    Args:
        x(Variable): Variable contained the node_id information, dtype support int32/int64.
        node_nums(int): Number of total nodes.
        child_nums(int): Maximum number of child nodes per node.
        param_attr(ParamAttr): To specify the tdm-tree-info parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr`. The parameter
            should have shape (node_nums, 3 + child_nums) and dtype int32/int64.
            Dimension[1] of tdm-tree-info contains the following:
            1. Item_id(int, shape(1)): if the node is a leaf node, give the item_id corresponding to its node_id, else give 0.
            2. Layer_id(int, shape(1)): indicates which layer the node is on.
            3. Parent_id(int, shape(1)): the node_id of the node's parent.
            4. Child_id(int, shape(child_nums)): all child node_ids of this node should be given.
            If the number of child nodes is insufficient, pad with 0 until the number of children equals child_nums.
        dtype(str): The data type of output child and leaf_mask, support int32/int64.

    Returns:
        tuple: A tuple including the input node's child(Variable) and leaf_mask(Variable).
            If a child is a leaf node, the corresponding leaf_mask equals 1, otherwise it equals 0.

    Examples:
        .. code-block:: python
        import paddle.fluid as fluid
        import numpy as np
        x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
        tree_info = [[0,0,0,1,2],
                     [0,1,0,3,4],[0,1,0,5,6],
                     [0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]]
        tree_info_np = np.array(tree_info)
        tree_info_np = np.reshape(tree_info_np, (7,5))
        node_nums = 7
        child_nums = 2
        child, leaf_mask  = fluid.contrib.layers.tdm_child(x, node_nums, child_nums,
                                param_attr=fluid.ParamAttr(
                                    initializer=fluid.initializer.NumpyArrayInitializer(
                                                                            tree_info_np)))
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        xx = np.array([[2],[3]]).reshape((2,1)).astype("int32")
        child_res, leaf_mask_res = exe.run(feed={"x":xx}, fetch_list=[child, leaf_mask])
     """
    helper = LayerHelper("tdm_child", **locals())
    check_dtype(dtype, 'dtype', ['int32', 'int64'],
                'fluid.contrib.layers.tdm_child')
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    tree_info = helper.create_parameter(attr=helper.param_attr,
                                        shape=[node_nums, 3 + child_nums],
                                        dtype=dtype,
                                        default_initializer=Constant(0))
    tree_info.stop_gradient = True

    child = helper.create_variable_for_type_inference(dtype=dtype)
    leaf_mask = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(type='tdm_child',
                     inputs={
                         'X': x,
                         'TreeInfo': tree_info
                     },
                     outputs={
                         'Child': child,
                         'LeafMask': leaf_mask
                     },
                     attrs={
                         'child_nums': child_nums,
                         'dtype': c_dtype
                     },
                     stop_gradient=True)
    return (child, leaf_mask)
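
A small numpy sketch of the lookup described in the "Given / we get" block above, using the example tree_info layout from the docstring; the leaf test used here (a node whose child ids are all 0 is a leaf) is an assumption for illustration only.

import numpy as np

tree_info = np.array([[0, 0, 0, 1, 2],
                      [0, 1, 0, 3, 4], [0, 1, 0, 5, 6],
                      [0, 2, 1, 0, 0], [1, 2, 1, 0, 0], [2, 2, 2, 0, 0], [3, 2, 2, 0, 0]])
child_nums = 2
x = np.array([[2], [3]])

child = tree_info[x.reshape(-1), 3:3 + child_nums]           # child ids of each queried node
is_leaf = tree_info[:, 3:3 + child_nums].sum(axis=1) == 0    # nodes without children are leaves
leaf_mask = np.where(child > 0, is_leaf[child].astype(int), 0)

print(child)      # [[5 6] [0 0]]
print(leaf_mask)  # [[1 1] [0 0]]
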
Exemplo n.º 30
0
Arquivo: nn.py Projeto: iducn/Paddle
def shuffle_batch(x, seed=None):
    """
    This layer shuffles the input tensor :attr:`x`. Normally, :attr:`x` is a 2-D LoDTensor.

    :attr:`x` is a LoDTensor to be shuffled with shape :math:`[N_1, N_2, ..., N_k, D]`. Note that the last dim of the input will not be shuffled.
    :math:`N_1 * N_2 * ... * N_k` elements of length :math:`D` will be shuffled randomly.

    For Example:

    .. code-block:: text

      Input:
        x.data = [[1, 2], [3, 4], [5, 6], [7, 8]]
        x.dims = [4, 2]

      Attrs:
        seed = 2019

      Output:
        Out.data =[[7, 8], [1, 2], [3, 4], [5, 6]]
        Out.dims = [4, 2]

    Args:
        x (Variable): The input variable. The input variable is an N-D LoDTensor with type int, float32 or float64.
        seed (None|int|Variable): The startup seed. If set, it will be used as the startup seed of the shuffle engine.
                If not set (default), the startup seed of the shuffle engine will be generated randomly.

    Returns:
        Variable: The shuffled LoDTensor with the same shape and lod as the input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name="x", shape=[-1, 4])
            out = fluid.contrib.layers.shuffle_batch(x)
    """
    helper = LayerHelper('shuffle_batch', **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    shuffle_idx = helper.create_variable_for_type_inference(dtype=np.int64)
    if seed is None and helper.main_program.random_seed != 0:
        seed = helper.main_program.random_seed
    if seed is None:
        seed = np.random.randint(-65536, 65535)
    op_attrs = {}
    if isinstance(seed, int):
        op_attrs["startup_seed"] = seed
        seed = helper.create_variable(
            name=unique_name.generate("shuffle_batch_seed"),
            dtype="int64",
            persistable=True)
    helper.append_op(type='shuffle_batch',
                     inputs={
                         'X': x,
                         'Seed': seed
                     },
                     outputs={
                         'Out': out,
                         'ShuffleIdx': shuffle_idx,
                         'SeedOut': seed
                     },
                     attrs=op_attrs)
    return out
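
A numpy sketch of the shuffle semantics stated above: all leading dims are flattened into rows of length D and the rows are permuted, while the last dim stays intact. This is only a reference for the documented behavior, not the op itself; the shapes and seed are illustrative.

import numpy as np

x = np.arange(24).reshape(2, 3, 4)         # N1=2, N2=3, D=4
flat = x.reshape(-1, x.shape[-1])          # (N1*N2, D) rows of length D
rng = np.random.default_rng(2019)
shuffle_idx = rng.permutation(flat.shape[0])
out = flat[shuffle_idx].reshape(x.shape)   # rows reordered, last dim untouched
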