Code example #1
    def test_api(self):
        x_1 = paddle.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
        paddle.concat([x_1, x_1], 0)

        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        positive_1_int32 = paddle.fill_constant([1], "int32", 1)
        positive_1_int64 = paddle.fill_constant([1], "int64", 1)
        negative_int64 = paddle.fill_constant([1], "int64", -3)
        out_1 = paddle.concat(x=[x_2, x_3], axis=1)
        out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
        out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        [res_1, res_2, res_3,
         res_4] = exe.run(paddle.static.default_main_program(),
                          feed={
                              "x_1": input_2,
                              "x_2": input_2,
                              "x_3": input_3
                          },
                          fetch_list=[out_1, out_2, out_3, out_4])
        assert np.array_equal(res_1, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_4, np.concatenate((input_2, input_3),
                                                    axis=1))
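
The point of feeding both positive and negative axis tensors above is that, for these 4-D inputs, axis=1 and axis=-3 address the same dimension, so all four fetched results compare equal to the same NumPy concatenation. A quick NumPy check of that equivalence (illustrative integer data, not the test's inputs):

import numpy as np

a = np.random.randint(0, 10, size=[2, 1, 4, 5], dtype='int32')
b = np.random.randint(0, 10, size=[2, 2, 4, 5], dtype='int32')
assert np.array_equal(np.concatenate((a, b), axis=1),
                      np.concatenate((a, b), axis=-3))
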
Code example #2
    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.

        Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
            target_is_real (bool) - - if the ground truth label is for real images or fake images

        Returns:
            A label tensor filled with the ground truth label, with the same size as the input
        """

        if target_is_real:
            if not hasattr(self, 'target_real_tensor'):
                self.target_real_tensor = paddle.fill_constant(
                    shape=paddle.shape(prediction),
                    value=self.target_real_label,
                    dtype='float32')
            target_tensor = self.target_real_tensor
        else:
            if not hasattr(self, 'target_fake_tensor'):
                self.target_fake_tensor = paddle.fill_constant(
                    shape=paddle.shape(prediction),
                    value=self.target_fake_label,
                    dtype='float32')
            target_tensor = self.target_fake_tensor

        # target_tensor.stop_gradient = True
        return target_tensor
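
A standalone sketch (an illustration, not part of the original class) of what the helper produces: paddle.fill_constant builds a tensor shaped like the prediction and filled with the chosen label value, here with hypothetical labels 1.0 for "real" and 0.0 for "fake".

import numpy as np
import paddle

paddle.disable_static()
prediction = paddle.to_tensor(np.zeros([4, 1], dtype='float32'))  # stand-in discriminator output
real_target = paddle.fill_constant(shape=paddle.shape(prediction),
                                   value=1.0, dtype='float32')    # label tensor for real images
fake_target = paddle.fill_constant(shape=paddle.shape(prediction),
                                   value=0.0, dtype='float32')    # label tensor for fake images
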
Code example #3
File: test_randn_op.py Project: goodcoder-cnn/Paddle
    def test_api(self):
        shape = [1000, 784]
        train_program = Program()
        startup_program = Program()
        with program_guard(train_program, startup_program):
            x1 = paddle.randn(shape, 'float32')
            x2 = paddle.randn(shape, 'float64')

            dim_1 = paddle.fill_constant([1], "int64", 20)
            dim_2 = paddle.fill_constant([1], "int32", 50)
            x3 = paddle.randn([dim_1, dim_2, 784])

            var_shape = paddle.static.data('X', [2], 'int32')
            x4 = paddle.randn(var_shape)

        place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        res = exe.run(train_program,
                      feed={'X': np.array(shape, dtype='int32')},
                      fetch_list=[x1, x2, x3, x4])

        for out in res:
            self.assertAlmostEqual(np.mean(out), .0, delta=0.1)
            self.assertAlmostEqual(np.std(out), 1., delta=0.1)
Code example #4
File: collective.py Project: goodcoder-cnn/Paddle
def barrier(group=0):
    """

    Barrier among all participants in the group.

    Args:
        group (int): The id of the process group to work on.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.distributed import init_parallel_env

            paddle.disable_static()
            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
            init_parallel_env()
            paddle.distributed.barrier()
    """
    op_type = 'barrier'
    temp = paddle.fill_constant([1], dtype="int32", value="1")
    if in_dygraph_mode():
        return core.ops.barrier(temp, temp, 'ring_id', group)
    if not isinstance(group, int):
        raise ValueError("The type of 'group' for barrier must be int.")
    helper = LayerHelper(op_type, **locals())
    helper.append_op(type=op_type,
                     inputs={'X': [temp]},
                     outputs={'Out': [temp]},
                     attrs={'ring_id': group})
Code example #5
def init_weights(layer):
    if type(layer) == nn.Linear:
        new_weight = paddle.fill_constant(layer.weight.shape,
                                          layer.weight.dtype,
                                          value=0.9)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(layer.bias.shape,
                                        layer.bias.dtype,
                                        value=-0.1)
        layer.bias.set_value(new_bias)
    elif type(layer) == nn.Conv2d:
        new_weight = paddle.fill_constant(layer.weight.shape,
                                          layer.weight.dtype,
                                          value=0.7)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(layer.bias.shape,
                                        layer.bias.dtype,
                                        value=-0.2)
        layer.bias.set_value(new_bias)
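
A hedged usage sketch of the initializer defined above (the test it comes from applies it through Layer.apply; assuming that API is available in the same Paddle version): every Linear sublayer ends up with weights of 0.9 and biases of -0.1.

import paddle
import paddle.nn as nn

paddle.disable_static()
net = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
net.apply(init_weights)   # walks the sublayers and re-initializes each Linear
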
Code example #6
File: test_randn_op.py Project: goodcoder-cnn/Paddle
    def test_api(self):
        shape = [1000, 784]
        place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        paddle.disable_static(place)
        x1 = paddle.randn(shape, 'float32')
        x2 = paddle.randn(shape, 'float64')

        dim_1 = paddle.fill_constant([1], "int64", 20)
        dim_2 = paddle.fill_constant([1], "int32", 50)
        x3 = paddle.randn(shape=[dim_1, dim_2, 784])

        var_shape = paddle.to_variable(np.array(shape))
        x4 = paddle.randn(var_shape)

        for out in [x1, x2, x3, x4]:
            self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1)
            self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1)
        paddle.enable_static()
Code example #7
    def run_retain(self, need_retain):
        g = Generator()
        d = Discriminator()

        optim_g = paddle.optimizer.Adam(parameters=g.parameters())
        optim_d = paddle.optimizer.Adam(parameters=d.parameters())

        gan_criterion = paddle.nn.MSELoss()
        l1_criterion = paddle.nn.L1Loss()

        A = np.random.rand(2, 3, 32, 32).astype('float32')
        B = np.random.rand(2, 3, 32, 32).astype('float32')

        realA = paddle.to_variable(A)
        realB = paddle.to_variable(B)
        fakeB = g(realA)

        optim_d.clear_gradients()
        fake_AB = paddle.concat((realA, fakeB), 1)
        G_pred_fake = d(fake_AB.detach())

        false_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 0.0)

        G_gradient_penalty, _ = self.cal_gradient_penalty(
            d, realA, fakeB, lambda_gp=10.0)
        loss_d = gan_criterion(G_pred_fake, false_target) + G_gradient_penalty

        loss_d.backward(retain_graph=need_retain)
        optim_d.minimize(loss_d)

        optim_g.clear_gradients()
        fake_AB = paddle.concat((realA, fakeB), 1)
        G_pred_fake = d(fake_AB)
        true_target = paddle.fill_constant(G_pred_fake.shape, 'float32', 1.0)
        loss_g = l1_criterion(fakeB, realB) + gan_criterion(G_pred_fake,
                                                            true_target)

        loss_g.backward()
        optim_g.minimize(loss_g)
Code example #8
    def test_api(self):
        with program_guard(Program(), Program()):
            # results are from [0, 5).
            out1 = paddle.randint(5)
            # shape is a list and dtype is 'int32'
            out2 = paddle.randint(low=-100,
                                  high=100,
                                  shape=[64, 64],
                                  dtype='int32')
            # shape is a tuple and dtype is 'int64'
            out3 = paddle.randint(low=-100,
                                  high=100,
                                  shape=(32, 32, 3),
                                  dtype='int64')
            # shape is a tensor list and dtype is 'int32'
            dim_1 = paddle.fill_constant([1], "int64", 32)
            dim_2 = paddle.fill_constant([1], "int32", 50)
            out4 = paddle.randint(low=-100,
                                  high=100,
                                  shape=[dim_1, 5, dim_2],
                                  dtype='int32')
            # shape is a tensor and dtype is 'int64'
            var_shape = paddle.static.data(name='var_shape',
                                           shape=[2],
                                           dtype="int64")
            out5 = paddle.randint(low=1,
                                  high=1000,
                                  shape=var_shape,
                                  dtype='int64')

            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            outs = exe.run(
                feed={'var_shape': np.array([100, 100]).astype('int64')},
                fetch_list=[out1, out2, out3, out4, out5])
Code example #9
def cal_gradient_penalty(netD,
                         real_data,
                         fake_data,
                         edge_data=None,
                         type='mixed',
                         constant=1.0,
                         lambda_gp=10.0):
    if lambda_gp > 0.0:
        if type == 'real':  # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            alpha = paddle.rand((real_data.shape[0], 1))
            alpha = paddle.expand(
                alpha, [1, np.prod(real_data.shape) // real_data.shape[0]])
            alpha = paddle.reshape(alpha, real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        # interpolatesv.requires_grad_(True)
        interpolatesv.stop_gradient = False
        real_data.stop_gradient = True
        fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
        disc_interpolates = netD(fake_AB)

        # FIXME: use paddle.ones
        outs = paddle.fill_constant(disc_interpolates.shape,
                                    disc_interpolates.dtype, 1.0)
        gradients = paddle.imperative.grad(
            outputs=disc_interpolates,
            inputs=fake_AB,
            grad_outputs=outs,  # paddle.ones(list(disc_interpolates.shape)),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
            # no_grad_vars=set(netD.parameters())
        )

        gradients = paddle.reshape(gradients[0],
                                   [real_data.shape[0], -1])  # flat the data

        gradient_penalty = paddle.reduce_mean(
            (paddle.norm(gradients + 1e-16, 2, 1) - constant)**
            2) * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
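
The term returned above is the usual WGAN-GP penalty, lambda_gp * mean((||grad||_2 - constant)^2), with the gradient taken at the interpolated samples. A small NumPy sketch (illustrative values, not the Paddle code) of that final reduction:

import numpy as np

grads = np.random.randn(2, 3 * 32 * 32).astype('float32')   # hypothetical per-sample gradients, flattened
constant, lambda_gp = 1.0, 10.0
norms = np.linalg.norm(grads + 1e-16, ord=2, axis=1)         # per-sample L2 norm (same eps as above)
gradient_penalty = np.mean((norms - constant) ** 2) * lambda_gp
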
Code example #10
    def cal_gradient_penalty(self,
                             netD,
                             real_data,
                             fake_data,
                             edge_data=None,
                             type='mixed',
                             constant=1.0,
                             lambda_gp=10.0):
        if lambda_gp > 0.0:
            if type == 'real':
                interpolatesv = real_data
            elif type == 'fake':
                interpolatesv = fake_data
            elif type == 'mixed':
                alpha = paddle.rand((real_data.shape[0], 1))
                alpha = paddle.expand(alpha, [
                    real_data.shape[0],
                    np.prod(real_data.shape) // real_data.shape[0]
                ])
                alpha = paddle.reshape(alpha, real_data.shape)
                interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
            else:
                raise NotImplementedError('{} not implemented'.format(type))
            interpolatesv.stop_gradient = False
            real_data.stop_gradient = True
            fake_AB = paddle.concat((real_data.detach(), interpolatesv), 1)
            disc_interpolates = netD(fake_AB)

            outs = paddle.fill_constant(disc_interpolates.shape,
                                        disc_interpolates.dtype, 1.0)
            gradients = paddle.grad(
                outputs=disc_interpolates,
                inputs=fake_AB,
                grad_outputs=outs,
                create_graph=True,
                retain_graph=True,
                only_inputs=True)

            gradients = paddle.reshape(gradients[0], [real_data.shape[0], -1])

            gradient_penalty = paddle.reduce_mean((paddle.norm(
                gradients + 1e-16, 2, 1) - constant)**
                                                  2) * lambda_gp  # added eps
            return gradient_penalty, gradients
        else:
            return 0.0, None
Code example #11
def constant_(x, value):
    temp_value = paddle.fill_constant(x.shape, x.dtype, value)
    x.set_value(temp_value)
    return x
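
A minimal usage sketch (dygraph mode assumed, matching the surrounding snippets): overwrite a layer's bias in place with a constant value.

import paddle
import paddle.nn as nn

paddle.disable_static()
linear = nn.Linear(4, 2)
constant_(linear.bias, 0.1)   # bias becomes a tensor of 0.1s with the same shape and dtype
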
Code example #12
def margin_ranking_loss(input,
                        other,
                        label,
                        margin=0.0,
                        reduction='mean',
                        name=None):
    """

    This op calculates the margin rank loss between the input, other and label, using the math function as follows.

    .. math::
        margin\_rank\_loss = max(0, -label * (input - other) + margin)

    If :attr:`reduction` is set to ``'mean'``, the reduced mean loss is:

    .. math::
        Out = MEAN(margin\_rank\_loss)

    If :attr:`reduction` is set to ``'sum'``, the reduced sum loss is:

    .. math::
        Out = SUM(margin\_rank\_loss)

    If :attr:`reduction` is set to ``'none'``, the original ``margin_rank_loss`` is returned.

    Parameters:
        input(Tensor): the first input tensor, its data type should be float32, float64.
        other(Tensor): the second input tensor, its data type should be float32, float64.
        label(Tensor): the label value corresponding to input, its data type should be float32, float64.
        margin (float, optional): The margin value to add, default value is 0.
        reduction (str, optional): Indicate the reduction to apply to the loss, the candidates are ``'none'``, ``'mean'``, ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; if :attr:`reduction` is ``'mean'``, the reduced mean loss is returned. If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns: Tensor, if :attr:`reduction` is ``'mean'`` or ``'sum'``, the output shape is :math:`[1]`; otherwise the shape is the same as ``input``. The dtype is the same as the input tensor.

    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()

            input = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
            other = paddle.to_tensor([[2, 1], [2, 4]], dtype='float32')
            label = paddle.to_tensor([[1, -1], [-1, -1]], dtype='float32')
            loss = paddle.nn.functional.margin_ranking_loss(input, other, label)
            print(loss.numpy()) # [0.75]
    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but "
            "received %s, which is not allowed." % reduction)
    if fluid.framework.in_dygraph_mode():
        out = core.ops.elementwise_sub(other, input)
        out = core.ops.elementwise_mul(out, label)
        if margin != 0.0:
            margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype)
            out = core.ops.elementwise_add(out, margin)
        out = core.ops.relu(out)
        if reduction == 'sum':
            return core.ops.reduce_sum(out, 'reduce_all', True)
        elif reduction == 'mean':
            return core.ops.mean(out)
        return out

    helper = LayerHelper("margin_ranking_loss", **locals())
    fluid.data_feeder.check_variable_and_dtype(input, 'input',
                                               ['float32', 'float64'],
                                               'margin_rank_loss')
    fluid.data_feeder.check_variable_and_dtype(other, 'other',
                                               ['float32', 'float64'],
                                               'margin_rank_loss')
    fluid.data_feeder.check_variable_and_dtype(label, 'label',
                                               ['float32', 'float64'],
                                               'margin_rank_loss')

    out = paddle.elementwise_sub(other, input)
    out = paddle.multiply(out, label)

    if margin != 0.0:
        margin_var = out.block.create_var(dtype=out.dtype)
        paddle.fill_constant([1], out.dtype, margin, out=margin_var)
        out = paddle.add(out, margin_var)

    result_out = helper.create_variable_for_type_inference(input.dtype)

    if reduction == 'none':
        helper.append_op(type="relu",
                         inputs={"X": out},
                         outputs={"Out": result_out})
        return result_out
    elif reduction == 'sum':
        out = paddle.nn.functional.relu(out)
        attrs = {"dim": [0], "keep_dim": False, "reduce_all": True}
        helper.append_op(type="reduce_sum",
                         inputs={"X": out},
                         outputs={"Out": result_out},
                         attrs=attrs)
        return result_out
    elif reduction == 'mean':
        out = paddle.nn.functional.relu(out)
        helper.append_op(type="mean",
                         inputs={"X": out},
                         outputs={"Out": result_out},
                         attrs={})
        return result_out
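
A NumPy check (an addition, not part of the source) of the docstring example: max(0, -label * (input - other) + margin), averaged over all elements, gives 0.75.

import numpy as np

inp   = np.array([[1., 2.], [3., 4.]], dtype='float32')
other = np.array([[2., 1.], [2., 4.]], dtype='float32')
label = np.array([[1., -1.], [-1., -1.]], dtype='float32')
loss = np.maximum(0.0, -label * (inp - other) + 0.0)   # margin = 0
print(loss.mean())                                     # 0.75, matching the docstring
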
Code example #13
def binary_cross_entropy_with_logits(logit,
                                     label,
                                     weight=None,
                                     reduction='mean',
                                     pos_weight=None,
                                     name=None):
    """
    This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer.
    Also, it can be seen as the combination of the ``sigmoid_cross_entropy_with_logits``
    layer and some reduce operations.

    This measures the element-wise probability error in classification tasks
    in which each class is independent.
    This can be thought of as predicting labels for a data-point, where labels
    are not mutually exclusive. For example, a news article can be about
    politics, technology or sports at the same time or none of these.

    First, this operator calculates the loss as follows:

    .. math::
           Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit))

    We know that :math:`\\sigma(Logit) = \\frac{1}{1 + e^{-Logit}}`. By substituting this we get:

    .. math::
           Out = Logit - Logit * Labels + \\log(1 + e^{-Logit})

    For stability and to prevent overflow of :math:`e^{-Logit}` when Logit < 0,
    we reformulate the loss as follows:

    .. math::
           Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + e^{-\|Logit\|})

    Then, if ``weight`` or ``pos_weight`` is not None, this operator multiplies the
    loss `Out` by the weight tensor. The ``weight`` tensor attaches a different
    weight to every item in the batch. The ``pos_weight`` attaches a different
    weight to the positive label of each class.

    Finally, this operator applies reduce operation on the loss.
    If :attr:`reduction` is set to ``'none'``, the operator will return the original loss `Out`.
    If :attr:`reduction` is set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`.
    If :attr:`reduction` is set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`.

    Note that the target labels ``label`` should be numbers between 0 and 1.

    Args:
        logit (Tensor): The input predictions tensor. 2-D tensor with shape: [N, *],
            N is batch_size, `*` means number of additional dimensions. The ``logit``
            is usually the output of Linear layer. Available dtype is float32, float64.
        label (Tensor): The target labels tensor. 2-D tensor with the same shape as
            ``logit``. The target labels whose values should be numbers between 0 and 1.
            Available dtype is float32, float64.
        weight (Tensor, optional): A manual rescaling weight given to the loss of each
            batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`,
            The data type is float32, float64. Default is ``'None'``.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default is ``'mean'``.
        pos_weight (Tensor, optional): A weight of positive examples. Must be a vector
            with length equal to the number of classes. The data type is float32, float64.
            Default is ``'None'``.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        output (Tensor): If ``reduction`` is ``'none'``, the shape of the output is
            the same as ``logit``; otherwise the output is a scalar.

    Examples:

        .. code-block:: python

            import paddle
            paddle.disable_static()
            logit = paddle.to_tensor([5.0, 1.0, 3.0])
            label = paddle.to_tensor([1.0, 0.0, 1.0])
            output = paddle.nn.functional.binary_cross_entropy_with_logits(logit, label)
            print(output.numpy())  # [0.45618808]

    """
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "The value of 'reduction' in binary_cross_entropy_with_logits "
            "should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
            % reduction)

    if in_dygraph_mode():
        one = _varbase_creator(dtype=logit.dtype)
        core.ops.fill_constant(one, 'value', float(1.0), 'force_cpu', False,
                               'dtype', one.dtype, 'str_value', '1.0', 'shape',
                               [1])
        out = core.ops.sigmoid_cross_entropy_with_logits(logit, label)
        if pos_weight is not None:
            log_weight = core.ops.elementwise_add(
                core.ops.elementwise_mul(
                    label, core.ops.elementwise_sub(pos_weight, one)), one)
            out = core.ops.elementwise_mul(out, log_weight)
        if weight is not None:
            out = core.ops.elementwise_mul(out, weight)

        if reduction == "sum":
            return core.ops.reduce_sum(out, 'reduce_all', True)
        elif reduction == "mean":
            return core.ops.mean(out)
        else:
            return out

    fluid.data_feeder.check_variable_and_dtype(
        logit, 'logit', ['float32', 'float64'],
        'binary_cross_entropy_with_logits')
    fluid.data_feeder.check_variable_and_dtype(
        label, 'label', ['float32', 'float64'],
        'binary_cross_entropy_with_logits')
    sigmoid_name = None
    if reduction == 'none' and pos_weight is None and weight is None:
        sigmoid_name = name

    out = paddle.nn.functional.sigmoid_cross_entropy_with_logits(
        logit, label, name=sigmoid_name)

    one = paddle.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
    if pos_weight is not None:
        fluid.data_feeder.check_variable_and_dtype(
            pos_weight, 'pos_weight', ['float32', 'float64'],
            'binary_cross_entropy_with_logits')
        log_weight = paddle.add(
            paddle.multiply(label, paddle.elementwise_sub(pos_weight, one)),
            one)
        pos_weight_name = name if reduction == 'none' and weight is None else None
        out = paddle.multiply(out, log_weight, name=pos_weight_name)

    if weight is not None:
        fluid.data_feeder.check_variable_and_dtype(
            weight, 'weight', ['float32', 'float64'],
            'binary_cross_entropy_with_logits')
        weight_name = name if reduction == 'none' else None
        out = paddle.multiply(out, weight, name=weight_name)

    if reduction == "sum":
        return paddle.sum(out, name=name)
    elif reduction == "mean":
        return paddle.mean(out, name=name)
    return out
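
A NumPy check (an addition, not part of the source) of the numerically stable form used above, Out = max(Logit, 0) - Logit * Labels + log(1 + e^{-|Logit|}), reproducing the docstring value:

import numpy as np

z = np.array([5.0, 1.0, 3.0])   # logits
y = np.array([1.0, 0.0, 1.0])   # labels
out = np.maximum(z, 0) - z * y + np.log1p(np.exp(-np.abs(z)))
print(out.mean())               # ~0.45618808, matching the docstring example
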
Code example #14
def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
    """
    This op normalizes ``x`` along dimension ``axis`` using the :math:`L_p` norm. This layer computes

    .. math::

        y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }
    
    .. math::
        \lvert \lvert x \rvert \rvert_p = \left(\sum_i {\lvert x_i\rvert^p}  \right)^{1/p}

    where, :math:`\sum_i{\lvert x_i\rvert^p}` is calculated along the ``axis`` dimension.


    Args:
        x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
        p (float|int, optional): The exponent value in the norm formulation. Default: 2
        axis (int, optional): The axis along which to apply normalization. If `axis < 0`, the dimension to normalize is `x.ndim + axis`. -1 is the last dimension. Default is 1.
        epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-12.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the output has the same shape and data type as ``x``.

    Examples:

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F

            paddle.disable_static()
            x = np.arange(6, dtype=np.float32).reshape(2,3)
            x = paddle.to_tensor(x)
            y = F.normalize(x)
            print(y.numpy())
            # [[0.         0.4472136  0.8944272 ]
            # [0.42426404 0.5656854  0.7071067 ]]

            y = F.normalize(x, p=1.5)
            print(y.numpy())
            # [[0.         0.40862012 0.81724024]
            # [0.35684016 0.4757869  0.5947336 ]]

            y = F.normalize(x, axis=0)
            print(y.numpy())
            # [[0.         0.24253564 0.37139067]
            # [1.         0.97014254 0.9284767 ]]
    """
    if in_dygraph_mode():
        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
        out = core.ops.p_norm(x, 'axis', axis, 'porder', float(p), 'keepdim',
                              True, 'epsilon', epsilon)
        return x / core.ops.elementwise_max(out, eps)

    check_type(p, 'p', (float, int), 'normalize')
    check_type(axis, 'axis', (int), 'normalize')
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'normalize')
    if len(x.shape) == 1 and axis != 0 and axis != -1:
        raise ValueError(
            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}"
            .format(axis))

    attrs = {
        'axis': axis,
        'porder': float(p),
        'keepdim': True,
        'epsilon': epsilon,
    }
    helper = LayerHelper('p_norm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='p_norm',
                     inputs={'X': x},
                     outputs={'Out': out},
                     attrs=attrs)
    eps = out.block.create_var(dtype=out.dtype)
    paddle.fill_constant([1], out.dtype, epsilon, out=eps)
    return paddle.elementwise_div(x, paddle.maximum(out, eps), name=name)
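
A NumPy check (an addition, not part of the source) of the default L2 case from the docstring: each row of x is divided by max(||row||_2, epsilon).

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
norms = np.maximum(np.linalg.norm(x, ord=2, axis=1, keepdims=True), 1e-12)
print(x / norms)
# [[0.         0.4472136  0.8944272 ]
#  [0.42426407 0.56568545 0.70710677]]  (matches the docstring up to float precision)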