Example #1
def check_mpc_variable_and_dtype(input,
                                 input_name,
                                 expected_dtype,
                                 op_name,
                                 extra_message=''):
    check_type(input, input_name, MpcVariable, op_name, extra_message)
    check_dtype(input.dtype, input_name, expected_dtype, op_name,
                extra_message)
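A minimal sketch of how this check helper is typically used: it is called at the top of an MPC layer wrapper to validate the input before any op is appended. The `mpc_relu` wrapper below is hypothetical and only assumes the MpcLayerHelper pattern used by the other layers in this listing.

def mpc_relu(input, name=None):
    # hypothetical layer: reject anything that is not an int64 MpcVariable
    check_mpc_variable_and_dtype(input, 'input', ['int64'], 'relu')
    helper = MpcLayerHelper('relu', **locals())
    out = helper.create_mpc_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(type='mpc_relu', inputs={'X': input}, outputs={'Out': out})
    return out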
Example #2
def create_mpc_parameter(shape,
                         dtype,
                         name=None,
                         attr=None,
                         is_bias=False,
                         default_initializer=None):
    """
    :api_attr: Static Graph
    This function creates a mpc parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.
    NOTE: this is a very low-level API. It is useful when you create
    an operator by yourself instead of using layers.
    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name does not need to be set and is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter
    Returns:
        The created parameter.
    Examples:
        .. code-block:: python
            import paddle_fl.mpc as pfl_mpc
            pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)
            W = pfl_mpc.layers.create_mpc_parameter(shape=[784, 200], dtype='int64')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_mpc_parameter')
    for item in shape:
        if six.PY2:
            check_type(item, 'item of shape',
                       (int, long, numpy.uint8, numpy.int8, numpy.int16,
                        numpy.int32, numpy.int64), 'create_mpc_parameter')
        else:
            check_type(item, 'item of shape',
                       (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                        numpy.int64), 'create_mpc_parameter')

    check_dtype(dtype, 'dtype', ['int64'], 'create_mpc_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_mpc_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_mpc_parameter')

    helper = MpcLayerHelper("create_mpc_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_mpc_parameter(attr, shape, dtype, is_bias,
                                       default_initializer)
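A short usage sketch extending the docstring example: with is_bias=True and no default_initializer, the parameter falls back to Constant(0.0) instead of Xavier. role, redis_server and redis_port are placeholders, exactly as in the docstring example.

import paddle_fl.mpc as pfl_mpc

pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)
W = pfl_mpc.layers.create_mpc_parameter(shape=[784, 200], dtype='int64')
# is_bias=True selects Constant(0.0) as the default initializer
b = pfl_mpc.layers.create_mpc_parameter(shape=[200], dtype='int64', is_bias=True)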
Example #3
    def backward(ctx, *args, **kwargs):

        # for now, the backward function only inserts the gradient allreduce for the dist op itself
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.get_dst_main_program().global_block()
        backward_op = dist_op_context.get_cur_src_op()
        rank_id = dist_op_context.get_rank_id()
        dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
        assert dist_attr is not None, "backward op [{}] does not have dist attribute!".format(
            str(backward_op))

        # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
        if rank_id not in dist_attr.process_mesh.processes:
            rank_id = _get_corresponding_rank(ctx, dist_attr.process_mesh,
                                              rank_id)

        assert 'Ids' in kwargs, "input [{}] is not given".format('Ids')
        assert 'W' in kwargs, "input [{}] is not given".format('W')
        assert 'Out@GRAD' in kwargs, "input [{}] is not given".format('Out@GRAD')
        assert 'W@GRAD' in kwargs, "output [{}] is not given".format('W@GRAD')

        assert len(
            kwargs['Ids']
        ) == 1, "row_parallel_embedding input Ids takes 1 variable but got {}".format(
            kwargs['Ids'])
        assert len(
            kwargs['W']
        ) == 1, "row_parallel_embedding input W takes 1 variable but got {}".format(
            kwargs['W'])
        assert len(
            kwargs['Out@GRAD']
        ) == 1, "row_parallel_embedding input Out@GRAD takes 1 variable but got {}".format(
            kwargs['Out@GRAD'])
        assert len(
            kwargs['W@GRAD']
        ) == 1, "row_parallel_embedding output W@GRAD takes 1 variable but got {}".format(
            kwargs['W@GRAD'])

        Ids_var = main_block.var(kwargs['Ids'][0])
        Weight_var = main_block.var(kwargs['W'][0])
        Out_grad = main_block.var(kwargs['Out@GRAD'][0])
        Weight_grad = main_block.var(kwargs['W@GRAD'][0])

        embedding_row_dim_mapping = dist_attr.get_input_dims_mapping(
            Weight_var.name)[0]
        assert embedding_row_dim_mapping >= 0, "row_parallel_embedding's row should be sharded along a specific mesh axis, but got [{}]".format(
            embedding_row_dim_mapping)
        process_mesh_shape = dist_attr.process_mesh.topology
        process_mesh_group = dist_attr.process_mesh.processes

        # A generalized method to calculate the embedding offset using the Cartesian product
        relative_idx = _get_idx_in_axis(process_mesh_group, process_mesh_shape,
                                        embedding_row_dim_mapping, rank_id)
        per_part_size = Weight_var.shape[0]
        relative_idx = relative_idx * per_part_size

        check_variable_and_dtype(
            Out_grad, 'tensor',
            ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity')

        intermediate_var_0 = main_block.create_var(
            name=unique_name.generate_with_ignorable_key(".".join(
                ["c_embedding", '@tmp_0@GRAD'])),
            dtype=Out_grad.dtype,
            shape=Out_grad.shape,
            type=core.VarDesc.VarType.LOD_TENSOR,
            persistable=False,
            stop_gradient=Out_grad.stop_gradient)

        # copy X_var's dist_attr to intermediate_var_0's dist_attr
        out_grad_dist_attr = dist_attr.get_input_dist_attr(Out_grad.name)
        assert out_grad_dist_attr is not None
        ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                             out_grad_dist_attr)

        group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
                                      embedding_row_dim_mapping, rank_id)
        group = new_process_group(group_ranks)

        c_identity_op = main_block.append_op(
            type='c_identity',
            inputs={'X': [Out_grad]},
            outputs={'Out': intermediate_var_0},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True,
                OP_ROLE_KEY: OpRole.Backward,
            })
        check_variable_and_dtype(intermediate_var_0, 'x',
                                 ['float16', 'float32', 'float64'], 'linear')
        check_dtype(intermediate_var_0.dtype, 'dtype',
                    ['float16', 'float32', 'float64'], 'linear')

        set_comm_op_dist_attr_for_program(c_identity_op, dist_attr.process_mesh,
                                          out_grad_dist_attr, ctx)

        main_block._sync_with_cpp()
        c_embedding_grad_op_desc = main_block.desc.append_op()
        c_embedding_grad_op_desc.set_type("c_embedding_grad")
        c_embedding_grad_op_desc.set_input('Ids', [Ids_var.name])
        c_embedding_grad_op_desc.set_input('W', [Weight_var.name])
        c_embedding_grad_op_desc.set_input('Out@GRAD',
                                           [intermediate_var_0.name])
        c_embedding_grad_op_desc.set_output('W@GRAD', [Weight_grad.name])
        c_embedding_grad_op_desc._set_attr('start_index', relative_idx)
        c_embedding_grad_op_desc._set_attr(OP_ROLE_KEY, OpRole.Backward)
        main_block._sync_with_cpp()

        c_embedding_grad_op = main_block.ops[-1]
        assert c_embedding_grad_op.type == "c_embedding_grad"
        naive_copy_op_dist_attr_for_program(c_embedding_grad_op, backward_op,
                                            ctx)

        # check if need gradient allreduce
        need_gradient_allreduce = False

        process_mesh = dist_attr.process_mesh
        var_dim_mapping = dist_attr.get_input_dims_mapping(Ids_var.name)
        mesh_shape = process_mesh.topology
        batch_size_axis = var_dim_mapping[0]
        if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
            need_gradient_allreduce = True

            group_ranks = _get_comm_group(process_mesh.processes,
                                          process_mesh.topology,
                                          batch_size_axis, rank_id)
            dp_degree = len(group_ranks)
            dp_group = new_process_group(group_ranks)

        if need_gradient_allreduce:
            W_Grad_var = main_block.var(kwargs['W@GRAD'][0])
            allreduce_op = main_block.append_op(
                type='c_allreduce_sum',
                inputs={'X': [W_Grad_var]},
                outputs={'Out': [W_Grad_var]},
                attrs={
                    'ring_id': dp_group.id,
                    'use_calc_stream': True,
                    OP_ROLE_KEY: OpRole.Backward
                })
            scale_op = main_block.append_op(
                type='scale',
                inputs={'X': W_Grad_var},
                outputs={'Out': W_Grad_var},
                attrs={'scale': 1.0 / dp_degree,
                       OP_ROLE_KEY: OpRole.Backward})
            main_block._sync_with_cpp()

            dims_mapping = ctx.get_tensor_dist_attr_for_program(
                W_Grad_var).dims_mapping
            process_mesh = dist_attr.process_mesh
            for op in [allreduce_op, scale_op]:
                op_attr = OperatorDistributedAttribute()
                op_attr.process_mesh = process_mesh
                op_attr.set_output_dims_mapping(W_Grad_var.name, dims_mapping)
                op_attr.set_input_dims_mapping(W_Grad_var.name, dims_mapping)
                ctx.set_op_dist_attr_for_program(op, op_attr)
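The offset computation above relies on _get_idx_in_axis, which finds the coordinate of rank_id along the sharded mesh axis. A self-contained sketch of that idea (an assumption about the helper's behavior, not its actual implementation) is:

import numpy as np

def idx_in_axis_sketch(process_group, mesh_shape, axis, rank_id):
    # lay the flat process list out as the mesh (Cartesian-product order) and
    # read off this rank's coordinate on the sharded axis
    mesh = np.array(process_group).reshape(mesh_shape)
    coord = np.argwhere(mesh == rank_id)[0]
    return int(coord[axis])

# 2x2 mesh [[0, 1], [2, 3]]: rank 3 sits at index 1 on axis 0, so its
# embedding shard would start at 1 * per_part_size.
assert idx_in_axis_sketch([0, 1, 2, 3], [2, 2], 0, 3) == 1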
Example #4
def mean_normalize(f_min, f_max, f_mean, sample_num):
    '''
    Mean normalization is a method used to normalize the range of independent
    variables or features of data.
    Refer to:
    https://en.wikipedia.org/wiki/Feature_scaling#Mean_normalization

    Args:
        f_min (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local min feature val of N features.
        f_max (Variable): A 2-D tensor with shape [P, N], where P is the party
                          num and N is the feature num. Each row contains the
                          local max feature val of N features.
        f_mean (Variable): A 2-D tensor with shape [P, N], where P is the party
                           num and N is the feature num. Each row contains the
                           local mean feature val of N features.
        sample_num (Variable): A 1-D tensor with shape [P], where P is the
                               party num. Each element contains sample num
                               of party_i.

    Returns:
        f_range (Variable): A 1-D tensor with shape [N], where N is the
                            feature num. Each element contains global
                            range of feature_i.
        f_mean_out (Variable): A 1-D tensor with shape [N], where N is the
                               feature num. Each element contains the global
                               mean of feature_i.
    Examples:
        .. code-block:: python
            import paddle_fl.mpc as pfl_mpc

            pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)

            # 2 for share, 4 for 4 party, 100 for feat_num
            input_size = [2, 4, 100]

            mi = pfl_mpc.data(name='mi', shape=input_size, dtype='int64')
            ma = pfl_mpc.data(name='ma', shape=input_size, dtype='int64')
            me = pfl_mpc.data(name='me', shape=input_size, dtype='int64')
            sn = pfl_mpc.data(name='sn', shape=input_size[:-1], dtype='int64')

            out0, out1 = pfl_mpc.layers.mean_normalize(f_min=mi, f_max=ma,
                    f_mean=me, sample_num=sn)

            exe = fluid.Executor(place=fluid.CPUPlace())

            # feed encrypted data
            f_range, f_mean = exe.run(feed={'mi': f_min, 'ma': f_max,
            'me': f_mean, 'sn': sample_num}, fetch_list=[out0, out1])
    '''
    helper = MpcLayerHelper("mean_normalize", **locals())

    # dtype = helper.input_dtype()
    dtype = 'int64'

    check_dtype(dtype, 'f_min', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_max', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'f_mean', ['int64'], 'mean_normalize')
    check_dtype(dtype, 'sample_num', ['int64'], 'mean_normalize')

    f_range = helper.create_mpc_variable_for_type_inference(dtype=f_min.dtype)
    f_mean_out = helper.create_mpc_variable_for_type_inference(
        dtype=f_min.dtype)

    # to avoid circular dependencies
    from .math import reduce_sum

    total_num = reduce_sum(sample_num)

    op_type = 'mean_normalize'

    helper.append_op(
        type='mpc_' + op_type,
        inputs={
            "Min": f_min,
            "Max": f_max,
            "Mean": f_mean,
            "SampleNum": sample_num,
            "TotalNum": total_num,
        },
        outputs={
            "Range": f_range,
            "MeanOut": f_mean_out,
        },
    )

    return f_range, f_mean_out
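For intuition, a plaintext counterpart of what the op is expected to compute (an assumption based on the docstring, useful for checking decrypted results): the global range is the max-of-maxes minus the min-of-mins over parties, and the global mean is the sample-count-weighted average of the per-party means.

import numpy as np

def mean_normalize_plaintext(f_min, f_max, f_mean, sample_num):
    # f_min / f_max / f_mean: [P, N] per-party stats; sample_num: [P]
    f_range = f_max.max(axis=0) - f_min.min(axis=0)
    f_mean_out = (f_mean * sample_num[:, None]).sum(axis=0) / sample_num.sum()
    return f_range, f_mean_out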
Example #5
def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None):
    """
    **Fully Connected Layer**
    This operator creates a fully connected layer in the network. It can take
    a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see
    Args in detail). It creates a variable called weight for each input Tensor,
    which represents a fully connected weight matrix from each input unit to
    each output unit. The fully connected layer multiplies each input Tensor
    with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` ,
    where M is batch size. If a list of Tensor is given, the results of
    multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr`
    is not None, a bias variable will be created and added to the output.
    Finally, if :attr:`act` is not None, it will be applied to the output as well.
    When the input is a single Tensor(or LoDTensor):
    .. math::
        Out = Act({XW + b})
    When the input is a list of Tensor(or LoDTensor):
    .. math::
        Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
    In the above equation:
    * :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.
    * :math:`X_i`: The i-th input tensor.
    * :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
    * :math:`b`: The bias parameter created by this layer (if needed).
    * :math:`Act`: The activation function.
    * :math:`Out`: The output Tensor.
    .. code-block:: text
        Case 1:
        Given a single Tensor data_1, and num_flatten_dims = 2:
            data_1.data = [[[0.1, 0.2],
                            [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size
            out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)
        Then output is:
            out.data = [[0.83234344], [0.34936576]]
            out.shape = (1, 2, 1)
        Case 2:
        Given a list of Tensor:
            data_1.data = [[[0.1, 0.2],
                           [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size
            data_2.data = [[[0.1, 0.2, 0.3]]]
            data_2.shape = (1, 1, 3)
            out = fluid.layers.fc(input=[data_1, data_2], size=2)
        Then:
            out.data = [[0.18669507, 0.1893476]]
            out.shape = (1, 2)
    Args:
        input (MpcVariable|list of MpcVariable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
            a list of Tensor(or LoDTensor). The number of dimensions of the input Tensor is at least 2 and the data
            type should be int64.
        size(int): The number of output units in this layer, which also means the feature size of output
            Tensor(or LoDTensor).
        num_flatten_dims (int): The fc layer can accept an input Tensor with more than
            two dimensions. If this happens, the multidimensional tensor will first be flattened
            into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
            Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
            dimensions will be flatten to form the first dimension of the final matrix (height of
            the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
            form the second dimension of the final matrix (width of the matrix). For example, assuming that
            X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
            Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
        param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
            default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
            sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .
    Returns:
        MpcVariable: Tensor or LoDTensor calculated by fc layer. The data type is same with input.
    Raises:
        ValueError: If the number of dimensions of the input Tensor is less than 2.
    Examples: todo
    """

    helper = MpcLayerHelper("fc", **locals())
    check_type(input, 'input', (list, tuple, MpcVariable), 'fc')
    if isinstance(input, (list, tuple)):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', MpcVariable, 'fc')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'input', ['int64'], 'fc')
    mul_results = []
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        if num_flatten_dims == -1:
            num_flatten_dims = len(input_shape) - 1
            param_num_flatten_dims = num_flatten_dims
        else:
            param_num_flatten_dims = num_flatten_dims + 1  # The first dimension '2' of input is share number.
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[param_num_flatten_dims:], 1)
        ] + [size]
        w = helper.create_mpc_parameter(attr=param_attr,
                                        shape=param_shape,
                                        dtype=dtype,
                                        is_bias=False)
        tmp = helper.create_mpc_variable_for_type_inference(dtype)
        helper.append_op(type="mpc_mul",
                         inputs={
                             "X": input_var,
                             "Y": w
                         },
                         outputs={"Out": tmp},
                         attrs={
                             "x_num_col_dims": num_flatten_dims,
                             "y_num_col_dims": 1
                         })
        mul_results.append(tmp)

    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_mpc_variable_for_type_inference(dtype)
        helper.append_op(type="mpc_sum",
                         inputs={"X": mul_results},
                         outputs={"Out": pre_bias},
                         attrs={"use_mkldnn": False})
    # add bias
    pre_activation = helper.append_mpc_bias_op(pre_bias,
                                               dim_start=num_flatten_dims)
    # add activation
    return helper.append_mpc_activation(pre_activation)
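The Examples section above is still a todo; a hedged usage sketch in the style of the other examples in this listing (role, redis_server and redis_port are placeholders as before, the leading dimension 2 is the share dimension, and fc is assumed to be exposed under pfl_mpc.layers like the other MPC layers here):

import paddle_fl.mpc as pfl_mpc

pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)
# [share, batch, feature] = [2, 3, 4]
data = pfl_mpc.data(name='data', shape=[2, 3, 4], dtype='int64')
out = pfl_mpc.layers.fc(input=data, size=8)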
Example #6
def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'):
    """
    **Tdm Child**
     According to the input node_id on the given tree, return the corresponding child node_id and
     whether the child is a leaf node via the leaf_mask value.
    .. code-block:: text

        Given:
            tree[[0], [1, 2], [3, 4], [5, 6]] # A binary tree with seven nodes
            x = [[2], [3]]
            node_nums = 7
            child_nums = 2

          we get:
            child = [[5, 6],
                     [0, 0]]
            leaf_mask = [[1, 1],
                         [0, 0]]
    Args:
        x(Variable): Variable containing the node_id information, dtype supports int32/int64.
        node_nums(int): Number of total nodes.
        child_nums(int): Maximum number of child nodes per node.
        param_attr(ParamAttr): To specify the tdm-tree-info parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr`. It should
            have shape (node_nums, 3 + child_nums) and dtype int32/int64.
            The dimension[1] of tdm-tree-info contains the following:
            1. Item_id(int, shape(1)): if the node is a leaf node, its item_id corresponding to node_id; otherwise 0.
            2. Layer_id(int, shape(1)): indicates which layer the node is on.
            3. Parent_id(int, shape(1)): the node's parent node.
            4. Child_id(int, shape(child_nums)): all child node_ids of this node;
               if the number of child nodes is insufficient, pad with 0 until the number equals child_nums.
        dtype(str): The data type of output child and leaf_mask, support int32/int64.

    Returns:
        tuple: A tuple including the input node's child(Variable) and leaf_mask(Variable).
            If the child is a leaf node, leaf_mask equals 1, otherwise 0.

    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import numpy as np
            x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
            tree_info = [[0,0,0,1,2],
                         [0,1,0,3,4],[0,1,0,5,6],
                         [0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]]
            tree_info_np = np.array(tree_info)
            tree_info_np = np.reshape(tree_info_np, (7,5))
            node_nums = 7
            child_nums = 2
            child, leaf_mask = fluid.contrib.layers.tdm_child(x, node_nums, child_nums,
                                    param_attr=fluid.ParamAttr(
                                        initializer=fluid.initializer.NumpyArrayInitializer(
                                                                                tree_info_np)))
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            xx = np.array([[2],[3]]).reshape((2,1)).astype("int32")
            child_res, leaf_mask_res = exe.run(feed={"x":xx}, fetch_list=[child, leaf_mask])
     """
    helper = LayerHelper("tdm_child", **locals())
    check_dtype(dtype, 'dtype', ['int32', 'int64'],
                'fluid.contrib.layers.tdm_child')
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    tree_info = helper.create_parameter(attr=helper.param_attr,
                                        shape=[node_nums, 3 + child_nums],
                                        dtype=dtype,
                                        default_initializer=Constant(0))
    tree_info.stop_gradient = True

    child = helper.create_variable_for_type_inference(dtype=dtype)
    leaf_mask = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(type='tdm_child',
                     inputs={
                         'X': x,
                         'TreeInfo': tree_info
                     },
                     outputs={
                         'Child': child,
                         'LeafMask': leaf_mask
                     },
                     attrs={
                         'child_nums': child_nums,
                         'dtype': c_dtype
                     },
                     stop_gradient=True)
    return (child, leaf_mask)
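A plaintext sketch of the lookup the op performs for a single node id, derived from the docstring's tree_info layout and example (the exact leaf rule is an assumption: a child counts as a leaf when it is a real node whose item_id is non-zero):

import numpy as np

def tdm_child_plaintext(node_id, tree_info, child_nums):
    # tree_info row: [item_id, layer_id, parent_id, child_0, ..., child_{n-1}]
    children = tree_info[node_id][3:3 + child_nums]
    leaf_mask = np.array([1 if c != 0 and tree_info[c][0] != 0 else 0
                          for c in children])
    return children, leaf_mask

tree_info = np.array([[0, 0, 0, 1, 2], [0, 1, 0, 3, 4], [0, 1, 0, 5, 6],
                      [0, 2, 1, 0, 0], [1, 2, 1, 0, 0], [2, 2, 2, 0, 0],
                      [3, 2, 2, 0, 0]])
print(tdm_child_plaintext(2, tree_info, 2))  # ([5 6], [1 1]) as in the docstring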
Example #7
def embedding(input,
              size,
              is_sparse=False,
              is_distributed=False,
              padding_idx=None,
              param_attr=None,
              dtype='int64'):
    """
    The operator is used to lookup embeddings vector of ids provided by :attr:`input` . 
    It automatically constructs a 2D embedding matrix based on the
    input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .
    The `input` is the mpc one-hot tensor of indexes; its last dimension is equal to the
    vocabulary size, and its number of dimensions must be 3, i.e., (2, x, vocab_size).

    The shape of the output Tensor is generated by replacing the last dimension of the
    input Tensor shape with emb_size.

    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
    otherwise the program will throw an exception and exit.
    **Note:** The parameters `is_sparse`, `is_distributed`, and `padding_idx` have not been implemented.

    .. code-block:: text

        Case 1:

        input is a Tensor.
            input.data = aby3.make_share([[1, 0, 0], [0, 1, 0]])
            input.shape = [2, 2, 3]
            w.data = aby3.make_share([[1, 2], [2, 3], [3, 4]])
        Given size = [2, 3, 2]
        output is a Tensor:
            out.shape = [2, 2, 2]
            out.data.reconstruct = [[1, 2], [2, 3]]

    Args:
        input(MpcVariable): A Tensor or LoDTensor with type int64, which contains the id information.
            The value of the input id should satisfy :math:`0<= id < size[0]` .
        size(tuple|list): The shape of lookup table parameter. It should have two elements which
            indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
        is_sparse(bool, not implemented): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backwards gradient update. It is recommended to set 
            True because sparse update is faster. But some optimizer does not support sparse update,
            such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` , 
            :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
            :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these case, is_sparse must be False. Default: False.
        is_distributed(bool, not implemented): Whether to store the embedding matrix in a distributed manner. Only used
            in multi-machine distributed CPU training. Default: False.
        padding_idx(int|long|None, not implemented): padding_idx needs to be in the interval [-vocab_size, vocab_size). 
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
            If set None, it makes no effect to output. Default: None.
        param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter. 
            The local word vector needs to be transformed into numpy format, and the shape of local word
            vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors.
        dtype(str|core.VarDesc.VarType.INT64): It refers to the data type of output Tensor.
            It must be int64.

    Returns:
        Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle_fl.mpc as pfl
          import numpy as np
          # data should be mpc one hot tensor
          data = pfl.data(name='x', shape=[4, 3], dtype='int64')

          # example 1
          emb_1 = fluid.embedding(input=data, size=[3, 4])

          # example 2: load custom or pre-trained word vectors
          weight_data = np.random.random(size=(2, 3, 4))  # mpc word vectors with numpy format
          w_param_attrs = fluid.ParamAttr(
              name="emb_weight",
              learning_rate=0.5,
              initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
              trainable=True)
          emb_2 = fluid.embedding(input=data, size=(3, 4), param_attr=w_param_attrs, dtype='int64')
    """

    if is_sparse:
        warnings.warn(
            "the processing of sparse data is the same as that of dense data;"
            " that is, 'is_sparse' is always set to 'False' in paddle_encrypted.")
    if is_distributed:
        warnings.warn(
            "distributed deployment of paddle_encrypted has not been implemented;"
            " that is, 'is_distributed' is always set to 'False' in paddle_encrypted."
        )
    if padding_idx:
        warnings.warn(
            "padding_idx is not supported in paddle_encrypted;"
            " that is, 'padding_idx' is always set to 'None' in paddle_encrypted."
        )
    helper = MpcLayerHelper('embedding', **locals())
    check_variable_and_dtype(input, 'input', ['int64'],
                             'paddle_encrypted.embedding')
    check_dtype(dtype, 'dtype', ['int64'], 'paddle_encrypted.embedding')

    w = helper.create_mpc_parameter(attr=helper.param_attr,
                                    shape=size,
                                    dtype='int64',
                                    is_bias=False)

    tmp = helper.create_mpc_variable_for_type_inference(dtype)
    helper.append_op(type='mpc_lookup_table_v2',
                     inputs={
                         'Ids': input,
                         'W': w
                     },
                     outputs={'Out': tmp},
                     attrs={
                         'is_sparse': False,
                         'is_distributed': False,
                         'remote_prefetch': False,
                         'padding_idx': None
                     })
    return tmp
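For intuition about the one-hot input described above, a plaintext sketch (plain numpy values standing in for the secret shares of the docstring's example): looking up ids via a one-hot matrix is just a matrix product with the embedding table.

import numpy as np

one_hot_ids = np.array([[1, 0, 0],
                        [0, 1, 0]])        # ids 0 and 1, vocab size 3
W = np.array([[1, 2], [2, 3], [3, 4]])     # embedding table, emb size 2
print(one_hot_ids @ W)                     # [[1 2] [2 3]], rows 0 and 1 of W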
Example #8
def precision_recall(input, label, threshold=0.5):
    """
    Precision (also called positive predictive value) is the fraction of
    relevant instances among the retrieved instances.
    Recall (also known as sensitivity) is the fraction of
    relevant instances that have been retrieved over the
    total amount of relevant instances.
    F1-score is a measure of a test's accuracy.
    It is calculated from the precision and recall of the test.
    Refer to:
    https://en.wikipedia.org/wiki/Precision_and_recall
    https://en.wikipedia.org/wiki/F1_score

    Note that this op manages the metrics only for the binary classification task.
    Note that in both precision and recall, 0/0 is defined to be 0.

    Args:
        input (Variable): ciphertext predictions for class 1 in binary classification.
        label (Variable): labels in ciphertext.
        threshold (float): predict threshold.
    Returns:
        batch_out (Variable): plaintext of batch metrics [precision, recall, f1-score].
            Note that values in batch_out are fixed-point numbers.
            To get float values, divide the fetched batch_out by
            3 * mpc_data_utils.mpc_one_share (which equals 2**16).
        acc_out (Variable): plaintext of accumulated metrics [precision, recall, f1-score].
            To get float values, divide the fetched acc_out by
            3 * mpc_data_utils.mpc_one_share (which equals 2**16).

    Examples:
        .. code-block:: python
            import sys
            import numpy as np
            import paddle.fluid as fluid
            import paddle_fl.mpc as pfl_mpc
            import mpc_data_utils as mdu

            role = int(sys.argv[1])

            redis_server = "127.0.0.1"
            redis_port = 9937
            loop = 5
            np.random.seed(0)

            input_size = [100]

            threshold = 0.6

            preds, labels = [], []
            preds_cipher, labels_cipher = [], []
            #simulating mpc share

            share = lambda x: np.array([x * mdu.mpc_one_share] * 2).astype('int64').reshape([2] + input_size)
            for _ in range(loop):

                preds.append(np.random.random(input_size))
                labels.append(np.rint(np.random.random(input_size)))
                preds_cipher.append(share(preds[-1]))
                labels_cipher.append(share(labels[-1]))

            pfl_mpc.init("aby3", role, "localhost", redis_server, redis_port)
            x = pfl_mpc.data(name='x', shape=input_size, dtype='int64')
            y = pfl_mpc.data(name='y', shape=input_size, dtype='int64')
            out0, out1 = pfl_mpc.layers.precision_recall(input=x, label=y, threshold=threshold)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            for i in range(loop):
                batch_res, acc_res = exe.run(feed={'x': preds_cipher[i], 'y': labels_cipher[i]},
                        fetch_list=[out0, out1])
                fixed_point_one = 3.0 * mdu.mpc_one_share
                # results can be verified by calculating metrics with plaintext preds and labels
                print(batch_res / fixed_point_one , acc_res / fixed_point_one)

    """
    helper = MpcLayerHelper("precision_recall", **locals())

    dtype = helper.input_dtype()

    check_dtype(dtype, 'input', ['int64'], 'precision_recall')
    check_dtype(dtype, 'label', ['int64'], 'precision_recall')

    batch_out = helper.create_mpc_variable_for_type_inference(dtype=input.dtype)
    acc_out = helper.create_mpc_variable_for_type_inference(dtype=input.dtype)

    stat = helper.create_global_mpc_variable(
            persistable=True,
            dtype='int64', shape=[3],
            )

    helper.set_variable_initializer(stat, Constant(value=0))

    op_type = 'precision_recall'

    helper.append_op(
        type='mpc_' + op_type,
        inputs={
            "Predicts": input,
            "Labels": label,
            "StatesInfo": stat,
            },
        outputs={
            "BatchMetrics": batch_out,
            "AccumMetrics": acc_out,
            "AccumStatesInfo": stat,
             },
        attrs={
            "threshold": threshold,
            "class_number": 1,
        })

    return batch_out, acc_out
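A plaintext counterpart for verifying the decrypted metrics, as suggested by the comment in the docstring example; the 0/0 convention follows the note above (this is a reference sketch, not the op's implementation):

import numpy as np

def precision_recall_plaintext(preds, labels, threshold=0.5):
    pred_pos = preds > threshold
    tp = np.sum(pred_pos & (labels == 1))
    fp = np.sum(pred_pos & (labels == 0))
    fn = np.sum(~pred_pos & (labels == 1))
    precision = tp / (tp + fp) if (tp + fp) else 0.0   # 0/0 := 0
    recall = tp / (tp + fn) if (tp + fn) else 0.0      # 0/0 := 0
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) else 0.0)
    return precision, recall, f1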
def fused_feedforward(x,
                      linear1_weight,
                      linear2_weight,
                      linear1_bias=None,
                      linear2_bias=None,
                      ln1_scale=None,
                      ln1_bias=None,
                      ln2_scale=None,
                      ln2_bias=None,
                      dropout1_rate=0.5,
                      dropout2_rate=0.5,
                      activation="relu",
                      ln1_epsilon=1e-5,
                      ln2_epsilon=1e-5,
                      pre_layer_norm=False,
                      training=True,
                      mode='upscale_in_train',
                      name=None):
    """
    This is a fusion operator to compute the feed-forward layer in the transformer model
    architecture. This operator only supports running on GPU. The function of the operator
    is consistent with the following pseudo code:

    .. code-block:: python

        residual = src
        if pre_layer_norm:
            src = layer_norm1(src)
        src = linear2(dropout1(activation(linear1(src))))
        src = residual + dropout2(src)
        if not pre_layer_norm:
            src = layer_norm2(src)

    Args:
        x (Tensor): the input tensor, a 3-D tensor; the data type can be float16, float32 or float64; the shape is `[batch\_size, sequence\_length, d\_model]`.
        linear1_weight (Tensor): The weight of first linear, the data type is same as `x`, the shape is `[d\_model, dim\_feedforward]`.
        linear2_weight (Tensor): The weight of second linear, the data type is same as `x`, the shape is `[dim\_feedforward, d\_model]`.
        linear1_bias (Tensor, optional): The bias of first linear, the data type is same as `x`, the shape is `[dim_feedforward]`. Default None.
        linear2_bias (Tensor, optional): The bias of second linear, the data type is same as `x`, the shape is `[d_model]`. Default None.
        ln1_scale (Tensor, optional): the weight of first layer_norm, the data type is float32 or float64, the shape is same as `x`. Default None.
        ln1_bias (Tensor, optional): The bias of first layer_norm, the data type is float32 or float64, the shape is `[d\_model]`. Default None.
        ln2_scale (Tensor, optional): The weight of second layer_norm, the data type is float32 or float64, the shape is same as `x`. Default None.
        ln2_bias (Tensor, optional): The bias of second layer_norm, the data type is float32 or float64, the shape is `[d\_model]`. Default None.
        dropout1_rate (float, optional): The first dropout probability of setting units to zero. Default 0.5.
        dropout2_rate (float, optional): The second dropout probability of setting units to zero. Default 0.5.
        activation (str, optional): The activation. Default "relu".
        ln1_epsilon (float, optional): Small float of first layer_norm added to denominator to avoid dividing by zero. Default is 1e-5.
        ln2_epsilon (float, optional): Small float of second layer_norm added to denominator to avoid dividing by zero. Default is 1e-5.
        pre_layer_norm (bool, optional): whether to add layer_norm in the pre-processing stage (True) or the post-processing stage (False). Default False.
        training (bool, optional): A flag indicating whether it is in the training phase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor, the data type and shape is same as `x`.

    Examples:
        .. code-block:: python

            # required: gpu
            import paddle
            import numpy as np
            x_data = np.random.random((1, 8, 8)).astype("float32")
            linear1_weight_data = np.random.random((8, 8)).astype("float32")
            linear2_weight_data = np.random.random((8, 8)).astype("float32")
            x = paddle.to_tensor(x_data)
            linear1_weight = paddle.to_tensor(linear1_weight_data)
            linear2_weight = paddle.to_tensor(linear2_weight_data)
            out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight)
            print(out.numpy().shape)
            # (1, 8, 8)
    """
    _verify_dropout_rate(dropout1_rate)
    _verify_dropout_rate(dropout2_rate)

    seed = None
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  #semantic transfer

    if in_dygraph_mode():
        if default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        out, _, _, _, _, _, _, _, _, _, _ = _C_ops.fused_feedforward(
            x, None, None, linear1_weight, linear1_bias, linear2_weight,
            linear2_bias, ln1_scale, ln1_bias, ln2_scale, ln2_bias,
            'pre_layer_norm', pre_layer_norm, 'ln1_epsilon', ln1_epsilon,
            'ln2_epsilon', ln2_epsilon, 'act_method', activation,
            'dropout1_rate', dropout1_rate, 'dropout2_rate', dropout2_rate,
            "dropout1_is_test", not training, "dropout2_is_test", not training,
            "dropout1_fix_seed", seed is not None, "dropout2_fix_seed",
            seed is not None, "dropout1_seed", seed
            if seed is not None else 0, "dropout2_seed", seed
            if seed is not None else 0, 'dropout1_implementation', mode,
            'dropout2_implementation', mode)
        return out

    helper = LayerHelper("fused_feedforward")
    dtype = x.dtype
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'fused_feedforward')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                'fused_feedforward')

    out = helper.create_variable_for_type_inference(x.dtype)
    dropout1_mask = helper.create_variable_for_type_inference(
        'uint8', stop_gradient=True)
    dropout2_mask = helper.create_variable_for_type_inference(
        'uint8', stop_gradient=True)
    ln1_mean = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln1_variance = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln2_mean = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln2_variance = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    linear1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    dropout1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    dropout2_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)

    if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
        seed = helper.main_program.random_seed

    helper.append_op(
        type='fused_feedforward',
        inputs={
            'X': x,
            'Linear1Weight': linear1_weight,
            'Linear1Bias': linear1_bias,
            'Linear2Weight': linear2_weight,
            'Linear2Bias': linear2_bias,
            'Ln1Scale': ln1_scale,
            'Ln1Bias': ln1_bias,
            'Ln2Scale': ln2_scale,
            'Ln2Bias': ln2_bias,
        },
        outputs={
            'Out': out,
            'Dropout1Mask': dropout1_mask,
            'Dropout2Mask': dropout2_mask,
            'Ln1Mean': ln1_mean,
            'Ln1Variance': ln1_variance,
            'Ln2Mean': ln2_mean,
            'Ln2Variance': ln2_variance,
            'Linear1Out': linear1_out,
            'Ln1Out': ln1_out,
            'Dropout1Out': dropout1_out,
            'Dropout2Out': dropout2_out,
        },
        attrs={
            'dropout1_rate': dropout1_rate,
            'dropout2_rate': dropout2_rate,
            'act_method': activation,
            'pre_layer_norm': pre_layer_norm,
            'ln1_epsilon': ln1_epsilon,
            'ln2_epsilon': ln2_epsilon,
            'dropout1_is_test': not training,
            'dropout2_is_test': not training,
            'dropout1_fix_seed': seed is not None,
            'dropout2_fix_seed': seed is not None,
            'dropout1_seed': seed if seed is not None else 0,
            'dropout2_seed': seed if seed is not None else 0,
            'dropout1_implementation': mode,
            'dropout2_implementation': mode
        })
    return out
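As a sanity reference, a sketch of the equivalent unfused computation for the post_layer_norm path (pre_layer_norm=False) using standard paddle ops. It mirrors the pseudo code in the docstring with the default relu activation; it is an illustration only and is not claimed to match the fused kernel bit-for-bit.

import paddle
import paddle.nn.functional as F

def feedforward_reference(x, w1, b1, w2, b2, ln2_scale, ln2_bias,
                          dropout1_rate=0.5, dropout2_rate=0.5):
    residual = x
    out = F.linear(x, w1, b1)                       # linear1
    out = F.dropout(F.relu(out), p=dropout1_rate)   # activation + dropout1
    out = F.linear(out, w2, b2)                     # linear2
    out = residual + F.dropout(out, p=dropout2_rate)
    return F.layer_norm(out, out.shape[-1:], ln2_scale, ln2_bias)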
def fused_multi_head_attention(x,
                               qkv_weight,
                               linear_weight,
                               pre_layer_norm=False,
                               pre_ln_scale=None,
                               pre_ln_bias=None,
                               ln_scale=None,
                               ln_bias=None,
                               pre_ln_epsilon=1e-05,
                               qkv_bias=None,
                               linear_bias=None,
                               attn_mask=None,
                               dropout_rate=0.5,
                               attn_dropout_rate=0.5,
                               ln_epsilon=1e-05,
                               training=True,
                               mode='upscale_in_train',
                               name=None):
    """
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attentions to jointly attend
    to information from different representation subspaces. This API only
    supports self-attention. The pseudo code is as follows:

    .. code-block:: python

        if pre_layer_norm:
            out = layer_norm(x)
            out = linear(out) + bias
        else:
            out = linear(x) + bias
        out = transpose(out, perm=[2, 0, 3, 1, 4])
        # extract q, k and v from out.
        q = out[0:1, ::]
        k = out[1:2, ::]
        v = out[2:3, ::]
        out = q * k^t
        out = attn_mask + out
        out = softmax(out)
        out = dropout(out)
        out = out * v
        out = transpose(out, perm=[0, 2, 1, 3])
        out = out_linear(out)
        if pre_layer_norm:
            out = x + dropout(linear_bias + out)
        else:
            out = layer_norm(x + dropout(linear_bias + out))

    Parameters:
        x (Tensor): The input tensor of fused_multi_head_attention. The shape is
            `[batch\_size, sequence\_len, embed\_dim]`.
        qkv_weight (Tensor): The qkv weight tensor. The shape is `[3, num_head, dim_head, dim_embed]`.
        linear_weight (Tensor): The linear weight tensor. The shape is `[embed_dim, embed_dim]`.
        pre_layer_norm (bool, optional): whether it is pre_layer_norm (True) or post_layer_norm
            architecture (False). Default False.
        pre_ln_scale (Tensor, optional): The weight tensor of pre layernorm. Default None.
        pre_ln_bias (Tensor, optional): The bias tensor of pre layernorm. Default None.
        ln_scale (Tensor, optional): The weight tensor of layernorm. Default None.
        ln_bias (Tensor, optional): The bias tensor of layernorm. Default None.
        pre_ln_epsilon (float, optional): Small float value added to denominator of the pre layer_norm
            to avoid dividing by zero. Default is 1e-5.
        qkv_bias (Tensor, optional): The bias of qkv computation. The shape is `[3, num_head, dim_head]`.
            Default None.
        linear_bias (Tensor, optional): The bias of linear. The shape is `[embed_dim]`. Default None.
        attn_mask (Tensor, optional): A tensor used in multi-head attention to prevent attention to
            some unwanted positions, usually the paddings or the subsequent positions. It is a tensor
            with shape broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`. When the
            data type is bool, the unwanted positions have `False` values and the others have `True` values.
            When the data type is int, the unwanted positions have 0 values and the others have 1 values.
            When the data type is float, the unwanted positions have `-INF` values and the others have 0 values.
            It can be None when nothing wanted or needed to be prevented attention to. Default None.
        dropout_rate (float, optional): The dropout probability used on attention
            weights to drop some attention targets for the dropout after attention.
            0 for no dropout. Default 0.5.
        attn_dropout_rate (float, optional): The dropout probability used on attention
            weights to drop some attention targets for the dropout in attention.
            0 for no dropout. Default 0.5.
        ln_epsilon (float, optional): Small float value added to denominator of layer_norm
            to avoid dividing by zero. Default is 1e-5.
        training (bool, optional): A flag indicating whether it is in the training phase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor, the data type and shape is same as `x`.

    Examples:

        .. code-block:: python

            # required: gpu
            import paddle
            import paddle.incubate.nn.functional as F

            # input: [batch_size, seq_len, embed_dim]
            x = paddle.rand(shape=(2, 4, 128), dtype="float32")
            # qkv_weight: [3, num_head, head_dim, embed_dim]
            qkv_weight = paddle.rand(shape=(3, 4, 32, 128), dtype="float32")
            # qkv_bias: [3, num_head, head_dim]
            qkv_bias = paddle.rand(shape=(3, 4, 32), dtype="float32")
            # linear_weight: [embed_dim, embed_dim]
            linear_weight = paddle.rand(shape=(128, 128), dtype="float32")
            # linear_bias: [embed_dim]
            linear_bias = paddle.rand(shape=[128], dtype="float32")
            # self attention mask: [batch_size, num_heads, seq_len, seq_len]
            attn_mask = paddle.rand(shape=(2, 4, 4, 4), dtype="float32")

            # output: [batch_size, seq_len, embed_dim]
            output = F.fused_multi_head_attention(
                x, qkv_weight, linear_weight, False,
                None, None, None, None, 1e-5, qkv_bias,
                linear_bias, attn_mask)
            # [2, 4, 128]
            print(output.shape)
    """

    seed = None
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  #semantic transfer

    if in_dygraph_mode():
        if default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        # pre_ln_mean, pre_ln_variance, pre_ln_out, qkv_out, qkv_bias_out, transpose_out, qk_out,
        # qktv_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, attn_mask_out, fmha_out,
        # linear_out, dropout_mask_out, ln_mean_out, ln_var_out, bias_dropout_residual_out, final_out
        assert len(qkv_weight.shape
                   ) == 4, "The dims of the shape of qkv_weight should be 4."
        assert qkv_weight.shape[
            0] == 3, "The shape of qkv_weight should be [3, num_head, head_dim, embed_dim]."
        assert qkv_weight.shape[3] == x.shape[
            2], "The last dim of qkv_weight and the last dim of x should be the same, i.e., embed_dim."
        assert qkv_weight.shape[1] * qkv_weight.shape[2] == qkv_weight.shape[
            3], "embed_dim must be divisible by num_heads."

        _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, final_out = _C_ops.fused_attention(
            x, pre_ln_scale, pre_ln_bias, qkv_weight, qkv_bias, attn_mask,
            linear_weight, linear_bias, ln_scale, ln_bias, 'pre_layer_norm',
            pre_layer_norm, 'epsilon', pre_ln_epsilon, 'dropout_rate',
            dropout_rate, 'attn_dropout_rate', attn_dropout_rate, 'ln_epsilon',
            ln_epsilon, 'attn_dropout_is_test', not training, 'dropout_is_test',
            not training, 'attn_dropout_fix_seed', seed is not None,
            'dropout_fix_seed', seed is not None, 'attn_dropout_seed', seed
            if seed is not None else 0, 'dropout_seed', seed
            if seed is not None else 0, 'attn_dropout_implementation', mode,
            'dropout_implementation', mode)
        return final_out
    else:
        helper = LayerHelper('fused_multi_head_attention', **locals())
        dtype = x.dtype
        # check dtypes
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'fused_multihead_attention')
        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                    'fused_multi_head_attention')

        # set inputs
        inputs = dict()
        inputs['X'] = [x]
        if pre_ln_scale:
            inputs['LnScale'] = [pre_ln_scale]
        if pre_ln_bias:
            inputs['LnBias'] = [pre_ln_bias]
        inputs['QKVW'] = [qkv_weight]
        if qkv_bias is not None:
            inputs['QKVBias'] = [qkv_bias]
        inputs['SrcMask'] = attn_mask
        inputs['OutLinearW'] = [linear_weight]
        if linear_bias is not None:
            inputs['OutLinearBias'] = [linear_bias]
        if ln_scale:
            inputs['Ln2Scale'] = [ln_scale]
        if ln_bias:
            inputs['Ln2Bias'] = [ln_bias]

        if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
            seed = helper.main_program.random_seed

        # set attrs
        attrs = {
            'pre_layer_norm': pre_layer_norm,
            'epsilon': pre_ln_epsilon,
            'ln_epsilon': ln_epsilon,
            'dropout_rate': dropout_rate,
            'attn_dropout_rate': attn_dropout_rate,
            'attn_dropout_is_test': not training,
            'dropout_is_test': not training,
            'attn_dropout_fix_seed': seed is not None,
            'dropout_fix_seed': seed is not None,
            'attn_dropout_seed': seed if seed is not None else 0,
            'dropout_seed': seed if seed is not None else 0,
            'attn_dropout_implementation': mode,
            'dropout_implementation': mode,
        }

        # set outputs
        pre_ln_mean_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        pre_ln_variance_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        pre_ln_out = helper.create_variable_for_type_inference(dtype=dtype)

        qkv_out = helper.create_variable_for_type_inference(dtype=dtype)
        qkv_bias_out = helper.create_variable_for_type_inference(dtype=dtype)

        transpose_out = helper.create_variable_for_type_inference(dtype=dtype)
        qk_out = helper.create_variable_for_type_inference(dtype=dtype)
        qktv_out = helper.create_variable_for_type_inference(dtype=dtype)
        softmax_out = helper.create_variable_for_type_inference(dtype=dtype)
        attn_dropout_mask_out = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
        attn_dropout_out = helper.create_variable_for_type_inference(
            dtype=dtype)
        attn_mask_out = helper.create_variable_for_type_inference(dtype=dtype)
        fmha_out = helper.create_variable_for_type_inference(dtype=dtype)
        out_linear_out = helper.create_variable_for_type_inference(dtype=dtype)
        dropout_mask_out = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
        ln_mean_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        ln_variance_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        bias_dropout_residual_out = helper.create_variable_for_type_inference(
            dtype=dtype)
        final_out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='fused_attention',
            inputs=inputs,
            outputs={
                "LnMean": pre_ln_mean_out,
                "LnVariance": pre_ln_variance_out,
                "LnOut": pre_ln_out,
                "QKVOut": qkv_out,
                "QKVBiasOut": qkv_bias_out,
                "TransposeOut2": transpose_out,
                "QKOut": qk_out,
                "QKTVOut": qktv_out,
                "SoftmaxOut": softmax_out,
                "AttnDropoutMaskOut": attn_dropout_mask_out,
                "AttnDropoutOut": attn_dropout_out,
                "SrcMaskOut": attn_mask_out,
                "FMHAOut": fmha_out,
                "OutLinearOut": out_linear_out,
                "DropoutMaskOut": dropout_mask_out,
                "Ln2Mean": ln_mean_out,
                "Ln2Variance": ln_variance_out,
                "BiasDropoutResidualOut": bias_dropout_residual_out,
                'Y': final_out
            },
            attrs=attrs)
        return final_out
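For reference, a minimal usage sketch of the functional wrapped by the code above (a sketch only, assuming it is exposed as `paddle.incubate.nn.functional.fused_multi_head_attention` and that a GPU build of Paddle is available; the tensor shapes follow the input comments used throughout this listing):

# required: gpu
import paddle
import paddle.incubate.nn.functional as F

# x: [batch_size, seq_len, embed_dim]
x = paddle.rand(shape=(2, 4, 128), dtype="float32")
# qkv_weight: [3, num_head, head_dim, embed_dim], qkv_bias: [3, num_head, head_dim]
qkv_weight = paddle.rand(shape=(3, 4, 32, 128), dtype="float32")
qkv_bias = paddle.rand(shape=(3, 4, 32), dtype="float32")
# linear_weight: [embed_dim, embed_dim], linear_bias: [embed_dim]
linear_weight = paddle.rand(shape=(128, 128), dtype="float32")
linear_bias = paddle.rand(shape=(128,), dtype="float32")
# attn_mask: [batch_size, num_head, seq_len, seq_len]
attn_mask = paddle.rand(shape=(2, 4, 4, 4), dtype="float32")

# output: [batch_size, seq_len, embed_dim] -> [2, 4, 128]
output = F.fused_multi_head_attention(
    x, qkv_weight, linear_weight,
    qkv_bias=qkv_bias, linear_bias=linear_bias, attn_mask=attn_mask)
print(output.shape)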
Beispiel #11
0
def fused_feedforward(x,
                      linear1_weight,
                      linear2_weight,
                      linear1_bias=None,
                      linear2_bias=None,
                      ln1_scale=None,
                      ln1_bias=None,
                      ln2_scale=None,
                      ln2_bias=None,
                      dropout1_rate=0.5,
                      dropout2_rate=0.5,
                      activation="relu",
                      ln1_epsilon=1e-5,
                      ln2_epsilon=1e-5,
                      pre_layer_norm=False,
                      training=True,
                      mode='upscale_in_train',
                      ring_id=-1,
                      name=None):
    seed = None
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  #semantic transfer

    helper = LayerHelper("fused_feedforward")
    dtype = x.dtype
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'fused_feedforward')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                'fused_feedforward')

    out = helper.create_variable_for_type_inference(x.dtype)
    dropout1_mask = helper.create_variable_for_type_inference(
        'uint8', stop_gradient=True)
    dropout2_mask = helper.create_variable_for_type_inference(
        'uint8', stop_gradient=True)
    ln1_mean = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln1_variance = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln2_mean = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln2_variance = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    linear1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    ln1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    dropout1_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)
    dropout2_out = helper.create_variable_for_type_inference(
        x.dtype, stop_gradient=True)

    if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
        seed = helper.main_program.random_seed

    helper.append_op(
        type='fused_feedforward',
        inputs={
            'X': x,
            'Linear1Weight': linear1_weight,
            'Linear1Bias': linear1_bias,
            'Linear2Weight': linear2_weight,
            'Linear2Bias': linear2_bias,
            'Ln1Scale': ln1_scale,
            'Ln1Bias': ln1_bias,
            'Ln2Scale': ln2_scale,
            'Ln2Bias': ln2_bias,
        },
        outputs={
            'Out': out,
            'Dropout1Mask': dropout1_mask,
            'Dropout2Mask': dropout2_mask,
            'Ln1Mean': ln1_mean,
            'Ln1Variance': ln1_variance,
            'Ln2Mean': ln2_mean,
            'Ln2Variance': ln2_variance,
            'Linear1Out': linear1_out,
            'Ln1Out': ln1_out,
            'Dropout1Out': dropout1_out,
            'Dropout2Out': dropout2_out,
        },
        attrs={
            'dropout1_rate': dropout1_rate,
            'dropout2_rate': dropout2_rate,
            'act_method': activation,
            'pre_layer_norm': pre_layer_norm,
            'ln1_epsilon': ln1_epsilon,
            'ln2_epsilon': ln2_epsilon,
            'dropout1_is_test': not training,
            'dropout2_is_test': not training,
            'dropout1_fix_seed': seed is not None,
            'dropout2_fix_seed': seed is not None,
            'dropout1_seed': seed if seed is not None else 0,
            'dropout2_seed': seed if seed is not None else 0,
            'dropout1_implementation': mode,
            'dropout2_implementation': mode,
            'ring_id': ring_id,
        })
    return out
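A corresponding minimal usage sketch for this fused feed-forward functional (again assuming the `paddle.incubate.nn.functional` entry point and a GPU build; the weight shapes follow the `Linear1Weight`/`Linear2Weight` inputs above):

# required: gpu
import paddle
import paddle.incubate.nn.functional as F

# x: [batch_size, seq_len, d_model]
x = paddle.rand(shape=(1, 8, 8), dtype="float32")
# linear1_weight: [d_model, dim_feedforward], linear2_weight: [dim_feedforward, d_model]
linear1_weight = paddle.rand(shape=(8, 8), dtype="float32")
linear2_weight = paddle.rand(shape=(8, 8), dtype="float32")

out = F.fused_feedforward(x, linear1_weight, linear2_weight)
print(out.shape)  # [1, 8, 8]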
Beispiel #12
0
    def forward(ctx, *args, **kwargs):
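        # This dist-op forward gathers the (possibly sharded) input of p_norm
        # across the process group of its sharded mesh axis before running the
        # original op:
        #   1. a fill_constant + barrier pair synchronizes the ranks in the group,
        #   2. c_allgather collects the sharded X into a replicated tensor,
        #   3. the original p_norm op desc is copied and rewired to the gathered input.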

        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert op_dist_attr is not None

        # check validation of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensors for input [{}] does not match".format(
                input_name)
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name)

        if rank_id not in op_dist_attr.process_mesh.processes:
            rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
                                              rank_id)

        X_var = main_block.var(kwargs['X'][0])
        in_dims_mapping = op_dist_attr.get_input_dims_mapping(X_var.name)
        for axis in range(len(in_dims_mapping)):
            if in_dims_mapping[axis] != -1:
                break
        process_mesh_shape = op_dist_attr.process_mesh.topology
        process_mesh_group = op_dist_attr.process_mesh.processes
        group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
                                      axis, rank_id)
        group = new_process_group(group_ranks)

        check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'],
                                 'norm')
        check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'],
                    'norm')

        # 1. insert barrier op
        ref_process_mesh = op_dist_attr.process_mesh
        constant_out_dims_mapping = [-1]
        fill_constant_out, fill_constant_op = _insert_fill_constant_op(
            main_block, src_op.attr('op_role'))
        # set fill_constant_out tensor dist_attr
        constant_out_dist_attr = TensorDistributedAttribute()
        constant_out_dist_attr.process_mesh = ref_process_mesh
        constant_out_dist_attr.dims_mapping = constant_out_dims_mapping
        ctx.set_tensor_dist_attr_for_program(fill_constant_out,
                                             constant_out_dist_attr)
        # set fill_constant op dist_attr
        constant_op_dist_attr = OperatorDistributedAttribute()
        constant_op_dist_attr.process_mesh = ref_process_mesh
        constant_op_dist_attr.set_output_dims_mapping(
            fill_constant_out.name, constant_out_dims_mapping)
        ctx.set_op_dist_attr_for_program(fill_constant_op,
                                         constant_op_dist_attr)
        barrier_op = main_block.append_op(type='barrier',
                                          inputs={'X': [fill_constant_out]},
                                          outputs={'Out': [fill_constant_out]},
                                          attrs={'ring_id': group.id})
        # set barrier op dist attr
        set_comm_op_dist_attr_for_program(barrier_op, ref_process_mesh,
                                          constant_out_dist_attr, ctx)

        # 2. insert c_allgather op
        # create c_allgather output var
        allgather_out = main_block.create_var(
            name=".".join(["c_allgather", X_var.name]),
            dtype=X_var.dtype,
            shape=X_var.shape,
            type=core.VarDesc.VarType.LOD_TENSOR,
            persistable=False,
            stop_gradient=X_var.stop_gradient)
        # set allgather_out tensor dist_attr
        allgather_out_dist_attr = TensorDistributedAttribute()
        allgather_out_dist_attr.process_mesh = op_dist_attr.process_mesh
        allgather_out_dist_attr.dims_mapping = [
            -1 for i in range(len(allgather_out.shape))
        ]
        ctx.set_tensor_dist_attr_for_program(allgather_out,
                                             allgather_out_dist_attr)
        c_allgather_op = main_block.append_op(type='c_allgather',
                                              inputs={'X': [X_var]},
                                              outputs={'Out': [allgather_out]},
                                              attrs={
                                                  'ring_id': group.id,
                                                  'use_calc_stream': True,
                                                  'nranks': group.nranks,
                                                  'op_role':
                                                  src_op.attr('op_role')
                                              })
        # set c_allgather op dist_attr
        allgather_op_dist_attr = OperatorDistributedAttribute()
        allgather_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        allgather_op_dist_attr.set_input_dims_mapping(X_var.name,
                                                      in_dims_mapping)
        allgather_op_dist_attr.set_output_dims_mapping(
            allgather_out.name, allgather_out_dist_attr.dims_mapping)
        ctx.set_op_dist_attr_for_program(c_allgather_op,
                                         allgather_op_dist_attr)

        # 3. copy p_norm op desc and reset input name
        # rename input
        kwargs['X'] = [allgather_out.name]
        # replicate op in dist program
        dist_op_desc = main_block.desc.append_op()
        dist_op_desc.copy_from(src_op.desc)
        set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
        for input_name in src_op.desc.input_names():
            dist_op_desc.set_input(input_name, kwargs[input_name])
        for output_name in src_op.desc.output_names():
            dist_op_desc.set_output(output_name, kwargs[output_name])
        pnorm_op = Operator(main_block, dist_op_desc)
        op_dist_attr.set_input_dims_mapping(
            allgather_out.name, allgather_out_dist_attr.dims_mapping)
        ctx.set_op_dist_attr_for_program(pnorm_op, op_dist_attr)

        main_block._sync_with_cpp()
Beispiel #13
0
def dynamic_gru(input,
                size,
                param_attr=None,
                bias_attr=None,
                is_reverse=False,
                gate_activation='sigmoid',
                candidate_activation='relu',
                h_0=None,
                origin_mode=False):
    """
    **Note: The input of this operator must be a LoDTensor. If the input to be
    processed is a Tensor, use** :ref:`api_fluid_layers_StaticRNN` .

    This operator is used to perform the calculations for a single layer of
    Gated Recurrent Unit (GRU) on full sequences step by step. The calculations
    in one time step support these two modes:

    If ``origin_mode`` is True, then the formula used is from paper
    `Learning Phrase Representations using RNN Encoder Decoder for Statistical
    Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_ .

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = u_t \odot h_{t-1} + (1-u_t) \odot \\tilde{h_t}


    if ``origin_mode`` is False, then the formula used is from paper
    `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
    Modeling  <https://arxiv.org/pdf/1412.3555.pdf>`_

    .. math::

        u_t & = act_g(W_{ux}x_{t} + W_{uh}h_{t-1} + b_u)

        r_t & = act_g(W_{rx}x_{t} + W_{rh}h_{t-1} + b_r)

        \\tilde{h_t} & = act_c(W_{cx}x_{t} + W_{ch}(r_t \odot h_{t-1}) + b_c)

        h_t & = (1-u_t) \odot h_{t-1} + u_t \odot \\tilde{h_t}

    :math:`x_t` is the input of the current time step, but it is not taken from ``input`` .
    This operator does not include the calculations :math:`W_{ux}x_{t}, W_{rx}x_{t}, W_{cx}x_{t}` .
    **Note**: a fully-connected layer whose size is 3 times ``size`` should therefore
    be used before this operator, and its output should be passed as ``input`` here.
    :math:`h_{t-1}` is the hidden state from previous time step. 
    :math:`u_t` , :math:`r_t` , :math:`\\tilde{h_t}` and :math:`h_t` stand for
    update gate, reset gate, candidate hidden and hidden output separately.
    :math:`W_{uh}, b_u` , :math:`W_{rh}, b_r` and :math:`W_{ch}, b_c` stand for
    the weight matrix and bias used in update gate, reset gate, candidate hidden
    calculations. For implementation, the three weight matrix are merged into a
    tensor shaped :math:`[D, D \\times 3]` , the three bias are concatenated as
    a tensor shaped :math:`[1, D \\times 3]` , where :math:`D` stands for the
    hidden size; The data layout of weight tensor is: :math:`W_{uh}` and :math:`W_{rh}`
    are concatenated with shape :math:`[D, D  \\times 2]` lying on the first part,
    and :math:`W_{ch}` lying on the latter part with shape :math:`[D, D]` .


    Args:
        input(Variable): A LoDTensor whose lod level is 1, representing the input
            after linear projection. Its shape should be :math:`[T, 2, D \\times 3]` ,
            i.e. the mpc input transposed by axes {1, 0, 2} with the lod set for the
            mpc shares, where :math:`T` stands for the total sequence length in this
            mini-batch and :math:`D` for the hidden size. The data type should be int64.
        size(int): Indicate the hidden size.
        param_attr(ParamAttr, optional):  To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            See usage for details in :ref:`api_fluid_ParamAttr` .
        is_reverse(bool, optional): Whether to compute in the reversed order of
            input sequences. Default False.
        gate_activation(str, optional): The activation function corresponding to
            :math:`act_g` in the formula. Only 'sigmoid' is supported now.
        candidate_activation(str, optional): The activation function corresponding to
            :math:`act_c` in the formula. Only "relu" is supported now.
        h_0 (Variable, optional): A Tensor representing the initial hidden state.
            If not provided, the default initial hidden state is 0. The shape is
            :math:`[2, N, D]` , where :math:`N` is the number of sequences in the
            mini-batch and :math:`D` is the hidden size. The data type should be
            the same as ``input`` . Default None.

    Returns:
        Variable: A LoDTensor whose lod level is 1 and shape is :math:`[2, T, D]` , \
            where :math:`T` stands for the total sequence length in this mini-batch \
            and :math:`D` for the hidden size. It represents the GRU transformed sequence output, \
            and has the same lod and data type as ``input`` .

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            dict_dim, emb_dim = 128, 64
            data = fluid.data(name='sequence',
                      shape=[None],
                      dtype='int64',
                      lod_level=1)
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim = 512
            x = fluid.layers.fc(input=emb, size=hidden_dim * 3)
            hidden = fluid.layers.dynamic_gru(input=x, size=hidden_dim)
    """

    assert in_dygraph_mode(
    ) is not True, "please use gru instead of dynamic_gru in dygraph mode!"

    helper = MpcLayerHelper('mpc_gru', **locals())
    dtype = helper.input_dtype()

    check_variable_and_dtype(input, 'input', ['int64'], 'mpc_gru')
    check_dtype(dtype, 'dtype', ['int64'], 'mpc_gru')

    weight = helper.create_mpc_parameter(attr=helper.param_attr,
                                         shape=[size, 3 * size],
                                         dtype=dtype)
    bias = helper.create_mpc_parameter(attr=helper.bias_attr,
                                       shape=[1, 3 * size],
                                       dtype=dtype,
                                       is_bias=True)
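    # NOTE: following the weight layout described in the docstring, `weight`
    # merges [W_uh, W_rh] (the first 2 * size columns) with W_ch (the last
    # size columns), and `bias` concatenates [b_u, b_r, b_c] into [1, 3 * size].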
    batch_size = input.shape[0]
    inputs = {'Input': input, 'Weight': weight, 'Bias': bias}
    if h_0:
        assert h_0.shape == (
            2, batch_size,
            size), 'The shape of h_0 should be (2, batch_size, %d)' % size
        inputs['H0'] = h_0

    hidden = helper.create_mpc_variable_for_type_inference(dtype)
    batch_gate = helper.create_mpc_variable_for_type_inference(dtype)
    batch_reset_hidden_prev = helper.create_mpc_variable_for_type_inference(
        dtype)
    batch_hidden = helper.create_mpc_variable_for_type_inference(dtype)

    helper.append_op(type='mpc_gru',
                     inputs=inputs,
                     outputs={
                         'Hidden': hidden,
                         'BatchGate': batch_gate,
                         'BatchResetHiddenPrev': batch_reset_hidden_prev,
                         'BatchHidden': batch_hidden
                     },
                     attrs={
                         'is_reverse': is_reverse,
                         'gate_activation': gate_activation,
                         'activation': candidate_activation,
                         'origin_mode': origin_mode
                     })
    return hidden
Beispiel #14
0
def fused_multi_transformer(x,
                            ln_scales,
                            ln_biases,
                            qkv_weights,
                            qkv_biases,
                            linear_weights,
                            linear_biases,
                            ffn_ln_scales,
                            ffn_ln_biases,
                            ffn1_weights,
                            ffn1_biases,
                            ffn2_weights,
                            ffn2_biases,
                            pre_layer_norm=True,
                            epsilon=1e-05,
                            cache_kvs=None,
                            time_step=None,
                            attn_mask=None,
                            dropout_rate=0.0,
                            activation="gelu",
                            training=False,
                            mode='upscale_in_train',
                            ring_id=-1,
                            name=None):
    r"""
    This is a fusion operator to compute multi transformer layers in transformer model architecture.
    This operator only supports running on GPU. The function of the transformer layer is consistent
    with the following pseudo code:

    .. code-block:: python

        if pre_layer_norm:
            out = layer_norm(x)
            out = qkv_linear(out) + qkv_bias
        else:
            out = qkv_linear(x) + qkv_bias
        out = transpose(out, perm=[2, 0, 3, 1, 4])
        # extract q, k and v from out.
        q = out[0:1, ::]
        k = out[1:2, ::]
        v = out[2:3, ::]
        out = q * k^t
        out = attn_mask + out
        out = softmax(out)
        out = dropout(out)
        out = out * v
        out = transpose(out, perm=[0, 2, 1, 3])
        out = linear(out)
        if pre_layer_norm:
            out = x + dropout(out + bias)
        else:
            out = layer_norm(x + dropout(out + bias))

        residual = out;
        if pre_layer_norm:
            out = ffn_layer_norm(out)
        out = ffn1_linear(out)
        out = dropout(activation(out + ffn1_bias))
        out = ffn2_linear(out)
        out = residual + dropout(out + ffn2_bias)
        if not pre_layer_norm:
            out = ffn_layer_norm(out)

    Args:
        x (Tensor): the input tensor. It should be a 3-D tensor with data type float16 or float32 and shape `[batch\_size, sequence\_length, d\_model]`.
        ln_scales (list(Tensor)|tuple(Tensor)): The weight tensors of attention layer_norm, the shape is `[d\_model]`.
        ln_biases (list(Tensor)|tuple(Tensor)): The bias tensors of attention layer_norm, the shape is `[d\_model]`.
        qkv_weights (list(Tensor)|tuple(Tensor)): The weight tensors of attention qkv computation. The shape is `[3, num\_head, dim\_head, d\_model]`.
        qkv_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of attention qkv computation. The shape is `[3, num\_head, dim\_head]`.
        linear_weights (list(Tensor)|tuple(Tensor)): The weight tensors of attention linear. The shape is `[num\_head * dim\_head, d\_model]`.
        linear_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of attention linear. The shape is `[d\_model]`.
        ffn_ln_scales (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward layer_norm, the shape is `[d\_model]`
        ffn_ln_biases (list(Tensor)|tuple(Tensor)): The bias tensors of feedforward layer_norm, the shape is `[d\_model]`
        ffn1_weights (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward first linear, the shape is `[d\_model, dim\_feedforward]`.
        ffn1_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of feedforward first linear, the shape is `[dim\_feedforward]`.
        ffn2_weights (list(Tensor)|tuple(Tensor)): The weight tensors of feedforward second linear, the shape is `[dim\_feedforward, d\_model]`.
        ffn2_biases (list(Tensor)|tuple(Tensor)|None): The bias tensors of feedforward second linear, the shape is `[d_model]`.
        pre_layer_norm (bool, optional): whether it is pre_layer_norm(True) or post_layer_norm(False). Default True.
        epsilon (float, optional): Small float value added to denominator of the layer_norm to avoid dividing by zero. Default is 1e-5.
        cache_kvs (list(Tensor)|tuple(Tensor), optional): The cache structure tensors for the generation model. The shape is `[2, bsz, num\_head, max\_seq\_len, head\_dim]`. Default None.
        time_step (Tensor, optional): The time step tensor for the generation model. It is used in the decode stage to represent the current time step, that is, the real seq_len of CacheKV. The shape is `[1]` and it must be placed in CPUPlace. Default None.
        attn_mask (Tensor, optional): A tensor used in multi-head attention to prevent attention to
            some unwanted positions, usually the paddings or the subsequent positions. It is a tensor
            with shape `[batch_size, 1, sequence_length, sequence_length]`. Default None.
        dropout_rate (float, optional): The dropout probability of setting units to zero. Default 0.0.
        activation (str, optional): The activation. Default "gelu".
        training (bool, optional): A flag indicating whether it is in the training phase or not. Default False.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        ring_id (int, optional): For distributed forward in tensor model parallel, only NCCL is supported. Default is -1, which means mp is not used.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor|tuple: If `cache_kvs` is None, return a tensor that has
        the same shape and data type as `x`, representing the output
        of the Transformer layers. If `cache_kvs` is not None, return the
        tuple (output, cache_kvs), where output is the output of the
        Transformer layers and cache_kvs is updated in place with the input `cache_kvs`.

    Examples:
        .. code-block:: python

            # required: gpu
            import paddle
            import paddle.incubate.nn.functional as F
            import numpy as np

            # input: [batch_size, seq_len, embed_dim]
            x = paddle.rand(shape=(2, 4, 128), dtype="float32")

            # ln_scale: [embed_dim], ln_bias: [embed_dim]
            ln_scale = paddle.rand(shape=(128,), dtype="float32")
            ln_bias = paddle.rand(shape=(128,), dtype="float32")

            # qkv_weight: [3, num_head, head_dim, embed_dim], qkv_bias: [3, num_head, head_dim]
            qkv_weight = paddle.rand(shape=(3, 4, 32, 128), dtype="float32")
            qkv_bias = paddle.rand(shape=(3, 4, 32), dtype="float32")

            # linear_weight: [embed_dim, embed_dim], linear_bias: [embed_dim]
            linear_weight = paddle.rand(shape=(128, 128), dtype="float32")
            linear_bias = paddle.rand(shape=(128,), dtype="float32")

            # ffn_ln_scale: [embed_dim], ffn_ln_bias: [embed_dim]
            ffn_ln_scale = paddle.rand(shape=(128,), dtype="float32")
            ffn_ln_bias = paddle.rand(shape=(128,), dtype="float32")

            # ffn1_weight: [embed_dim, 4*embed_dim], ffn1_bias: [4*embed_dim]
            ffn1_weight = paddle.rand(shape=(128, 4*128), dtype="float32")
            ffn1_bias = paddle.rand(shape=(4*128,), dtype="float32")

            # ffn2_weight: [4*embed_dim, embed_dim], ffn2_bias: [embed_dim]
            ffn2_weight = paddle.rand(shape=(4*128, 128), dtype="float32")
            ffn2_bias = paddle.rand(shape=(128,), dtype="float32")

            # self attention mask: [batch_size, 1, seq_len, seq_len]
            attn_mask = paddle.rand(shape=(2, 1, 4, 4), dtype="float32")

            # output: [batch_size, seq_len, embed_dim]
            output = F.fused_multi_transformer(
                x, [ln_scale], [ln_bias], [qkv_weight], [qkv_bias],
                [linear_weight], [linear_bias], [ffn_ln_scale], [ffn_ln_bias],
                [ffn1_weight], [ffn1_bias], [ffn2_weight], [ffn2_bias],
                attn_mask=attn_mask)
            # [2, 4, 128]
            print(output.shape)
    """
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'"
        )
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  #semantic transfer

    if _non_static_mode():
        cache_kv_out, final_out = _C_ops.fused_multi_transformer(
            x, ln_scales, ln_biases, qkv_weights, qkv_biases, cache_kvs,
            time_step, attn_mask, linear_weights, linear_biases, ffn_ln_scales,
            ffn_ln_biases, ffn1_weights, ffn1_biases, ffn2_weights,
            ffn2_biases, cache_kvs, 'pre_layer_norm', pre_layer_norm,
            'epsilon', epsilon, 'dropout_rate', dropout_rate, 'is_test',
            not training, 'dropout_implementation', mode, 'act_method',
            activation, 'ring_id', ring_id)
        if cache_kvs is not None:
            return final_out, cache_kv_out
        return final_out
    else:
        helper = LayerHelper('fused_multi_transformer', **locals())
        dtype = x.dtype
        # check dtypes
        check_variable_and_dtype(x, 'x', ['float16', 'float32'],
                                 'fused_multi_transformer')
        check_dtype(dtype, 'dtype', ['float16', 'float32'],
                    'fused_multi_transformer')

        # set inputs
        inputs = dict()
        inputs['X'] = [x]
        inputs['LnScale'] = ln_scales
        inputs['LnBias'] = ln_biases
        inputs['QKVW'] = qkv_weights
        if qkv_biases is not None:
            inputs['QKVBias'] = qkv_biases
        if cache_kvs is not None:
            assert len(cache_kvs) == len(qkv_weights)
            inputs['CacheKV'] = cache_kvs
            if time_step is not None:
                inputs['TimeStep'] = time_step
        inputs['SrcMask'] = attn_mask
        inputs['OutLinearW'] = linear_weights
        if linear_biases is not None:
            inputs['OutLinearBias'] = linear_biases

        inputs['FFNLnScale'] = ffn_ln_scales
        inputs['FFNLnBias'] = ffn_ln_biases
        inputs['FFN1Weight'] = ffn1_weights
        if ffn1_biases is not None:
            inputs['FFN1Bias'] = ffn1_biases
        inputs['FFN2Weight'] = ffn2_weights
        if ffn2_biases is not None:
            inputs['FFN2Bias'] = ffn2_biases

        # set attrs
        attrs = {
            'pre_layer_norm': pre_layer_norm,
            'epsilon': epsilon,
            'dropout_rate': dropout_rate,
            'is_test': not training,
            'dropout_implementation': mode,
            'act_method': activation,
            'ring_id': ring_id
        }

        outputs = dict()
        final_out = helper.create_variable_for_type_inference(dtype=dtype)
        outputs['Out'] = final_out
        if cache_kvs:
            # NOTE: inplace
            outputs['CacheKVOut'] = cache_kvs

        helper.append_op(type='fused_multi_transformer',
                         inputs=inputs,
                         outputs=outputs,
                         attrs=attrs)

        return (final_out, cache_kvs) if cache_kvs else final_out
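Since every weight and bias argument of this functional is a per-layer list, stacking several transformer layers only requires passing lists of equal length. A self-contained sketch with hypothetical shapes (assuming the functional is exported as `paddle.incubate.nn.functional.fused_multi_transformer` and a GPU build is available):

# required: gpu
import paddle
import paddle.incubate.nn.functional as F

num_layers = 2
batch, seq, d_model, num_head, head_dim = 2, 4, 64, 4, 16

x = paddle.rand(shape=(batch, seq, d_model), dtype="float32")
attn_mask = paddle.rand(shape=(batch, 1, seq, seq), dtype="float32")

# one entry per transformer layer in every list below
ln_scales = [paddle.ones([d_model]) for _ in range(num_layers)]
ln_biases = [paddle.zeros([d_model]) for _ in range(num_layers)]
qkv_weights = [paddle.rand(shape=(3, num_head, head_dim, d_model), dtype="float32")
               for _ in range(num_layers)]
qkv_biases = [paddle.zeros([3, num_head, head_dim]) for _ in range(num_layers)]
linear_weights = [paddle.rand(shape=(num_head * head_dim, d_model), dtype="float32")
                  for _ in range(num_layers)]
linear_biases = [paddle.zeros([d_model]) for _ in range(num_layers)]
ffn_ln_scales = [paddle.ones([d_model]) for _ in range(num_layers)]
ffn_ln_biases = [paddle.zeros([d_model]) for _ in range(num_layers)]
ffn1_weights = [paddle.rand(shape=(d_model, 4 * d_model), dtype="float32")
                for _ in range(num_layers)]
ffn1_biases = [paddle.zeros([4 * d_model]) for _ in range(num_layers)]
ffn2_weights = [paddle.rand(shape=(4 * d_model, d_model), dtype="float32")
                for _ in range(num_layers)]
ffn2_biases = [paddle.zeros([d_model]) for _ in range(num_layers)]

out = F.fused_multi_transformer(
    x, ln_scales, ln_biases, qkv_weights, qkv_biases,
    linear_weights, linear_biases, ffn_ln_scales, ffn_ln_biases,
    ffn1_weights, ffn1_biases, ffn2_weights, ffn2_biases,
    attn_mask=attn_mask)
print(out.shape)  # [2, 4, 64]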
Beispiel #15
0
def fused_bias_dropout_residual_layer_norm(x,
                                           residual,
                                           bias=None,
                                           ln_scale=None,
                                           ln_bias=None,
                                           dropout_rate=0.5,
                                           ln_epsilon=1e-5,
                                           training=True,
                                           mode='upscale_in_train',
                                           name=None):
    r"""
    The fused_bias_dropout_residual_layer_norm operator. The pseudo code is as follows:

    .. code-block:: python

        y = layer_norm(residual + dropout(bias + x))

    Parameters:
        x (Tensor): The input tensor. The shape is `[*, embed\_dim]`.
        residual (Tensor): The residual tensor. The shape is same as x.
        bias (Tensor, optional): The bias of linear. The shape is `[embed_dim]`. Default None.
        ln_scale (Tensor, optional): The weight tensor of layernorm. The shape is `[embed_dim]`. Default None.
        ln_bias (Tensor, optional): The bias tensor of layernorm. The shape is `[embed_dim]`. Default None.
        dropout_rate (float, optional): The dropout probability applied to
            ``bias + x`` before the result is added to ``residual``, as in the
            pseudo code above. 0 for no dropout. Default 0.5.
        ln_epsilon (float, optional): Small float value added to denominator of layer_norm
            to avoid dividing by zero. Default is 1e-5.
        training (bool, optional): A flag indicating whether it is in the training phase or not. Default True.
        mode (str, optional): ['upscale_in_train'(default) | 'downscale_in_infer']

                               1. upscale_in_train(default), upscale the output at training time

                                  - train: out = input * mask / ( 1.0 - p )
                                  - inference: out = input

                               2. downscale_in_infer, downscale the output at inference

                                  - train: out = input * mask
                                  - inference: out = input * (1.0 - p)
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor, whose data type and shape are the same as `x`.

    Examples:

        .. code-block:: python

            # required: gpu
            import paddle
            import paddle.incubate.nn.functional as F

            # input: [batch_size, seq_len, embed_dim]
            x = paddle.rand(shape=(2, 4, 128), dtype="float32")
            # residual: [batch_size, seq_len, embed_dim]
            residual = paddle.rand(shape=(2, 4, 128), dtype="float32")
            # linear bias: [embed_dim]
            bias = paddle.rand(shape=[128], dtype="float32")
            # output: [batch_size, seq_len, embed_dim]
            output = F.fused_bias_dropout_residual_layer_norm(
                x, residual, bias)
            # [2, 4, 128]
            print(output.shape)
    """
    seed = None
    if mode not in ('downscale_in_infer', 'upscale_in_train'):
        raise ValueError(
            "mode argument should be 'downscale_in_infer' or 'upscale_in_train'"
        )
    mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  #semantic transfer

    if ln_scale is not None:
        assert len(ln_scale.shape) == 1, \
            "The dims of the shape of ln_scale should be 1."
        assert x.shape[-1] == ln_scale.shape[0], \
            "The dim of ln_scale must be equal to the last dim of x."
    if ln_bias is not None:
        assert len(ln_bias.shape) == 1, \
            "The dims of the shape of ln_bias should be 1."
        assert x.shape[-1] == ln_bias.shape[0], \
            "The dim of ln_bias must be equal to the last dim of x."

    if _non_static_mode():
        if default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        _, _, _, _, final_out = _C_ops.fused_bias_dropout_residual_layer_norm(
            x, residual, bias, ln_scale, ln_bias, 'dropout_rate', dropout_rate,
            'ln_epsilon', ln_epsilon, 'is_test', not training,
            'dropout_fix_seed', seed is not None, 'dropout_seed',
            seed if seed is not None else 0, 'dropout_implementation', mode)
        return final_out
    else:
        helper = LayerHelper('fused_bias_dropout_residual_layer_norm',
                             **locals())
        dtype = x.dtype
        # check dtypes
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'fused_bias_dropout_residual_layer_norm')
        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                    'fused_bias_dropout_residual_layer_norm')
        # set inputs
        inputs = dict()
        inputs['X'] = [x]
        inputs['Residual'] = [residual]
        if bias is not None:
            inputs['Bias'] = [bias]
        if ln_scale:
            inputs['LnScale'] = [ln_scale]
        if ln_bias:
            inputs['LnBias'] = [ln_bias]
        if (seed is None
                or seed == 0) and helper.main_program.random_seed != 0:
            seed = helper.main_program.random_seed
        # set attrs
        attrs = {
            'ln_epsilon': ln_epsilon,
            'dropout_rate': dropout_rate,
            'is_test': not training,
            'dropout_fix_seed': seed is not None,
            'dropout_seed': seed if seed is not None else 0,
            'dropout_implementation': mode,
        }
        # set outputs
        dropout_mask_out = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
        ln_mean_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        ln_variance_out = helper.create_variable_for_type_inference(
            dtype=dtype, stop_gradient=True)
        bias_dropout_residual_out = helper.create_variable_for_type_inference(
            dtype=dtype)
        final_out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(type='fused_bias_dropout_residual_layer_norm',
                         inputs=inputs,
                         outputs={
                             "BiasDropoutResidualOut":
                             bias_dropout_residual_out,
                             "DropoutMaskOut": dropout_mask_out,
                             "LnMean": ln_mean_out,
                             "LnVariance": ln_variance_out,
                             'Y': final_out,
                         },
                         attrs=attrs)
        return final_out