def __init__(self, pos_score, neg_score):
    """
    Positive-negative ratio (PN) metric. Accumulates global counters of
    correctly ordered pairs (pos_score > neg_score) and wrongly ordered
    pairs, and reports PN = (right + 1) / (wrong + 1).
    """
    kwargs = locals()
    del kwargs['self']
    helper = LayerHelper("PaddleRec_PosNegRatio", **kwargs)
    if "pos_score" not in kwargs or "neg_score" not in kwargs:
        raise ValueError(
            "PosNegRatio expects pos_score and neg_score as inputs.")
    pos_score = kwargs.get('pos_score')
    neg_score = kwargs.get('neg_score')
    if not isinstance(pos_score, Variable):
        raise ValueError("pos_score must be Variable, but received %s" %
                         type(pos_score))
    if not isinstance(neg_score, Variable):
        raise ValueError("neg_score must be Variable, but received %s" %
                         type(neg_score))

    wrong = fluid.layers.cast(
        fluid.layers.less_equal(pos_score, neg_score), dtype='float32')
    wrong_cnt = fluid.layers.reduce_sum(wrong)
    right = fluid.layers.cast(
        fluid.layers.less_than(neg_score, pos_score), dtype='float32')
    right_cnt = fluid.layers.reduce_sum(right)

    global_right_cnt, _ = helper.create_or_get_global_variable(
        name="right_cnt", persistable=True, dtype='float32', shape=[1])
    global_wrong_cnt, _ = helper.create_or_get_global_variable(
        name="wrong_cnt", persistable=True, dtype='float32', shape=[1])
    for var in [global_right_cnt, global_wrong_cnt]:
        helper.set_variable_initializer(
            var, Constant(value=0.0, force_cpu=True))

    helper.append_op(type="elementwise_add",
                     inputs={"X": [global_right_cnt],
                             "Y": [right_cnt]},
                     outputs={"Out": [global_right_cnt]})
    helper.append_op(type="elementwise_add",
                     inputs={"X": [global_wrong_cnt],
                             "Y": [wrong_cnt]},
                     outputs={"Out": [global_wrong_cnt]})
    self.pn = (global_right_cnt + 1.0) / (global_wrong_cnt + 1.0)

    self._global_metric_state_vars = dict()
    self._global_metric_state_vars['right_cnt'] = (global_right_cnt.name,
                                                   "float32")
    self._global_metric_state_vars['wrong_cnt'] = (global_wrong_cnt.name,
                                                   "float32")

    self.metrics = dict()
    self.metrics['WrongCnt'] = global_wrong_cnt
    self.metrics['RightCnt'] = global_right_cnt
    self.metrics['PN'] = self.pn
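# A minimal NumPy sketch (hedged: plain arrays instead of Paddle Variables)
# of the ratio the metric above accumulates:
# PN = (#(pos > neg) + 1) / (#(pos <= neg) + 1), smoothed by +1 on each side.
import numpy as np

pos = np.array([0.9, 0.4, 0.7])
neg = np.array([0.2, 0.6, 0.7])
right = np.sum(pos > neg)   # 1 correctly ordered pair
wrong = np.sum(pos <= neg)  # 2: ties and inversions both count as wrong
print((right + 1.0) / (wrong + 1.0))  # 0.666...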
def segment_pool(data, segment_ids, pool_type, name=None):
    """
    Segment pooling operator: reduces the rows of `data` that share the
    same id in `segment_ids` with the pooling given by `pool_type`
    (e.g. "SUM", "MEAN", "MAX", "MIN").
    """
    pool_type = pool_type.upper()
    if in_dygraph_mode():
        out, tmp = core.ops.segment_pool(data, segment_ids, 'pooltype',
                                         pool_type)
        return out

    check_variable_and_dtype(data, "X", ("float32", "float64"),
                             "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_pool", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    pool_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data,
                "SegmentIds": segment_ids},
        outputs={"Out": out,
                 "SummedIds": pool_ids},
        attrs={"pooltype": pool_type})
    return out
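# Hedged NumPy reference for the "SUM" pooling this operator performs,
# assuming segment_ids is sorted and ids start at 0 (as Paddle's segment
# ops require).
import numpy as np

def segment_sum_ref(data, segment_ids):
    num_segments = segment_ids.max() + 1
    out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
    np.add.at(out, segment_ids, data)  # scatter-add each row into its segment
    return out

data = np.array([[1., 2., 3.], [3., 2., 1.], [4., 5., 6.]], dtype=np.float32)
segment_ids = np.array([0, 0, 1])
print(segment_sum_ref(data, segment_ids))  # [[4. 4. 4.] [4. 5. 6.]]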
def scatter(input, index, updates, name=None):
    """
    **Scatter Layer** by Lihang Liu.

    There's a bug in the Python API scatter for parameter checking, please
    refer to (https://github.com/PaddlePaddle/Paddle/issues/12725).

    Output is obtained by updating the input on selected indices on the
    first axis.

    .. math::

        Out = X

        Out[Ids] = Updates

    Args:
        input (Variable): The source input with rank>=1.
        index (Variable): The index input with rank=1. Its dtype should be
            int32 or int64 as it is used as indexes.
        updates (Variable): The updated value of scatter op.
        name (str|None): The output variable name. Default None.

    Returns:
        output (Variable): The output is a tensor with the same shape as
        input.

    Examples:

        .. code-block:: python

            output = fluid.layers.scatter(input, index, updates)

    """
    helper = LayerHelper('scatter', **locals())
    dtype = helper.input_dtype()
    out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="scatter",
        inputs={"X": input,
                "Ids": index,
                "Updates": updates},
        outputs={"Out": out})
    return out
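# NumPy sketch of the scatter semantics above: Out = X, then
# Out[Ids] = Updates (rows are overwritten, not accumulated).
import numpy as np

x = np.ones((3, 2), dtype=np.float32)
index = np.array([2, 0])
updates = np.array([[9., 9.], [5., 5.]], dtype=np.float32)

out = x.copy()
out[index] = updates
print(out)  # [[5. 5.] [1. 1.] [9. 9.]]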
def slice_select(x, axis, starts, ends, strides, out=None):
    if not isinstance(axis, (list, tuple)):
        raise TypeError(f'Argument type error. `axis` is supposed to be list or'
                        f' tuple but found {type(axis)}.')
    if not isinstance(starts, (list, tuple)):
        raise TypeError(f'Argument type error. `starts` is supposed to be list or'
                        f' tuple but found {type(starts)}.')
    if not isinstance(ends, (list, tuple)):
        raise TypeError(f'Argument type error. `ends` is supposed to be list or'
                        f' tuple but found {type(ends)}.')
    assert len(axis) == len(starts) == len(ends) == len(strides), (
        f'len(axis), len(starts), len(ends) and len(strides) should be equal, '
        f'but len(axis)={len(axis)}, len(starts)={len(starts)}, '
        f'len(ends)={len(ends)} and len(strides)={len(strides)}')

    attrs = {'axis': axis, 'starts': starts, 'ends': ends, 'strides': strides}
    helper = LayerHelper('slice_select_p', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type=helper.layer_type,
                     inputs={'X': x},
                     outputs={'Y': out},
                     attrs=attrs)
    return out
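# Hedged NumPy sketch of the strided slice this primitive encodes: for each
# i, dimension axis[i] is sliced as starts[i]:ends[i]:strides[i] (an
# assumption read off the attribute names, not verified against the kernel).
import numpy as np

def slice_select_ref(x, axis, starts, ends, strides):
    slices = [slice(None)] * x.ndim
    for a, s, e, st in zip(axis, starts, ends, strides):
        slices[a] = slice(s, e, st)
    return x[tuple(slices)]

x = np.arange(24).reshape(2, 3, 4)
print(slice_select_ref(x, axis=[2], starts=[0], ends=[4], strides=[2]).shape)
# (2, 3, 2)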
def segment_max(data, segment_ids, name=None):
    r"""
    Segment max operator.

    This operator calculates the maximum elements of input `data` that
    share the same index in `segment_ids`.
    It computes a tensor such that $out_i = \\max_{j} data_{j}$
    where max is over j such that `segment_ids[j] == i`.

    Args:
        data (tensor): a tensor, available data type float32, float64,
            int32, int64.
        segment_ids (tensor): a 1-d tensor, which has the same size as the
            first dimension of input data. Available data type is int32,
            int64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        output (Tensor): the reduced result.

    Examples:

        .. code-block:: python

            import paddle

            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_max(data, segment_ids)
            # Outputs: [[3., 2., 3.], [4., 5., 6.]]

    """
    if in_dygraph_mode():
        out, tmp = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")
        return out

    if _non_static_mode():
        out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
        return out

    check_variable_and_dtype(data, "X",
                             ("float32", "float64", "int32", "int64"),
                             "segment_pool")
    check_variable_and_dtype(segment_ids, "SegmentIds", ("int32", "int64"),
                             "segment_pool")

    helper = LayerHelper("segment_max", **locals())
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(type="segment_pool",
                     inputs={"X": data,
                             "SegmentIds": segment_ids},
                     outputs={"Out": out,
                              "SummedIds": summed_ids},
                     attrs={"pooltype": "MAX"})
    return out
def vmat(x, name=None):
    # The type "vmat" must match the operator type registered in the OP definition.
    helper = LayerHelper("vmat", **locals())
    # Create the output Variable.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="vmat", inputs={"X": x}, outputs={"Y": out})
    return out
def lookup_table(input, embedding_table, dtype='float32'):
    """
    Lookup table support for paddle.

    :param input: the ids Variable used to index rows of the table
    :param embedding_table: the embedding table Variable, shape [vocab, dim]
    :param dtype: data type of the output, 'float32' by default
    :return: the looked-up embedding rows
    """
    is_sparse = False
    is_distributed = False
    helper = LayerHelper('embedding', **locals())
    remote_prefetch = is_sparse and (not is_distributed)
    if remote_prefetch:
        assert is_sparse is True and is_distributed is False
    tmp = helper.create_variable_for_type_inference(dtype)
    padding_idx = -1
    helper.append_op(type='lookup_table',
                     inputs={'Ids': input,
                             'W': embedding_table},
                     outputs={'Out': tmp},
                     attrs={
                         'is_sparse': is_sparse,
                         'is_distributed': is_distributed,
                         'remote_prefetch': remote_prefetch,
                         'padding_idx': padding_idx
                     })
    return tmp
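# What the lookup reduces to, sketched in NumPy: each id selects one row of
# the table (padding_idx=-1 above disables padding-row handling).
import numpy as np

table = np.random.rand(10, 4).astype(np.float32)  # vocab 10, embedding dim 4
ids = np.array([3, 0, 7])
print(table[ids].shape)  # (3, 4): one embedding row per id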
def scatter_nd_add_fix_bug(ref, index, updates, name=None):
    """Fix a bug of paddle.fluid.layers.scatter_nd_add.

    Args:
        ref (Variable): the tensor to be updated
        index (Variable): the index tensor; its last dimension indexes into ref
        updates (Variable): the values to accumulate at the indexed positions
        name (str): name of the output variable. Default is None.

    Returns:
        Variable: ref with updates added at the positions given by index.

    Raises:
        ValueError: if ref and updates have different data types.
    """
    if ref.dtype != updates.dtype:
        raise ValueError("ref and updates must have same data type.")

    helper = LayerHelper('scatter_nd_add', **locals())
    dtype = helper.input_dtype(input_param_name='ref')
    if name is None:
        output = helper.create_variable_for_type_inference(dtype)
    else:
        output = helper.create_variable(
            name=name, dtype=dtype, persistable=False)
    helper.append_op(
        type="scatter_nd_add",
        inputs={"X": ref,
                "Index": index,
                "Updates": updates},
        outputs={"Out": output})
    return output
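# NumPy sketch of scatter_nd_add semantics: updates are *accumulated* at the
# positions named by index (contrast with scatter above, which overwrites).
import numpy as np

ref = np.zeros((4,), dtype=np.float32)
index = np.array([[1], [1], [3]])  # last dim indexes into ref
updates = np.array([9., 10., 11.], dtype=np.float32)

out = ref.copy()
np.add.at(out, index[:, 0], updates)  # duplicate indices both contribute
print(out)  # [ 0. 19.  0. 11.]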
def farthest_point_sampling(input, sampled_point_num):
    '''
    Sample points based on their maximum Euclidean distance to the other
    points.

    Args:
        input (Variable): input point cloud dataset with shape (B, N, 3),
            where B is the batch size, N is the number of points, and 3 is
            the (x, y, z) coordinate.
        sampled_point_num (int): number of points to sample.

    Return:
        output (Variable): the sampled points, with shape (B, M), where B
            is the batch size and M is the number of sampled points.

    Examples:

        .. code-block:: python

            x = fluid.layers.data(name='data', shape=(2, 100, 3), dtype='float32')
            sampled_points = fluid.layers.farthest_point_sampling(x, 50)

    '''
    helper = LayerHelper('farthest_point_sampling', **locals())
    dtype = input.dtype
    op_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='farthest_point_sampling',
                     inputs={'X': input},
                     outputs={'Output': op_out},
                     attrs={'sampled_point_num': sampled_point_num})
    return op_out
def check_finite_and_unscale(x, scale, name=None):
    """
    Check if input X contains all finite data; if yes, scale it by input
    Scale.

    $$Out = X / scale$$

    If any tensor in X contains Inf or NaN, FoundInfinite will be set to
    1 (True) and Out will not be scaled. In this case, the data of Out
    should not be used, as it may not be deterministic. Otherwise,
    FoundInfinite will be 0 (False).

    Args:
        x(list|tuple): The input tensors of check_finite_and_unscale
            operator.
        scale: The scale of check_finite_and_unscale operator.
    """
    # check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
    # for e in x:
    #     check_variable_and_dtype(e, "x", ['float16', 'float32', 'float64'],
    #                              'check_finite_and_unscale')

    helper = LayerHelper("check_finite_and_unscale", **locals())
    found_inf = helper.create_variable_for_type_inference(dtype='bool')

    inputs = {'X': x, 'Scale': scale}
    outputs = {'Out': x, 'FoundInfinite': found_inf}
    helper.append_op(type='check_finite_and_unscale',
                     inputs=inputs,
                     outputs=outputs)

    return x, found_inf
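# Reference behaviour sketched in plain NumPy (an illustration, not the op's
# kernel): out is x / scale when every entry is finite; otherwise found_inf
# is raised and the outputs must not be trusted.
import numpy as np

def check_finite_and_unscale_ref(xs, scale):
    found_inf = any(not np.isfinite(x).all() for x in xs)
    if not found_inf:
        xs = [x / scale for x in xs]
    return xs, found_inf

(out,), flag = check_finite_and_unscale_ref([np.array([2.0, 4.0])], 2.0)
print(out, flag)  # [1. 2.] False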
def op2(x):
    value = paddle.fluid.layers.fill_constant([1, 3, 2], "float32", 1)
    # test stop_gradient
    value.stop_gradient = False
    x.stop_gradient = False

    attrs = {
        'axes': [0],
        'starts': [6],
        'ends': [0],
        'steps': [-4],
        'decrease_axes': [],
        'none_axes': [],
        'dtype': paddle.float32
    }
    inputs = {'Input': x, 'ValueTensor': value}

    helper = LayerHelper("set_value")
    y = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="set_value",
                     inputs=inputs,
                     outputs={'Out': y},
                     attrs=attrs)

    return y, value
def correlation(input1,
                input2,
                pad_size,
                kernel_size,
                max_displacement,
                stride1,
                stride2,
                corr_type_multiply=1):
    helper = LayerHelper("correlation", **locals())
    output = helper.create_variable_for_type_inference(dtype=input1.dtype)
    helper.append_op(type="correlation",
                     inputs={"Input1": input1,
                             "Input2": input2},
                     attrs={
                         "pad_size": pad_size,
                         "kernel_size": kernel_size,
                         "max_displacement": max_displacement,
                         "stride1": stride1,
                         "stride2": stride2,
                         "corr_type_multiply": corr_type_multiply
                     },
                     outputs={"Output": output})
    return output
def inplace_add(x, bias):
    # Reuse the `scale` op (Out = scale * X + bias, with scale defaulting to
    # 1.0) and route its output back to x, turning it into an in-place
    # x += bias.
    helper = LayerHelper('scale', **locals())
    helper.append_op(type='scale',
                     inputs={'X': [x]},
                     outputs={'Out': [x]},
                     attrs={'bias': bias})
    return x
def rank_attention(input,
                   rank_offset,
                   rank_param_shape,
                   rank_param_attr,
                   max_rank=3):
    """
    **Rank Attention layer**

    This Op can calculate rank attention between input and rank_param,
    where rank_param gives the organization of data. Notice: it currently
    supports GPU devices only.
    This Op exists in contrib, which means that it is not shown to the
    public.

    Args:
        input: Tensor with data type float32, float64.
        rank_offset: Tensor with data type int32.
        rank_param_shape: The shape of rank_param.
        rank_param_attr: Attribute initializer of rank_param.
        max_rank: The max rank of input's ranks.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
            rank_offset = fluid.data(name="rank_offset", shape=[None, 7], dtype="int32")
            out = fluid.contrib.layers.rank_attention(
                input=input,
                rank_offset=rank_offset,
                rank_param_shape=[18, 3],
                rank_param_attr=fluid.ParamAttr(
                    learning_rate=1.0,
                    name="ubm_rank_param.w_0",
                    initializer=fluid.initializer.Xavier(uniform=False)),
                max_rank=3)

    """
    helper = LayerHelper('rank_attention', **locals())
    dtype = helper.input_dtype(input_param_name='input')
    input_shape = input.shape
    assert input_shape[1] * max_rank * max_rank == rank_param_shape[0]

    rank_param = helper.create_parameter(attr=rank_param_attr,
                                         shape=rank_param_shape,
                                         dtype=dtype)
    rank_param.stop_gradient = False
    output = helper.create_variable_for_type_inference(dtype)
    ins_rank = helper.create_variable_for_type_inference(dtype=dtype,
                                                         stop_gradient=True)

    helper.append_op(type="rank_attention",
                     inputs={
                         "X": input,
                         "RankOffset": rank_offset,
                         "RankParam": rank_param
                     },
                     outputs={"Out": output},
                     attrs={"MaxRank": max_rank})
    return output
def partial_concat(input, start_index=0, length=-1):
    """
    **Partial Concat**

    This OP concatenates the inputs according to the start index and length.
    This OP exists in contrib, which means that it is not shown to the
    public.
    Only 2-D Tensor or LoDTensor input is supported. Slice and concat can
    only be performed along the second dimension.

    .. code-block:: text

        Given:
            x = [[0, 1, 2],
                 [3, 4, 5]]
            y = [[6, 7, 8],
                 [9, 10, 11]]
            output = partial_concat([x, y], start_index=0, length=2)

        we get:

            output = [[0, 1, 6, 7],
                      [3, 4, 9, 10]]

    Args:
        input(list): List of input Tensors with data type float32, float64,
            int32, int64.
        start_index(int32): The start index of each instance for partial
            concatenation. Default is 0.
        length(int32): The length of each instance for partial
            concatenation. A negative value means all elements after
            start_index. Default is -1.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            concat = fluid.contrib.layers.partial_concat([x, y],
                                                         start_index=0,
                                                         length=2)

    """
    if not isinstance(input, list):
        warnings.warn(
            "The type of input in partial_concat should be list, but received %s."
            % (type(input)))
        input = [input]
    for id, x in enumerate(input):
        check_variable_and_dtype(
            x, 'input[' + str(id) + ']',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'partial_concat')
    check_type(start_index, 'start_index', (int), 'partial_concat')
    check_type(length, 'length', (int), 'partial_concat')
    inputs = {'X': input}
    attrs = {'start_index': start_index, 'length': length}
    helper = LayerHelper('partial_concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(type='partial_concat',
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs)
    return out
def inf_norm(input,
             porder=None,
             axis=None,
             keepdim=False,
             asvector=False,
             name=None):
    helper = LayerHelper('frobenius_norm', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
    reduce_out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())

    reduce_all = True if axis is None or axis == [] or asvector else False
    axis = axis if axis is not None and axis != [] else [0]

    reduce_type = 'reduce_max' if porder == float('inf') else 'reduce_min'
    helper.append_op(type=reduce_type,
                     inputs={'X': out},
                     outputs={'Out': reduce_out},
                     attrs={
                         'dim': axis,
                         'keep_dim': keepdim,
                         'reduce_all': reduce_all
                     })
    return reduce_out
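# NumPy check of the two reduce branches above: porder == inf takes the max
# of |x|; any other porder reaching this helper falls through to the min.
import numpy as np

x = np.array([[-3., 1.], [2., -5.]])
print(np.abs(x).max())  # 5.0  -> the reduce_max (porder == inf) branch
print(np.abs(x).min())  # 1.0  -> the reduce_min branch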
def frobenius_norm(input, dim=None, keepdim=False, name=None):
    """
    Computes the Frobenius norm over certain two dimensions of Tensor
    `input`.

    Args:
        input (Variable): Tensor, data type float32, float64.
        dim (list, optional): None for the last two dimensions.
        keepdim (bool, optional): Whether to keep the dimensions as the
            `input`. Default False.
    """
    if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
        raise ValueError(
            "The dim of frobenius norm op should be None or a two-element list!"
        )
    if in_dygraph_mode():
        if dim is None:
            return core.ops.frobenius_norm(input, 'keep_dim', keepdim,
                                           'reduce_all', True)
        return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
                                       keepdim, 'reduce_all', False)
    attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
    if dim is None:
        attrs['reduce_all'] = True
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'frobenius_norm')

    helper = LayerHelper('frobenius_norm', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())

    helper.append_op(type='frobenius_norm',
                     inputs={'X': input},
                     outputs={'Out': out},
                     attrs=attrs)
    return out
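# NumPy check of the Frobenius norm over all elements (the dim=None,
# reduce_all=True case above).
import numpy as np

x = np.array([[1., 2.], [3., 4.]])
print(np.sqrt((x ** 2).sum()))  # 5.4772... == sqrt(1 + 4 + 9 + 16)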
def log_loss(input, label, epsilon=1e-4):
    """
    **Negative Log Loss Layer**

    This layer accepts input predictions and target labels and returns the
    negative log loss.

    .. math::

        Out = -label * log(X + epsilon) - (1 - label) * log(1 - X + epsilon)

    Args:
        input: a 2-D tensor with shape [N x 1], where N is the batch size.
            This input is a probability computed by the previous operator.
        label: the ground truth, a 2-D tensor with shape [N x 1], where N
            is the batch size.
        epsilon: a small constant added for numerical stability.

    Returns:
        A 2-D tensor with shape [N x 1], the negative log loss.

    Examples:

        .. code-block:: python

            prob = fluid.layers.sigmoid(net)
            cost = fluid.layers.log_loss(input=prob, label=label)

    """
    helper = LayerHelper('log_loss', **locals())
    loss = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='log_loss',
        inputs={'Predicted': [input],
                'Labels': [label]},
        outputs={'Loss': [loss]},
        attrs={'epsilon': epsilon})
    return loss
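# Worked instance of the loss formula above, in NumPy, with the default
# epsilon.
import numpy as np

eps = 1e-4
prob = np.array([[0.9], [0.2]], dtype=np.float32)
label = np.array([[1.0], [0.0]], dtype=np.float32)
loss = -label * np.log(prob + eps) - (1 - label) * np.log(1 - prob + eps)
print(loss.ravel())  # approximately [0.1052, 0.2230]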
def relu2(x, name=None):
    # The type "relu2" must match the operator type registered in the OP definition.
    helper = LayerHelper("relu2", **locals())
    # Create the output Variable.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="relu2", inputs={"X0": [x]}, outputs={"Out": out})
    return out
def fill_const(value, shape, dtype, out=None):
    attrs = {'value': value, 'shape': shape, 'dtype': dtype}
    helper = LayerHelper('fill_constant_p', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type=helper.layer_type, outputs={'Y': out}, attrs=attrs)
    return out
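# For comparison: the public eager API paddle.full builds the same constant
# tensor that this 'fill_constant_p' primitive describes (a hedged sketch,
# assuming a standard Paddle install).
import paddle

print(paddle.full(shape=[2, 3], fill_value=1.5, dtype='float32'))
# a [2, 3] float32 tensor filled with 1.5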
def histogram(input, bins=100, min=0, max=0):
    """
    Computes the histogram of a tensor. The elements are sorted into equal
    width bins between min and max. If min and max are both zero, the
    minimum and maximum values of the data are used.

    Args:
        input (Variable): A Tensor(or LoDTensor) with shape
            :math:`[N_1, N_2,..., N_k]`. The data type of the input Tensor
            should be float32, float64, int32, int64.
        bins (int): number of histogram bins
        min (int): lower end of the range (inclusive)
        max (int): upper end of the range (inclusive)

    Returns:
        Variable: Tensor or LoDTensor calculated by histogram layer. The
        data type is int64.

    Code Example 1:

        .. code-block:: python

            import paddle
            import numpy as np

            startup_program = paddle.static.Program()
            train_program = paddle.static.Program()
            with paddle.static.program_guard(train_program, startup_program):
                inputs = paddle.data(name='input', dtype='int32', shape=[2, 3])
                output = paddle.histogram(inputs, bins=5, min=1, max=5)
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                exe.run(startup_program)
                img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
                res = exe.run(train_program,
                              feed={'input': img},
                              fetch_list=[output])
                print(np.array(res[0]))  # [0, 3, 0, 2, 1]

    Code Example 2:

        .. code-block:: python

            import paddle

            paddle.disable_static(paddle.CPUPlace())
            inputs = paddle.to_tensor([1, 2, 1])
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result)  # [0, 2, 1, 0]
            paddle.enable_static()

    """
    if in_dygraph_mode():
        return core.ops.histogram(input, "bins", bins, "min", min, "max",
                                  max)

    helper = LayerHelper('histogram', **locals())
    check_variable_and_dtype(input, 'X',
                             ['int32', 'int64', 'float32', 'float64'],
                             'histogram')
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(type='histogram',
                     inputs={'X': input},
                     outputs={'Out': out},
                     attrs={'bins': bins,
                            'min': min,
                            'max': max})
    return out
def rrpn_box_coder(prior_box, prior_box_var, target_box, name=None):
    """
    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
            [M, 5] that holds M boxes; the data type is float32 or float64.
            Each box is represented as [x, y, w, h, angle], where [x, y] is
            the center coordinate of the anchor box, [w, h] is the width
            and height of the anchor box, and angle is the rotated angle of
            prior_box.
        prior_box_var(List|Variable|None): prior_box_var is a 2-D Tensor
            with shape [M, 5] that holds M groups of variance.
        target_box(Variable): This input can be a 2-D LoDTensor with shape
            [M, 5]. Each box is represented as [x, y, w, h, angle]. The
            data type is float32 or float64.
        name(str): Name of this layer. None by default.

    Returns:
        Variable:
            output_box(Variable): The output tensor of rrpn_box_coder_op
            with shape [N, 5] representing the result of N target boxes
            encoded with N prior boxes and variances. N represents the
            number of boxes and 5 represents [x, y, w, h, angle].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            prior_box_decode = fluid.data(name='prior_box_decode',
                                          shape=[512, 5],
                                          dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                           shape=[512, 5],
                                           dtype='float32')
            output_decode = rrpn_box_coder(prior_box=prior_box_decode,
                                           prior_box_var=[10, 10, 5, 5, 1],
                                           target_box=target_box_decode)

    """
    helper = LayerHelper("rrpn_box_coder", **locals())

    if name is None:
        output_box = helper.create_variable_for_type_inference(
            dtype=prior_box.dtype)
    else:
        output_box = helper.create_variable(
            name=name, dtype=prior_box.dtype, persistable=False)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {}
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError(
            "Input variance of rrpn_box_coder must be Variable or list")
    helper.append_op(
        type="rrpn_box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box
def relu3(x, name=None):
    helper = LayerHelper("relu3", **locals())
    out = helper.create_variable(type=x.type,
                                 name=name,
                                 dtype=x.dtype,
                                 persistable=False)
    helper.append_op(type="relu3", inputs={"X": x}, outputs={"Y": out})
    return out
def __init__(self, input, label, k=20):
    """
    Recall@k metric. Accumulates a global instance counter and a global
    positive (hit) counter across batches; their ratio is the running
    accuracy of Recall@k.
    """
    kwargs = locals()
    del kwargs['self']
    self.k = k

    if not isinstance(input, Variable):
        raise ValueError("input must be Variable, but received %s" %
                         type(input))
    if not isinstance(label, Variable):
        raise ValueError("label must be Variable, but received %s" %
                         type(label))

    helper = LayerHelper("PaddleRec_RecallK", **kwargs)
    batch_accuracy = accuracy(input, label, self.k)
    global_ins_cnt, _ = helper.create_or_get_global_variable(
        name="ins_cnt", persistable=True, dtype='float32', shape=[1])
    global_pos_cnt, _ = helper.create_or_get_global_variable(
        name="pos_cnt", persistable=True, dtype='float32', shape=[1])

    for var in [global_ins_cnt, global_pos_cnt]:
        helper.set_variable_initializer(
            var, Constant(value=0.0, force_cpu=True))

    tmp_ones = fluid.layers.fill_constant(shape=fluid.layers.shape(label),
                                          dtype="float32",
                                          value=1.0)
    batch_ins = fluid.layers.reduce_sum(tmp_ones)
    batch_pos = batch_ins * batch_accuracy

    helper.append_op(type="elementwise_add",
                     inputs={"X": [global_ins_cnt],
                             "Y": [batch_ins]},
                     outputs={"Out": [global_ins_cnt]})
    helper.append_op(type="elementwise_add",
                     inputs={"X": [global_pos_cnt],
                             "Y": [batch_pos]},
                     outputs={"Out": [global_pos_cnt]})

    self.acc = global_pos_cnt / global_ins_cnt

    self._global_metric_state_vars = dict()
    self._global_metric_state_vars['ins_cnt'] = (global_ins_cnt.name,
                                                 "float32")
    self._global_metric_state_vars['pos_cnt'] = (global_pos_cnt.name,
                                                 "float32")

    metric_name = "Acc(Recall@%d)" % self.k
    self.metrics = dict()
    self.metrics["InsCnt"] = global_ins_cnt
    self.metrics["RecallCnt"] = global_pos_cnt
    self.metrics[metric_name] = self.acc
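# Plain-Python sketch of the accumulation above: every batch adds its
# instance count and instances * accuracy@k to the two global counters, and
# the reported metric is their ratio (numbers here are illustrative).
ins_cnt, pos_cnt = 0.0, 0.0
for batch_size, acc_at_k in [(32, 0.50), (32, 0.75)]:
    ins_cnt += batch_size
    pos_cnt += batch_size * acc_at_k
print(pos_cnt / ins_cnt)  # 0.625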
def bmm(x, y, name=None):
    """
    :alias_main: paddle.bmm
    :alias: paddle.bmm, paddle.tensor.bmm, paddle.tensor.linalg.bmm

    Applies batched matrix multiplication to two tensors.

    Both of the two input tensors must be three-dimensional and share the
    same batch size.

    If x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be
    a (b, m, n) tensor.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        name(str|None): A name for this layer (optional). If set None, the
            layer will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:

        .. code-block:: python

            import paddle

            paddle.disable_static()
            # In imperative mode:
            # size x: (2, 2, 3) and y: (2, 3, 2)
            x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
                                  [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]])
            y = paddle.to_tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                                  [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]])
            out = paddle.bmm(x, y)
            # output size: (2, 2, 2)
            # output value:
            # [[[6.0, 6.0], [12.0, 12.0]], [[45.0, 45.0], [60.0, 60.0]]]
            out_np = out.numpy()

    """
    x_shape = x.shape
    y_shape = y.shape
    if not len(x_shape) == len(y_shape) == 3:
        raise ValueError(
            "x and y should be 3-dimensional. But received x's dimension: {}, y's dimension: {}"
            .format(x_shape, y_shape))
    if x_shape[2] != y_shape[1]:
        raise ValueError(
            "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}"
            .format(x_shape, y_shape))

    if in_dygraph_mode():
        return core.ops.bmm(x, y)

    helper = LayerHelper('bmm', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out
def iou_similarity(x, y, box_normalized=True, name=None):
    """
    Computes intersection-over-union (IOU) between two box lists.
    Box list 'X' should be a LoDTensor and 'Y' is a common Tensor;
    boxes in 'Y' are shared by all instances of the batched inputs of X.
    Given two boxes A and B, the calculation of IOU is as follows:

    $$
    IOU(A, B) = \\frac{area(A\\cap B)}{area(A)+area(B)-area(A\\cap B)}
    $$

    Args:
        x (Tensor): Box list X is a 2-D Tensor with shape [N, 4] that holds
            N boxes; each box is represented as [xmin, ymin, xmax, ymax].
            [xmin, ymin] is the left top coordinate of the box if the input
            is image feature map; they are close to the origin of the
            coordinate system. [xmax, ymax] is the right bottom coordinate
            of the box. The data type is float32 or float64.
        y (Tensor): Box list Y holds M boxes, each represented as
            [xmin, ymin, xmax, ymax]; the shape of Y is [M, 4]. [xmin,
            ymin] is the left top coordinate of the box if the input is
            image feature map, and [xmax, ymax] is the right bottom
            coordinate of the box. The data type is float32 or float64.
        box_normalized(bool): Whether to treat the prior box as a
            normalized box. Set true by default.
        name(str, optional): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name does not need to be set and
            is None by default.

    Returns:
        Tensor: The output of iou_similarity op, a tensor with shape
        [N, M] representing pairwise IOU scores. The data type is the same
        as x.

    Examples:

        .. code-block:: python

            import paddle
            from ppdet.modeling import ops

            paddle.enable_static()
            x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
            y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
            iou = ops.iou_similarity(x=x, y=y)

    """
    if in_dygraph_mode():
        out = core.ops.iou_similarity(x, y, 'box_normalized', box_normalized)
        return out
    else:
        helper = LayerHelper("iou_similarity", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type="iou_similarity",
            inputs={"X": x,
                    "Y": y},
            attrs={"box_normalized": box_normalized},
            outputs={"Out": out})
        return out
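# One-pair NumPy check of the IOU formula above (corner format, unnormalized
# boxes): A=[0,0,2,2] and B=[1,1,3,3] overlap in a 1x1 square, so
# IOU = 1 / (4 + 4 - 1) = 1/7.
import numpy as np

def iou_ref(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter)

print(iou_ref([0, 0, 2, 2], [1, 1, 3, 3]))  # 0.142857...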
def partial_sum(input, start_index=0, length=-1):
    """
    **PartialSum**

    This Op sums the input vars over the slice specified by the initial
    position (start_index) and length (length).
    This Op exists in contrib, which means that it is not shown to the
    public.
    Only 2-D Tensor or LoDTensor input is supported. Slice and concat can
    only be performed along the second dimension.

    .. code-block:: text

        Given:
            x = [[0, 1, 2],
                 [3, 4, 5]]
            y = [[6, 7, 8],
                 [9, 10, 11]]
            output = partial_sum([x, y], start_index=0, length=2)

        we get:

            output = [[6, 8],
                      [12, 14]]

    Args:
        input(list): List of input Tensors with data type float32, float64,
            int32, int64.
        start_index(int32): The start index of each instance for the
            partial sum. Default is 0.
        length(int32): The length of each instance for the partial sum. A
            negative value means all elements after start_index. Default
            is -1.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:

        .. code-block:: python

            import paddle.fluid.layers as layers
            import paddle.fluid as fluid
            import numpy as np

            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
            sum = layers.partial_sum([x, y], start_index=0, length=2)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            xx = np.array([1, 2, 3, 4, 5, 6]).reshape((2, 3)).astype("float32")
            yy = np.array([6, 5, 4, 4, 5, 6]).reshape((2, 3)).astype("float32")
            out = exe.run(feed={"x": xx, "y": yy}, fetch_list=[sum])

    """
    for id, x in enumerate(input):
        check_variable_and_dtype(x, 'input[' + str(id) + ']',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'partial_sum')

    inputs = {'X': input}
    attrs = {'start_index': start_index, 'length': length}
    helper = LayerHelper('partial_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(type='partial_sum',
                     inputs=inputs,
                     outputs={'Out': [out]},
                     attrs=attrs)
    return out
def _prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker):
    """
    Prune gate by capacity (only supports CUDA).

    Args:
        gate_idx (Tensor): Represents the gate_id sequence corresponding to
            the input data with type int32, int64.
        expert_count (Tensor): The quantity value counted on the gate_id
            sequence of the input data with type int32, int64.
        n_expert (int): The number of experts, with type int64.
        n_worker (int): The number of workers on the trainer, with type
            int64.

    Returns:
        new_gate_idx (Tensor): The gate_id sequence corresponding to the
        new input data after pruning.

    Examples:

        .. code-block:: python

            import paddle

            gate_idx = paddle.to_tensor([1, 3, 3, 3, 3, 2, 1, 1], dtype='int32')
            expert_count = paddle.to_tensor([0, 3, 1, 3, 0, 0, 0, 0], dtype='int32')
            n_expert = 8
            n_worker = 1
            new_gate_id = paddle.distributed.utils.prune_gate_by_capacity(
                gate_idx, expert_count, n_expert, n_worker)
            print(new_gate_id)
            # Tensor(shape=[8], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
            #        [1, 3, 3, 3, -1, 2, 1, 1])

    """
    if in_dygraph_mode():
        return _C_ops.prune_gate_by_capacity(gate_idx, expert_count,
                                             "n_expert", n_expert,
                                             "n_worker", n_worker)
    elif _in_legacy_dygraph():
        return core.ops.prune_gate_by_capacity(gate_idx, expert_count,
                                               "n_expert", n_expert,
                                               "n_worker", n_worker)
    check_variable_and_dtype(
        gate_idx, 'GateIdx', ['int32', 'int64'],
        'paddle.distributed.utils.prune_gate_by_capacity')
    check_variable_and_dtype(
        expert_count, 'ExpertCount', ['int32', 'int64'],
        'paddle.distributed.utils.prune_gate_by_capacity')

    helper = LayerHelper('prune_gate_by_capacity', **locals())
    new_gate_idx = helper.create_variable_for_type_inference(
        dtype=gate_idx.dtype)
    helper.append_op(type='prune_gate_by_capacity',
                     inputs={'GateIdx': gate_idx,
                             "ExpertCount": expert_count},
                     outputs={'NewGateIdx': new_gate_idx},
                     attrs={"n_expert": n_expert,
                            "n_worker": n_worker})
    return new_gate_idx
def fused_matmul_bias(x,
                      y,
                      bias=None,
                      transpose_x=False,
                      transpose_y=False,
                      name=None):
    """
    Applies matrix multiplication of two tensors and then bias addition if
    provided.
    This method requires CUDA version >= 11.6.

    Args:
        x (Tensor): the first input Tensor to be multiplied.
        y (Tensor): the second input Tensor to be multiplied. Its rank must
            be 2.
        bias (Tensor|None): the input bias Tensor. If it is None, no bias
            addition would be performed. Otherwise, the bias is added to
            the matrix multiplication result.
        transpose_x (bool): Whether to transpose :math:`x` before
            multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before
            multiplication.
        name (str|None): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name does not need to be set and
            is None by default.

    Returns:
        Tensor: the output Tensor.

    Examples:

        .. code-block:: python

            # required: gpu
            import paddle
            from paddle.incubate.nn.functional import fused_matmul_bias

            x = paddle.randn([3, 4])
            y = paddle.randn([4, 5])
            bias = paddle.randn([5])
            out = fused_matmul_bias(x, y, bias)
            print(out.shape)  # [3, 5]

    """
    if bias is None:
        return matmul(x, y, transpose_x, transpose_y, name)
    if _non_static_mode():
        return _C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', transpose_x,
                                          'trans_y', transpose_y)

    helper = LayerHelper('fused_matmul_bias', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='fused_gemm_epilogue',
        inputs={'X': x,
                'Y': y,
                'Bias': bias},
        outputs={'Out': out},
        attrs={'trans_x': transpose_x,
               'trans_y': transpose_y})
    return out
def set_value(x, y, axis, starts, ends, strides, out):
    assert x is out, "x and out should be the same Tensor in set_value"
    attrs = {'axes': axis, 'starts': starts, 'ends': ends, 'steps': strides}
    helper = LayerHelper('set_value', **locals())
    helper.append_op(type=helper.layer_type,
                     inputs={'Input': x,
                             'ValueTensor': y},
                     outputs={'Out': out},
                     attrs=attrs)
    return out
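# NumPy sketch of the in-place strided assignment this op performs: y is
# written into x along `axis` over the range starts:ends:strides, and x
# doubles as the output.
import numpy as np

x = np.zeros((6,), dtype=np.float32)
y = np.array([1., 2., 3.], dtype=np.float32)
x[0:6:2] = y  # axes=[0], starts=[0], ends=[6], strides=[2]
print(x)      # [1. 0. 2. 0. 3. 0.]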