Example #1
                                    nms_top_k=self.pre_nms_topN,
                                    score_threshold=-1.,
                                    keep_top_k=self.post_nms_topN,
                                    nms_threshold=self.nms_thresh,
                                    normalized=False,
                                    nms_eta=self.eta)
        label, scores, proposal = paddle.tensor.split(
            out, axis=1, num_or_sections=[1, 1, 4])
        return scores, proposal

    def forward(self):
        anchors = self.input('Anchors', 0)
        bboxdeltas = self.input('BboxDeltas', 0)
        iminfo = self.input('ImInfo', 0)
        scores = self.input('Scores', 0)
        variances = self.input('Variances', 0)

        bboxdeltas = paddle.transpose(bboxdeltas, perm=[0, 2, 3, 1])
        bboxdeltas = paddle.reshape(bboxdeltas, [-1, 4])
        scores = paddle.transpose(scores, perm=[0, 2, 3, 1])
        scores = paddle.reshape(scores, [-1, 1])
        anchors = paddle.reshape(anchors, [-1, 4])
        variances = paddle.reshape(variances, [-1, 4])

        new_scores, proposals = self.proposal_for_single_sample(
            anchors, bboxdeltas, iminfo, scores, variances)
        return {'RpnRoiProbs': [new_scores], 'RpnRois': [proposals]}


register_custom_paddle_op('generate_proposals', GenerateProposals)
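
The transpose/reshape pattern in forward() flattens the per-anchor score and delta maps into one row per anchor before box decoding and NMS. A minimal numpy sketch of that shape bookkeeping, with assumed dimensions (1 image, 3 anchors per location, a 4x5 feature map):

import numpy as np

# Shape bookkeeping only: the (N, A*4, H, W) delta map and the (N, A, H, W)
# score map are flattened to one row per anchor, matching the paddle.transpose
# and paddle.reshape calls above.
n, a, h, w = 1, 3, 4, 5
bbox_deltas = np.zeros((n, a * 4, h, w), dtype=np.float32)
scores = np.zeros((n, a, h, w), dtype=np.float32)

bbox_deltas = bbox_deltas.transpose(0, 2, 3, 1).reshape(-1, 4)
scores = scores.transpose(0, 2, 3, 1).reshape(-1, 1)
print(bbox_deltas.shape, scores.shape)  # (60, 4) (60, 1)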
Example #2
    def forward(self):
        input_feature = self.input('Input', 0)
        input_shape = paddle.shape(input_feature)
        n, c, h, w = paddle.tensor.split(input_shape, num_or_sections=4)
        x_ctr = paddle.arange(start=0, end=w, step=1, dtype=input_feature.dtype)
        y_ctr = paddle.arange(start=0, end=h, step=1, dtype=input_feature.dtype)
        x_ctr = x_ctr * self.strides[0] + self.offset * (self.strides[0] - 1)
        y_ctr = y_ctr * self.strides[1] + self.offset * (self.strides[1] - 1)
        tensor_one = paddle.ones(shape=[1], dtype='int64')
        tensor_len_shape = paddle.full(
            shape=[1], fill_value=len(self.shapes), dtype='int64')
        x_ctr = paddle.reshape(x_ctr, shape=(1, -1))
        y_ctr = paddle.reshape(y_ctr, shape=(1, -1))
        x_ctr = paddle.tile(x_ctr, repeat_times=(h, tensor_one))
        y_ctr = paddle.tile(y_ctr, repeat_times=(w, tensor_one))
        y_ctr = paddle.transpose(y_ctr, perm=[1, 0])
        centers = paddle.stack([x_ctr, y_ctr], axis=-1)
        centers = paddle.tensor.unsqueeze(centers, axis=[2])
        centers = paddle.tile(centers, repeat_times=(1, 1, len(self.shapes), 2))
        shape_tensor = paddle.assign(np.array(self.shapes).astype('float32'))
        anchors = centers + shape_tensor
        variance_tensor = paddle.assign(
            np.asarray(self.variances).astype('float32'))
        vars = paddle.reshape(variance_tensor, shape=[1, 1, 1, -1])
        vars = paddle.tile(
            vars, repeat_times=(h, w, tensor_len_shape, tensor_one))
        return {'Anchors': [anchors], 'Variances': [vars]}


register_custom_paddle_op('anchor_generator', AnchorGenerator)
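
The forward pass builds a grid of anchor centres from the feature-map size, the strides and the offset, then adds the per-shape box offsets and tiles the variances. A small numpy sketch of the centre-grid step, assuming strides=(16, 16), offset=0.5 and a 2x3 feature map:

import numpy as np

# Mirror of the x_ctr/y_ctr tiling above, on a toy 2x3 feature map.
strides, offset = (16, 16), 0.5
h, w = 2, 3

x_ctr = np.arange(w) * strides[0] + offset * (strides[0] - 1)
y_ctr = np.arange(h) * strides[1] + offset * (strides[1] - 1)
xx = np.tile(x_ctr.reshape(1, -1), (h, 1))      # (h, w)
yy = np.tile(y_ctr.reshape(1, -1), (w, 1)).T    # (h, w), transposed as above
centers = np.stack([xx, yy], axis=-1)           # (h, w, 2)
print(centers[0, :, 0])  # x centres of the first row: [ 7.5 23.5 39.5]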
Example #3
import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.constant import dtypes
from paddle2onnx.op_mapper import CustomPaddleOp, register_custom_paddle_op


class FillConstantBatchSizeLike(CustomPaddleOp):
    def __init__(self, node, **kw):
        super(FillConstantBatchSizeLike, self).__init__(node)

    def forward(self):
        input = self.input('Input', 0)
        input_shape = paddle.shape(input)
        updates = input_shape[self.node.attr('input_dim_idx')]
        shape = paddle.assign(
            np.array(self.node.attr('shape')).astype('int32'))
        dims = len(self.node.attr('shape'))
        new_shape = paddle.concat([
            shape[:self.node.attr('output_dim_idx')], updates,
            shape[self.node.attr('output_dim_idx') + 1:dims]
        ])
        dtype = dtypes.DTYPE_PADDLE_STR_MAP[self.node.attr('dtype')]
        out = paddle.full(new_shape, self.node.attr('value'), dtype)
        return {'Out': [out]}


register_custom_paddle_op('fill_constant_batch_size_like',
                          FillConstantBatchSizeLike)
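
FillConstantBatchSizeLike reproduces the op with plain tensor primitives: the static 'shape' attribute is used as-is, except that the dimension at output_dim_idx is replaced by the runtime size of the input at input_dim_idx. A numpy sketch of that shape substitution, with hypothetical attribute values:

import numpy as np

# Hypothetical attribute values, for illustration only.
shape = [-1, 128]        # attr 'shape'
input_dim_idx = 0        # which dim of the runtime input to read
output_dim_idx = 0       # which dim of 'shape' to overwrite
value = 0.0              # attr 'value'

runtime_input = np.zeros((8, 3, 32, 32), dtype=np.float32)

# Same substitution the op performs with paddle.shape / paddle.concat,
# followed by materialising the constant tensor.
new_shape = list(shape)
new_shape[output_dim_idx] = runtime_input.shape[input_dim_idx]
out = np.full(new_shape, value, dtype=np.float32)
print(out.shape)  # (8, 128)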
Example #4
from __future__ import absolute_import

import numpy as np
import paddle
from paddle.fluid import layers
from paddle2onnx.op_mapper import CustomPaddleOp, register_custom_paddle_op


class CollectFpnProposals(CustomPaddleOp):
    def __init__(self, node, **kw):
        super(CollectFpnProposals, self).__init__(node)
        self.post_nms_top_n = node.attr('post_nms_topN')

    def forward(self):
        multi_level_rois = self.input('MultiLevelRois')
        multi_level_scores = self.input('MultiLevelScores')
        multi_level_rois = paddle.concat(multi_level_rois, axis=0)
        multi_level_scores = paddle.concat(multi_level_scores, axis=0)
        proposal_num = paddle.shape(multi_level_scores)[0]
        post_nms_top_n_tensor = paddle.assign(
            np.array([self.post_nms_top_n]).astype('int32'))
        k_candidate = paddle.concat([proposal_num, post_nms_top_n_tensor])
        k = paddle.min(k_candidate)
        scores, index = paddle.topk(multi_level_scores, k=k, axis=0)
        rois = paddle.gather(multi_level_rois, index, axis=0)
        return {"FpnRois": [rois]}


register_custom_paddle_op('collect_fpn_proposals', CollectFpnProposals)
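
CollectFpnProposals concatenates the RoIs and scores from all FPN levels and keeps the highest-scoring post_nms_topN of them, clamping k to the number of available proposals. A numpy sketch of that selection on toy data (post_nms_topN assumed to be 4):

import numpy as np

multi_level_scores = [np.array([0.9, 0.1, 0.5]), np.array([0.8, 0.3])]
multi_level_rois = [np.random.rand(3, 4), np.random.rand(2, 4)]
post_nms_top_n = 4

scores = np.concatenate(multi_level_scores, axis=0)
rois = np.concatenate(multi_level_rois, axis=0)

# k is clamped to the number of available proposals, mirroring the
# paddle.min over [proposal_num, post_nms_top_n] in the op above.
k = min(scores.shape[0], post_nms_top_n)
order = np.argsort(-scores)[:k]
fpn_rois = rois[order]
print(fpn_rois.shape)  # (4, 4)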
Example #5
        nd_index = []
        for k in range(len(x.shape)):
            if k == dim:
                nd_index.append(index_flatten)
            else:
                reshape_shape = [1] * len(x.shape)
                x_shape_k = x_shape[k]
                # x_shape_k = x.shape[k]
                reshape_shape[k] = x_shape_k
                x_arange = paddle.arange(x_shape_k, dtype=index.dtype)
                x_arange = x_arange.reshape(reshape_shape)
                dim_index = paddle.expand(x_arange, index_shape).flatten()
                nd_index.append(dim_index)
        ind2 = paddle.transpose(paddle.stack(nd_index), [1, 0]).astype("int64")
        paddle_out = paddle.gather_nd(x, ind2).reshape(index_shape)
        return paddle_out

    def forward(self):
        input = self.input('X', 0)
        grid = self.input('Grid', 0)
        if self.mode != 'bilinear' or self.padding_mode != 'zeros':
            raise Exception(
                "grid_sample is only supported with mode='bilinear' and padding_mode='zeros'"
            )
        res = self.paddle_bilinear_grid_sample(
            input, grid, align_corners=self.align_corners)
        return {'Output': [res]}


register_custom_paddle_op('grid_sampler', GridSampler)
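
The nd_index construction above emulates gathering along a single axis with gather_nd: the chosen dimension takes the given index, every other dimension gets a broadcast arange. A numpy check of that equivalence against np.take_along_axis:

import numpy as np

x = np.arange(12.0).reshape(3, 4)
index = np.array([[0, 3], [1, 2], [2, 0]])  # gather along dim=1
dim = 1

nd_index = []
for k in range(x.ndim):
    if k == dim:
        nd_index.append(index.flatten())
    else:
        reshape_shape = [1] * x.ndim
        reshape_shape[k] = x.shape[k]
        ar = np.arange(x.shape[k]).reshape(reshape_shape)
        nd_index.append(np.broadcast_to(ar, index.shape).flatten())
ind2 = np.stack(nd_index, axis=1)          # one (row, col) pair per element
out = x[tuple(ind2.T)].reshape(index.shape)

assert np.array_equal(out, np.take_along_axis(x, index, axis=dim))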
Example #6
            paddle.reshape(
                x_offset[:, :, :, :, s:s + self.kernel_size],
                (-1, self.in_channel, offset_h, offset_w * self.kernel_size))
            for s in range(0, self.N, self.kernel_size)
        ],
                                 axis=-1)
        x_offset = paddle.reshape(
            x_offset, (-1, self.in_channel, offset_h * self.kernel_size,
                       offset_w * self.kernel_size))
        return x_offset


@op_mapper('deformable_conv')
class Deformconv2d:
    @classmethod
    def opset_1(cls, graph, node, **kw):
        node = graph.make_node(
            'deformable_conv',
            inputs=node.input('Input') + node.input('Filter') +
            node.input('Mask') + node.input('Offset'),
            outputs=node.output('Output'),
            stride=node.attr('strides'),
            padding=node.attr('paddings'),
            groups=node.attr('groups'),
            dilation=node.attr('dilations'),
            deformable_groups=node.attr('deformable_groups'),
            domain='custom')


register_custom_paddle_op('deformable_conv', DeformConv2d)
Example #7
        target_level = paddle.log(scale / self.refer_scale + 1e-06) / np.log(2)
        target_level = paddle.floor(self.refer_level + target_level)
        target_level = paddle.clip(target_level,
                                   min=self.min_level,
                                   max=self.max_level)

        rois = list()
        rois_idx_order = list()

        for level in range(self.min_level, self.max_level + 1):
            level_tensor = paddle.full_like(target_level, fill_value=level)
            res = paddle.equal(target_level, level_tensor)
            res = paddle.squeeze(res, axis=1)
            res = paddle.cast(res, dtype='int32')
            index = paddle.nonzero(res)
            roi = paddle.gather(fpn_rois, index, axis=0)
            rois.append(roi)
            rois_idx_order.append(index)
        rois_idx_order = paddle.concat(rois_idx_order, axis=0)
        size = paddle.shape(rois_idx_order)[0]
        _, rois_idx_restore = paddle.topk(rois_idx_order,
                                          axis=0,
                                          sorted=True,
                                          largest=False,
                                          k=size)
        #rois_idx_restore = paddle.cast(rois_idx_restore, dtype='int32')
        return {'MultiFpnRois': rois, 'RestoreIndex': [rois_idx_restore]}


register_custom_paddle_op('distribute_fpn_proposals', DistributeFpnProposals)
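
Each RoI is routed to an FPN level with floor(refer_level + log2(scale / refer_scale)), clipped to [min_level, max_level]; scale is presumably sqrt(roi_width * roi_height). A worked numpy example with assumed refer_scale=224, refer_level=4, min_level=2, max_level=5:

import numpy as np

refer_scale, refer_level, min_level, max_level = 224, 4, 2, 5

scale = np.sqrt(112.0 * 112.0)  # e.g. a 112x112 RoI
target_level = np.floor(refer_level + np.log(scale / refer_scale + 1e-6) / np.log(2))
target_level = np.clip(target_level, min_level, max_level)
print(target_level)  # 3.0 -> this RoI goes to FPN level 3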
Example #8
        im_info = paddle.reshape(im_info, shape=[3])
        h, w, s = paddle.tensor.split(im_info, axis=0, num_or_sections=3)
        tensor_one = paddle.full(shape=[1], dtype='float32', fill_value=1.0)
        tensor_zero = paddle.full(shape=[1], dtype='float32', fill_value=0.0)
        h = paddle.subtract(h, tensor_one)
        w = paddle.subtract(w, tensor_one)
        xmin, ymin, xmax, ymax = paddle.tensor.split(input,
                                                     axis=-1,
                                                     num_or_sections=4)
        xmin = paddle.maximum(paddle.minimum(xmin, w), tensor_zero)
        ymin = paddle.maximum(paddle.minimum(ymin, h), tensor_zero)
        xmax = paddle.maximum(paddle.minimum(xmax, w), tensor_zero)
        ymax = paddle.maximum(paddle.minimum(ymax, h), tensor_zero)
        clipped_box = paddle.concat([xmin, ymin, xmax, ymax], axis=-1)

        return {'Output': [clipped_box]}


@op_mapper('box_clip')
class Boxclip:
    @classmethod
    def opset_1(cls, graph, node, **kw):
        node = graph.make_node('box_clip',
                               inputs=node.input('Input') +
                               node.input('ImInfo'),
                               outputs=node.output('Output'),
                               domain='custom')


register_custom_paddle_op('box_clip', BoxClip)
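
BoxClip clamps box coordinates to [0, w-1] x [0, h-1], where h and w are the first two entries of ImInfo. A toy numpy check of that arithmetic (the [h, w, scale] layout of ImInfo follows the split above):

import numpy as np

im_info = np.array([600.0, 800.0, 1.0])   # [h, w, scale]
h, w = im_info[0] - 1.0, im_info[1] - 1.0

boxes = np.array([[-5.0, 10.0, 820.0, 590.0]])
xmin, ymin, xmax, ymax = np.split(boxes, 4, axis=-1)
clipped = np.concatenate([
    np.clip(xmin, 0.0, w),
    np.clip(ymin, 0.0, h),
    np.clip(xmax, 0.0, w),
    np.clip(ymax, 0.0, h),
], axis=-1)
print(clipped)  # [[  0.  10. 799. 590.]]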