Example #1
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
                              rpn_bbox_offsets):

        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):

            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1),
                                     (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out the overlap columns of ground-truth boxes that carry
            # the ignore label so they never become a target
            ignore_mask = 1 - F.equal(
                gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask

            # for each anchor, keep the IoU with its best-matching ground truth
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)

        result = F.concat(res, 0)
        return result
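The argmax / indexing_one_hot pair above selects, for every anchor, the IoU of its best-matching ground-truth box. A minimal NumPy sketch of that selection (the array values are made up for illustration):

import numpy as np

overlaps = np.array([[0.1, 0.7, 0.3],
                     [0.4, 0.2, 0.0]])               # (num_anchors, num_gt)
best_gt = overlaps.argmax(axis=1)                    # best ground truth per anchor
best_iou = overlaps[np.arange(overlaps.shape[0]), best_gt]
print(best_iou)                                      # [0.7 0.4]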
Example #2
def restore_bbox(rois, deltas, unnormalize=True):
    if unnormalize:
        std_opr = mge.tensor(config.bbox_normalize_stds[None, :])
        mean_opr = mge.tensor(config.bbox_normalize_means[None, :])
        deltas = deltas * std_opr
        deltas = deltas + mean_opr
    pred_bbox = bbox_transform_inv_opr(rois, deltas)
    return pred_bbox
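The unnormalize branch simply reverses the target normalization applied during training: the deltas are scaled by the configured stds and shifted by the means before bbox_transform_inv_opr decodes them. A minimal NumPy sketch of that step, with illustrative std/mean values rather than the ones from config:

import numpy as np

stds = np.array([[0.1, 0.1, 0.2, 0.2]])    # assumed example values, not config's
means = np.array([[0.0, 0.0, 0.0, 0.0]])
deltas = np.array([[1.0, -2.0, 0.5, 0.5]])
deltas = deltas * stds + means             # same arithmetic as the two lines above
print(deltas)                              # [[ 0.1 -0.2  0.1  0.1]]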
Example #3
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list,
                         rpn_iou_list):

        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis=0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list, axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]

        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)

        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1),
                                 (n, 1, c)).reshape(-1, c)
        # decode the offsets against the anchors and append the score columns
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
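The returned pred_boxes tensor stacks the decoded boxes and the two score columns side by side, one row of six values per anchor. A minimal NumPy sketch of that layout (the numbers are made up):

import numpy as np

rpn_bbox = np.array([[10., 10., 50., 60.]])    # decoded box: x1, y1, x2, y2
cls_score = np.array([[0.9]])
iou_prob = np.array([[0.8]])
pred_boxes = np.concatenate([rpn_bbox, cls_score, iou_prob], axis=1)
print(pred_boxes.shape)                        # (1, 6)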
Example #4
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
                           all_anchors_list, im_info):
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    batch_per_gpu = config.batch_per_gpu if is_train else 1
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds

    list_size = len(rpn_bbox_offsets_list)

    return_rois, return_probs = [], []
    batch_per_gpu = rpn_cls_prob_list[0].shape[0]
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        for l in range(list_size):
            # get proposals and probs
            offsets = rpn_bbox_offsets_list[l][bid] \
                .transpose(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                # un-normalize the predicted offsets before decoding
                std_opr = tensor(config.bbox_normalize_stds[None, :])
                mean_opr = tensor(config.bbox_normalize_means[None, :])
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
            all_anchors = all_anchors_list[l]

            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                    .transpose(1,2,0).reshape(-1, 2)
            probs = F.softmax(probs)[:, 1]
            # gather the proposals and probs
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)

        batch_proposals = F.concat(batch_proposals_list, axis=0)
        batch_probs = F.concat(batch_probs_list, axis=0)
        # filter the boxes with small size.
        wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1
        thresh = box_min_size * im_info[bid, 2]
        keep_mask = F.prod((wh >= thresh), axis=1)
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask)

        inds = inds.astype(np.int32)
        # batch_proposals = F.nn.indexing_one_hot(batch_proposals, inds, 0)
        # batch_probs = F.nn.indexing_one_hot(batch_probs, inds, 0)
        batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds]

        # prev_nms_top_n
        num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0])
        idx = F.argsort(batch_probs, descending=True)
        topk_idx = idx[:num_proposals].reshape(-1)
        batch_proposals = batch_proposals[topk_idx].detach()
        batch_probs = batch_probs[topk_idx].detach()

        # For each image, run a total-level NMS, and choose topk results.
        keep_inds = nms(batch_proposals,
                        batch_probs,
                        nms_threshold,
                        max_output=2000)
        # num = F.minimum(post_nms_top_n, keep_inds.shape[0])
        # keep_inds = keep_inds[:num]

        batch_rois = batch_proposals[keep_inds]
        batch_probs = batch_probs[keep_inds]

        # cons the rois
        batch_inds = F.ones((batch_rois.shape[0], 1)) * bid
        batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1)
        return_rois.append(batch_rois)
        return_probs.append(batch_probs)

    if batch_per_gpu == 1:
        return batch_rois, batch_probs
    else:
        concated_rois = F.concat(return_rois, axis=0)
        concated_probs = F.concat(return_probs, axis=0)
        return concated_rois, concated_probs
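The small-box filter above drops proposals whose width or height is below the scale-adjusted threshold, and falls back to keeping everything when nothing survives, so the top-k and NMS stages never see an empty set. A minimal NumPy sketch of that mask logic, with made-up boxes and threshold:

import numpy as np

proposals = np.array([[0., 0., 10., 10.],
                      [0., 0.,  1.,  1.]])
thresh = 4.0
wh = proposals[:, 2:4] - proposals[:, :2] + 1
keep_mask = np.prod(wh >= thresh, axis=1)        # 1 where both sides are big enough
keep_mask = keep_mask + (keep_mask.sum() == 0)   # keep everything if the mask is empty
inds = np.nonzero(keep_mask > 0)[0]
print(inds)                                      # [0]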
Example #5
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
                           all_anchors_list, im_info):
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    batch_per_gpu = config.batch_per_gpu if is_train else 1
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds

    list_size = len(rpn_bbox_offsets_list)

    return_rois = []
    return_probs = []
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        for l in range(list_size):
            # get proposals and probs
            offsets = rpn_bbox_offsets_list[l][bid] \
                .dimshuffle(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                # un-normalize the predicted offsets before decoding
                std_opr = tensor(config.bbox_normalize_stds[None, :])
                mean_opr = tensor(config.bbox_normalize_means[None, :])
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
            all_anchors = all_anchors_list[l]
            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                    .dimshuffle(1,2,0).reshape(-1, 2)
            probs = F.softmax(probs)[:, 1]
            # gather the proposals and probs
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)
        batch_proposals = F.concat(batch_proposals_list, axis=0)
        batch_probs = F.concat(batch_probs_list, axis=0)
        # filter the zero boxes.
        batch_keep_mask = filter_boxes_opr(batch_proposals,
                                           box_min_size * im_info[bid, 2])
        batch_probs = batch_probs * batch_keep_mask
        # prev_nms_top_n
        num_proposals = F.minimum(prev_nms_top_n, batch_probs.shapeof()[0])
        batch_probs, idx = F.argsort(batch_probs, descending=True)
        batch_probs = batch_probs[:num_proposals].reshape(-1, 1)
        topk_idx = idx[:num_proposals].reshape(-1)
        batch_proposals = batch_proposals.ai[topk_idx]
        batch_rois = F.concat([batch_proposals, batch_probs], axis=1)
        # For each image, run a total-level NMS, and choose topk results.
        keep_inds = gpu_nms(batch_rois, nms_threshold, post_nms_top_n)
        batch_rois = batch_rois.ai[keep_inds]
        batch_probs = batch_rois[:, -1]
        # cons the rois
        batch_inds = mge.ones((batch_rois.shapeof()[0], 1)) * bid
        batch_rois = F.concat([batch_inds, batch_rois[:, :-1]], axis=1)
        return_rois.append(batch_rois)
        return_probs.append(batch_probs)

    if batch_per_gpu == 1:
        return batch_rois, batch_probs
    else:
        concated_rois = F.concat(return_rois, axis=0)
        concated_probs = F.concat(return_probs, axis=0)
        return concated_rois, concated_probs
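Both variants implement the same prev_nms_top_n pattern: sort the objectness scores in descending order and carry only the top-k proposals into NMS. A minimal NumPy sketch of that selection with made-up scores:

import numpy as np

probs = np.array([0.2, 0.9, 0.5, 0.7])
prev_nms_top_n = 2
order = np.argsort(-probs)                       # descending sort
topk_idx = order[:min(prev_nms_top_n, probs.shape[0])]
print(topk_idx)                                  # [1 3]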
Example #6
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
        all_anchors_list, im_info):
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    batch_per_gpu = config.train_batch_per_gpu if is_train else 1
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds

    list_size = len(rpn_bbox_offsets_list)

    return_rois = []
    return_inds = []
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        # a single feature level is handled by the same loop
        for l in range(list_size):
            # get proposals and probs
            offsets = rpn_bbox_offsets_list[l][bid] \
                .permute(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                # un-normalize the predicted offsets before decoding
                std_opr = torch.tensor(config.bbox_normalize_stds[None, :]
                    ).type_as(offsets)
                mean_opr = torch.tensor(config.bbox_normalize_means[None, :]
                    ).type_as(offsets)
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
            all_anchors = all_anchors_list[l]
            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                    .permute(1, 2, 0).flatten()
            # gather the proposals and probs
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)
        batch_proposals = torch.cat(batch_proposals_list, dim=0)
        batch_probs = torch.cat(batch_probs_list, dim=0)
        batch_keep_index = filter_boxes_opr(
                batch_proposals, box_min_size * im_info[bid, 2])
        batch_proposals = batch_proposals[batch_keep_index]
        batch_probs = batch_probs[batch_keep_index]
        num_proposals = min(prev_nms_top_n, batch_probs.shape[0])
        batch_probs, idx = batch_probs.sort(descending=True)
        batch_probs = batch_probs[:num_proposals]
        topk_idx = idx[:num_proposals].flatten()
        batch_proposals = batch_proposals[topk_idx]
        # For each image, run a total-level NMS, and choose topk results.
        keep = nms(batch_proposals, batch_probs, nms_threshold)
        batch_proposals = batch_proposals[keep]
        batch_probs = batch_probs[keep]
        num_proposals = min(post_nms_top_n, batch_probs.shape[0])
        batch_proposals = batch_proposals[:num_proposals]
        batch_probs = batch_probs[:num_proposals]

        batch_inds = torch.ones(batch_proposals.shape[0], 1
                                ).type_as(batch_proposals) * bid
        batch_rois = torch.cat([batch_inds, batch_proposals], axis=1).detach()
        return_rois.append(batch_rois)
        return_inds.append(batch_rois.shape[0])

    if batch_per_gpu == 1:
        return batch_rois, [batch_rois.shape[0]]
    else:
        concated_rois = torch.cat(return_rois, axis=0)
        import numpy as np
        return_inds = np.cumsum(return_inds)
        return concated_rois, return_inds
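The PyTorch variant returns the cumulative proposal counts alongside the concatenated rois, so a caller can slice out the rows belonging to each image. A minimal NumPy sketch of that bookkeeping with made-up counts:

import numpy as np

counts = [3, 2, 4]                  # rois kept per image
offsets = np.cumsum(counts)         # [3 5 9]
start = 0
for i, end in enumerate(offsets):
    print(f"image {i}: rows {start}:{end}")
    start = end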