Example #1
0
    def aug_test_rpn(self, feats, img_metas):
        """Test with augmentation for only for ``RPNHead`` and its variants,
        e.g., ``GARPNHead``, etc.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.
            img_metas (list[dict]): Meta info of each image.

        Returns:
            list[Tensor]: Proposals of each image, each item has shape (n, 5),
                where 5 represent (tl_x, tl_y, br_x, br_y, score).
        """
        num_imgs = len(img_metas[0])
        # Pool proposals per image across every augmented view.
        aug_proposals = [[] for _ in range(num_imgs)]
        for aug_feat, aug_meta in zip(feats, img_metas):
            per_img = self.simple_test_rpn(aug_feat, aug_meta)
            for img_idx, props in enumerate(per_img):
                aug_proposals[img_idx].append(props)
        # Transpose 'img_metas' from [aug][img] to [img][aug] ordering so it
        # lines up with 'aug_proposals'.
        aug_img_metas = [
            [img_metas[aug_idx][img_idx] for aug_idx in range(len(img_metas))]
            for img_idx in range(num_imgs)
        ]
        # Merging rescales the proposals back to the original image size.
        return [
            merge_aug_proposals(props, metas, self.test_cfg)
            for props, metas in zip(aug_proposals, aug_img_metas)
        ]
 def aug_test_rpn(self, feats, img_metas, rpn_test_cfg, sync_bg=False):
     """Test RPN with augmentations, optionally averaging background vectors.

     Args:
         feats (tuple): Features of each augmented view.
         img_metas (list[list[dict]]): Meta info, outer list over
             augmentations, inner list over images in the batch.
         rpn_test_cfg: RPN test config forwarded to ``simple_test_rpn``
             and ``merge_aug_proposals``.
         sync_bg (bool): If True, ``simple_test_rpn`` also returns a
             background vector per augmentation; their average is returned
             with the merged proposals.

     Returns:
         list[Tensor] | tuple[list[Tensor], Tensor | None]: Merged proposals
             per image (each (n, 5)); with ``sync_bg`` also the averaged
             background vector (None if no views were processed).
     """
     imgs_per_gpu = len(img_metas[0])
     aug_proposals = [[] for _ in range(imgs_per_gpu)]
     bg_vectors = []
     bg_vector_avg = None  # defined even if 'feats' is empty
     for x, img_meta in zip(feats, img_metas):
         if sync_bg:
             proposal_list, bg_vector = self.simple_test_rpn(
                 x, img_meta, rpn_test_cfg)
             bg_vectors.append(bg_vector)
         else:
             proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
         for i, proposals in enumerate(proposal_list):
             aug_proposals[i].append(proposals)
     if sync_bg and bg_vectors:
         # BUGFIX: 'dim' was an undefined name (NameError). Concatenate the
         # per-augmentation vectors along dim 0 and average them, keeping the
         # leading dim so the result matches a single view's shape.
         # NOTE(review): assumes bg_vector is (1, C) — confirm with
         # simple_test_rpn's implementation.
         bg_vector_avg = torch.mean(
             torch.cat(bg_vectors, dim=0), dim=0, keepdim=True)
     # reorganize the order of 'img_metas' to match the dimensions
     # of 'aug_proposals'
     aug_img_metas = []
     for i in range(imgs_per_gpu):
         aug_img_meta = []
         for j in range(len(img_metas)):
             aug_img_meta.append(img_metas[j][i])
         aug_img_metas.append(aug_img_meta)
     # after merging, proposals will be rescaled to the original image size
     merged_proposals = [
         merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
         for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
     ]
     if sync_bg:
         return merged_proposals, bg_vector_avg
     else:
         return merged_proposals
Example #3
0
 def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
     """Test RPN with augmentations and merge the per-image proposals.

     Args:
         feats (tuple): Features of each augmented view.
         img_metas (list[list[dict]]): Meta info, outer list over
             augmentations, inner list over images in the batch.
         rpn_test_cfg: RPN test config forwarded to ``simple_test_rpn``
             and ``merge_aug_proposals``.

     Returns:
         list[Tensor]: Merged proposals per image, each of shape (n, 5).
     """
     imgs_per_gpu = len(img_metas[0])
     aug_proposals = [[] for _ in range(imgs_per_gpu)]
     for x, img_meta in zip(feats, img_metas):
         proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
         for i, proposals in enumerate(proposal_list):
             aug_proposals[i].append(proposals)
     # BUGFIX: 'img_metas' is ordered [aug][img] while 'aug_proposals' is
     # [img][aug]; transpose the metas so each image's proposals are merged
     # with the metas of *every* augmentation (the previous direct zip only
     # paired them correctly by accident for trivial cases).
     aug_img_metas = []
     for i in range(imgs_per_gpu):
         aug_img_metas.append(
             [img_metas[j][i] for j in range(len(img_metas))])
     # after merging, proposals will be rescaled to the original image size
     merged_proposals = [
         merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
         for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
     ]
     return merged_proposals
Example #4
0
 def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
     """Test RPN with augmentations.

     Args:
         feats (tuple): Features of each augmented view.
         img_metas (list[list[dict]]): Meta info, outer list over
             augmentations, inner list over images in the batch.
         rpn_test_cfg: RPN test config forwarded to ``simple_test_rpn``
             and ``merge_aug_proposals``.

     Returns:
         list[Tensor]: Merged proposals per image, each of shape (n, 5).
     """
     num_imgs = len(img_metas[0])
     # Gather each image's proposals from every augmented view.
     aug_proposals = [[] for _ in range(num_imgs)]
     for aug_feat, aug_meta in zip(feats, img_metas):
         per_img = self.simple_test_rpn(aug_feat, aug_meta, rpn_test_cfg)
         for img_idx, props in enumerate(per_img):
             aug_proposals[img_idx].append(props)
     # Transpose the metas from [aug][img] to [img][aug] ordering so they
     # line up with 'aug_proposals'.
     aug_img_metas = [
         [img_metas[aug_idx][img_idx] for aug_idx in range(len(img_metas))]
         for img_idx in range(num_imgs)
     ]
     # Merging rescales the proposals back to the original image size.
     return [
         merge_aug_proposals(props, metas, rpn_test_cfg)
         for props, metas in zip(aug_proposals, aug_img_metas)
     ]
Example #5
0
 def aug_test(self, imgs, img_metas, rescale=False):
     """Augmented test that returns the merged proposals of the first image.

     Args:
         imgs (list[Tensor]): Augmented views of the input batch.
         img_metas (list[list[dict]]): Meta info, outer list over
             augmentations.
         rescale (bool): If False, proposals are mapped back to the scale of
             ``imgs[0]`` instead of the original image scale.

     Returns:
         numpy.ndarray: Proposals of the first image.
     """
     feats = self.extract_feats(imgs)
     num_imgs = len(img_metas[0])
     collected = [[] for _ in range(num_imgs)]
     for feat, meta in zip(feats, img_metas):
         for idx, props in enumerate(self.common_test(feat, meta)):
             collected[idx].append(props)
     # Merging rescales the proposals to the original image size.
     proposal_list = [
         merge_aug_proposals(props, meta, self.test_cfg.rpn)
         for props, meta in zip(collected, img_metas)
     ]
     if not rescale:
         # Map the boxes back onto the (possibly flipped/scaled) input.
         for props, meta in zip(proposal_list, img_metas[0]):
             props[:, :4] = bbox_mapping(props[:, :4], meta['img_shape'],
                                         meta['scale_factor'], meta['flip'])
     # TODO: remove this restriction
     return proposal_list[0].cpu().numpy()
    def aug_test(self, imgs, img_metas, proposals=None, rescale=False):
        """Test a cascade R-CNN model ensemble with augmentations.

        RPN proposals are pooled from every (model, augmentation) pair and
        merged; each model then scores the merged proposals through all of
        its cascade stages, and the resulting detections are merged and
        NMS-ed.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].

        Args:
            imgs (list[Tensor]): Augmented views of the input batch.
            img_metas (list[list[dict]]): Meta info, outer list over
                augmentations.
            proposals: Unused here; the loop variable below shadows it.
            rescale (bool): See summary above.

        Returns:
            list: Bbox results encoded by ``bbox2result``.
        """
        # NOTE(review): assumes every ensemble member shares the test config
        # of models[0] — confirm when adding heterogeneous models.
        rpn_test_cfg = self.models[0].test_cfg.rpn
        imgs_per_gpu = len(img_metas[0])
        aug_proposals = [[] for _ in range(imgs_per_gpu)]
        # Stage 1: pool RPN proposals from every model and augmented view.
        for model in self.models:
            for x, img_meta in zip(model.extract_feats(imgs), img_metas):
                proposal_list = model.simple_test_rpn(x, img_meta,
                                                      rpn_test_cfg)
                for i, proposals in enumerate(proposal_list):
                    aug_proposals[i].append(proposals)

        # after merging, proposals will be rescaled to the original image size
        proposal_list = [
            merge_aug_proposals(proposals, img_meta, rpn_test_cfg)
            for proposals, img_meta in zip(aug_proposals, img_metas)
        ]
        rcnn_test_cfg = self.models[0].test_cfg.rcnn
        aug_bboxes = []
        aug_scores = []
        aug_img_metas = []
        # Stage 2: each model refines and scores the merged proposals through
        # all cascade stages, once per augmented view.
        for model in self.models:
            for x, img_meta in zip(model.extract_feats(imgs), img_metas):
                # only one image in the batch
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']

                # NOTE(review): only proposal_list[0] is consumed — relies on
                # a single image per batch; confirm with callers.
                proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                         scale_factor, flip)
                # "ms" in variable names means multi-stage
                ms_scores = []

                rois = bbox2roi([proposals])
                for i in range(model.num_stages):
                    bbox_roi_extractor = model.bbox_roi_extractor[i]
                    bbox_head = model.bbox_head[i]

                    bbox_feats = bbox_roi_extractor(
                        x[:len(bbox_roi_extractor.featmap_strides)], rois)
                    if model.with_shared_head:
                        bbox_feats = model.shared_head(bbox_feats)

                    cls_score, bbox_pred = bbox_head(bbox_feats)
                    ms_scores.append(cls_score)

                    # Cascade behavior: refine rois with this stage's
                    # regression before the next stage.
                    if i < model.num_stages - 1:
                        bbox_label = cls_score.argmax(dim=1)
                        rois = bbox_head.regress_by_class(
                            rois, bbox_label, bbox_pred, img_meta[0])

                # Average classification scores over all cascade stages.
                cls_score = sum(ms_scores) / float(len(ms_scores))
                bboxes, scores = model.bbox_head[-1].get_det_bboxes(
                    rois,
                    cls_score,
                    bbox_pred,
                    img_shape,
                    scale_factor,
                    rescale=False,
                    cfg=None)
                aug_bboxes.append(bboxes)
                aug_scores.append(scores)
                aug_img_metas.append(img_meta)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, aug_img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)

        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.models[0].bbox_head[-1].num_classes)

        # Mask ensemble testing has not been implemented for this wrapper.
        if self.models[0].with_mask:
            raise NotImplementedError
        else:
            return bbox_result
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test a cascade R-CNN model ensemble with augmentations (bbox+mask).

        RPN proposals are pooled across every (model, augmentation) pair and
        merged; each model then scores the merged proposals through all of
        its cascade stages, and the detections are merged and NMS-ed. When
        the first model has a mask head, mask predictions from every model,
        stage and augmentation are merged as well.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].

        Args:
            imgs (list[Tensor]): Augmented views of the input batch.
            img_metas (list[list[dict]]): Meta info, outer list over
                augmentations.
            **kwargs: Unused; kept for interface compatibility.

        Returns:
            list | tuple: ``bbox_result`` or ``(bbox_result, segm_result)``
                when the first model has a mask head.
        """
        # NOTE(review): assumes every ensemble member shares the test config
        # of models[0] — confirm when adding heterogeneous models.
        rpn_test_cfg = self.models[0].test_cfg.rpn
        imgs_per_gpu = len(img_metas[0])
        aug_proposals = [[] for _ in range(imgs_per_gpu)]
        for model in self.models:
            # recompute feats to save memory
            for x, img_meta in zip(model.extract_feats(imgs), img_metas):
                proposal_list = model.simple_test_rpn(x, img_meta,
                                                      rpn_test_cfg)
                for i, proposals in enumerate(proposal_list):
                    aug_proposals[i].append(proposals)
        # after merging, proposals will be rescaled to the original image size
        proposal_list = [
            merge_aug_proposals(proposals, img_meta, rpn_test_cfg)
            for proposals, img_meta in zip(aug_proposals, img_metas)
        ]

        rcnn_test_cfg = self.models[0].test_cfg.rcnn
        aug_bboxes = []
        aug_scores = []
        aug_img_metas = []
        for model in self.models:
            for x, img_meta in zip(model.extract_feats(imgs), img_metas):
                # only one image in the batch
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']

                proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                         scale_factor, flip)
                # "ms" in variable names means multi-stage
                ms_scores = []

                rois = bbox2roi([proposals])
                for i in range(model.num_stages):
                    bbox_head = model.bbox_head[i]
                    cls_score, bbox_pred = model._bbox_forward_test(i, x, rois)
                    ms_scores.append(cls_score)

                    # Cascade behavior: refine rois with this stage's
                    # regression before the next stage.
                    if i < model.num_stages - 1:
                        bbox_label = cls_score.argmax(dim=1)
                        rois = bbox_head.regress_by_class(
                            rois, bbox_label, bbox_pred, img_meta[0])

                # Average classification scores over all cascade stages.
                cls_score = sum(ms_scores) / float(len(ms_scores))
                bboxes, scores = model.bbox_head[-1].get_det_bboxes(
                    rois,
                    cls_score,
                    bbox_pred,
                    img_shape,
                    scale_factor,
                    rescale=False,
                    cfg=None)
                aug_bboxes.append(bboxes)
                aug_scores.append(scores)
                aug_img_metas.append(img_meta)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, aug_img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)

        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.models[0].bbox_head[-1].num_classes)

        if self.models[0].with_mask:
            if det_bboxes.shape[0] == 0:
                segm_result = [
                    []
                    for _ in range(self.models[0].mask_head[-1].num_classes -
                                   1)
                ]
            else:
                aug_masks = []
                aug_img_metas = []
                for model in self.models:
                    for x, img_meta in zip(model.extract_feats(imgs),
                                           img_metas):
                        img_shape = img_meta[0]['img_shape']
                        scale_factor = img_meta[0]['scale_factor']
                        flip = img_meta[0]['flip']
                        _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                               scale_factor, flip)
                        mask_rois = bbox2roi([_bboxes])
                        mask_roi_extractor = model.mask_roi_extractor[-1]
                        mask_feats = mask_roi_extractor(
                            x[:len(mask_roi_extractor.featmap_strides)],
                            mask_rois)
                        if model.with_shared_head:
                            # BUGFIX: was 'self.shared_head' — 'self' is the
                            # ensemble wrapper; the shared head belongs to
                            # the member model whose flag was just checked
                            # (matches the bbox branch above).
                            mask_feats = model.shared_head(mask_feats)
                        for i in range(model.num_stages):
                            mask_head = model.mask_head[i]
                            mask_pred = mask_head(mask_feats)
                            aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                            aug_img_metas.append(img_meta)
                merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                               rcnn_test_cfg)

                ori_shape = img_metas[0][0]['ori_shape']
                # Merged masks are already at the original image size, so no
                # further rescaling is applied here.
                segm_result = self.models[0].mask_head[-1].get_seg_masks(
                    merged_masks,
                    det_bboxes,
                    det_labels,
                    rcnn_test_cfg,
                    ori_shape,
                    scale_factor=1.0,
                    rescale=False)
            return bbox_result, segm_result
        else:
            return bbox_result
Example #8
0
    def forward_feat_aug(self,
                         x=None,
                         img_meta=None,
                         proposals=None,
                         rescale=False):
        """Augmented bbox test starting from precomputed per-view features.

        Args:
            x (Sequence): Nested features; indexing below implies the layout
                ``x[view][scale]`` of Tensors — TODO confirm with callers.
            img_meta (Sequence): Meta dicts laid out to match ``x``
                (``img_meta[view][scale]``).
            proposals: Unused; the loop variable below shadows it.
            rescale (bool): If False, detected boxes are scaled back to the
                network input scale via the first meta's ``scale_factor``.

        Returns:
            list: One entry per detection set; ``bbox2result`` output when
                the rcnn test cfg has ``nms``, otherwise ``[bboxes, labels]``.
        """
        assert x is not None and img_meta is not None
        if isinstance(x, collections.Sequence):
            assert len(x) == len(img_meta)
            assert len(x[0]) == len(img_meta[0])
            assert isinstance(x[0][0], torch.Tensor)

        # Regroup from [view][scale] to per-scale lists: the views of each
        # scale are concatenated along the batch dimension.
        feats = []
        img_metas = []
        scales_num = len(x[0])
        for i in range(scales_num):
            feats.append([torch.cat([x_[i] for x_ in x], dim=0)])
            img_metas.append([meta[i] for meta in img_meta])

        # Optionally run each scale's features through the shared head before
        # bbox testing.
        bbox_feats_all_scales = []
        for feat_ in feats:
            if self.feat_from_shared_head:
                bbox_feats_all = [self.shared_head(feat_[0])]
            else:
                bbox_feats_all = feat_
            bbox_feats_all_scales.append(bbox_feats_all)

        imgs_per_gpu = len(img_metas[0])
        aug_proposals = [[] for _ in range(imgs_per_gpu)]
        # NOTE: 'x' and 'img_meta' are rebound here, shadowing the
        # parameters of the same name above.
        for x, img_meta in zip(feats, img_metas):
            proposal_list = self.simple_test_rpn(x, img_meta,
                                                 self.test_cfg.rpn)
            for i, proposals in enumerate(proposal_list):
                aug_proposals[i].append(proposals)

        # Transpose 'img_metas' to per-image ordering so it matches
        # 'aug_proposals'.
        aug_img_metas = []
        for i in range(imgs_per_gpu):
            aug_img_meta = []
            for j in range(len(img_metas)):
                aug_img_meta.append(img_metas[j][i])
            aug_img_metas.append(aug_img_meta)

        # After merging, proposals are rescaled to the original image size.
        merged_proposals = [
            merge_aug_proposals(proposals, aug_img_meta, self.test_cfg.rpn)
            for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
        ]

        proposal_list = merged_proposals

        det_bboxes_collect, det_labels_collect = self.aug_test_bboxes(
            bbox_feats_all_scales, img_metas, proposal_list,
            self.test_cfg.rcnn)

        bbox_results_collect = []
        for det_bboxes, det_labels in zip(det_bboxes_collect,
                                          det_labels_collect):
            if rescale:
                _det_bboxes = det_bboxes
            else:
                # Map boxes back to the network input scale of the first view.
                _det_bboxes = det_bboxes.clone()
                _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']

            # With an 'nms' entry in the rcnn cfg the boxes are class-binned;
            # otherwise raw boxes and labels are returned.
            if hasattr(self.test_cfg.rcnn, 'nms'):
                bbox_results = bbox2result(_det_bboxes, det_labels,
                                           self.bbox_head.num_classes)
                bbox_results_collect.append(bbox_results)
            else:
                bbox_results_collect.append([_det_bboxes, det_labels])

        # det_bboxes always keep the original scale
        return bbox_results_collect