Example #1
 def test_pairwise_ioa(self):
     boxes1, boxes2 = self.create_boxes()
     expected_ioas = torch.tensor(
         [[1.0, 1.0, 1.0, 1.0, 1.0, 0.25], [1.0, 1.0, 1.0, 1.0, 1.0, 0.25]]
     )
     ioas = pairwise_ioa(Boxes(boxes1), Boxes(boxes2))
     self.assertTrue(torch.allclose(ioas, expected_ioas))
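
For context, the quantity this test exercises: pairwise_ioa(boxes1, boxes2) returns an N×M matrix whose (i, j) entry is the intersection area of box i and box j divided by the area of box j (intersection over area, not IoU). A minimal sketch of that computation, assuming XYXY box tensors (pairwise_ioa_sketch is illustrative, not detectron2's implementation):

import torch

def pairwise_ioa_sketch(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    # boxes1: (N, 4), boxes2: (M, 4), both in XYXY format
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])  # (M,)
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])  # (N, M, 2) intersection top-left
    rb = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])  # (N, M, 2) intersection bottom-right
    wh = (rb - lt).clamp(min=0)                                # zero out empty intersections
    inter = wh[..., 0] * wh[..., 1]                            # (N, M) intersection areas
    return inter / area2                                       # divide by the second box's area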
Example #2
    def _filterHalfMan(self, instances):
        # IoA matrix between every pair of boxes; zero the diagonal so a box
        # is not compared with itself.
        ioaMx = pairwise_ioa(instances.boxes_before,
                             instances.boxes_before).numpy()
        np.fill_diagonal(ioaMx, 0)
        maxIoa = np.max(ioaMx, axis=0)           # largest IoA of each box with any other box
        smallBoxIdx = np.where(maxIoa > 0.6)[0]  # boxes mostly contained in another box

        # The logic: if the small bbox is a subset of another bbox AND sits on
        # a cell boundary, it is certain to represent a half person.
        # This is checked via the small bbox's center: if the center lies
        # inside at least 2 cells, the box is flagged as a half person.
        # This works because the cells were laid out so that a whole person
        # still fits inside any cell intersection.
        idxToDrop = []
        for i in smallBoxIdx:
            xTL, yTL, xBR, yBR = instances.boxes_before.tensor[i].numpy()
            xCenter, yCenter = (xTL + xBR) / 2, (yTL + yBR) / 2
            numOfCells = np.sum((self.gridList[:, 0] <= xCenter)
                                & (self.gridList[:, 1] <= yCenter)
                                & (self.gridList[:, 2] >= xCenter)
                                & (self.gridList[:, 3] >= yCenter))
            if numOfCells > 1:
                idxToDrop.append(i)
        keepMask = [idx not in idxToDrop for idx in range(len(instances))]
        return instances[keepMask]
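
To make the cell test concrete, a hypothetical sketch (the gridList layout is an assumption; the code above only requires rows of XYXY cell rectangles whose overlaps are wide enough for a whole person):

import numpy as np

# Two hypothetical neighbouring cells overlapping in the band x in [400, 600].
gridList = np.array([
    [0,   0,  600, 1080],   # left cell
    [400, 0, 1000, 1080],   # right cell
])

xCenter, yCenter = 500, 540  # a box center inside the overlap band
numOfCells = np.sum((gridList[:, 0] <= xCenter)
                    & (gridList[:, 1] <= yCenter)
                    & (gridList[:, 2] >= xCenter)
                    & (gridList[:, 3] >= yCenter))
print(numOfCells)  # 2 -> the box straddles a cell boundary and is flagged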
Example #3
def patch(outputs):
    # Merge heavily overlapping detections, then collapse everything into a
    # single enclosing box with the average score.
    boxes = [Boxes(x.reshape(1, 4)) for x in outputs['instances'].pred_boxes]
    if len(boxes) == 0:
        return 0, None
    scores = outputs['instances'].scores

    # Greedily merge pairs of boxes whose IoA exceeds 0.9 until none remain.
    n = len(boxes)
    i = 0
    while i < n:
        j = 0  # restart the inner scan for every i
        while j < n:
            if i != j and pairwise_ioa(boxes[i], boxes[j]) > 0.9:
                boxes[i] = join_boxes(boxes[i], boxes[j])
                boxes.pop(j)
                scores = torch.cat([scores[:j], scores[j + 1:]])
                n -= 1
                if j < i:
                    i -= 1  # the merged box shifted left after the pop
            else:
                j += 1
        i += 1

    minX = min([x.tensor[0][0] for x in boxes])
    minY = min([x.tensor[0][1] for x in boxes])
    maxX = max([x.tensor[0][2] for x in boxes])
    maxY = max([x.tensor[0][3] for x in boxes])

    outputs['instances'] = Instances(
        outputs['instances'].image_size,
        pred_boxes=Boxes(torch.Tensor([minX, minY, maxX, maxY]).reshape(1, 4)),
        scores=torch.Tensor([sum(scores) / len(scores)]),
        pred_classes=torch.tensor([0], dtype=torch.int32))

    return len(boxes), [float(minX), float(minY), float(maxX), float(maxY)]
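
join_boxes is not defined in this snippet; a plausible minimal sketch, assuming it returns the tightest box enclosing both single-box inputs:

import torch
from detectron2.structures import Boxes

def join_boxes(a: Boxes, b: Boxes) -> Boxes:
    # Hypothetical helper: union (enclosing) box of two single-box Boxes.
    t = torch.cat([a.tensor, b.tensor], dim=0)  # (2, 4) in XYXY format
    joined = torch.stack([t[:, 0].min(), t[:, 1].min(),
                          t[:, 2].max(), t[:, 3].max()])
    return Boxes(joined.reshape(1, 4))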
Example #4

    def label_and_sample_proposals(
            self, proposals: List[Instances],
            targets: List[Instances]) -> List[Instances]:
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_sample_fraction``.

        Args:
            See :meth:`ROIHeads.forward`

        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:

                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)

                Other fields, such as "gt_classes" and "gt_masks", that are
                included in `targets` are returned as well.
        """
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        if self.proposal_append_gt:
            # non_ignore_gt_boxes = [x.gt_boxes[x.gt_classes != -1] for x in targets]
            gt_boxes = [x.gt_boxes for x in targets]
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0

            if self.ignore_ioa:
                gt_boxes = targets_per_image.gt_boxes
                match_quality_matrix = pairwise_iou(
                    gt_boxes, proposals_per_image.proposal_boxes)
                match_quality_matrix_t = match_quality_matrix.transpose(1, 0)
                ignore_match_quality_matrix_t = pairwise_ioa(
                    gt_boxes,
                    proposals_per_image.proposal_boxes).transpose(1, 0)
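                # gt boxes labelled -1 act as "ignore" regions: they are
                # matched by IoA (a proposal fully inside an ignore region
                # scores 1.0 regardless of the region's size), while real gt
                # boxes are matched by IoU. The masks below zero each matrix
                # where the other kind of gt applies.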

                gt_ignore_mask = targets_per_image.gt_classes.eq(-1).repeat(
                    ignore_match_quality_matrix_t.shape[0], 1)
                match_quality_matrix_t *= ~gt_ignore_mask
                ignore_match_quality_matrix_t *= gt_ignore_mask

                matched_idxs, matched_labels = self.proposal_matcher(
                    match_quality_matrix_t,
                    ignore_match_quality_matrix_t,
                    targets_per_image.gt_classes,
                )
            elif False:  # disabled alternative: relabel proposals against ignore boxes post-hoc
                gt_boxes = targets_per_image.gt_boxes[
                    targets_per_image.gt_classes != -1]
                ignore_gt_boxes = targets_per_image.gt_boxes[
                    targets_per_image.gt_classes == -1]
                match_quality_matrix = pairwise_iou(
                    gt_boxes, proposals_per_image.proposal_boxes)
                matched_idxs, matched_labels = self.proposal_matcher(
                    match_quality_matrix)
                if len(ignore_gt_boxes) > 0:
                    ignore_overlaps = pairwise_ioa(
                        ignore_gt_boxes, proposals_per_image.proposal_boxes)
                    ignore_overlaps_vals, _ = ignore_overlaps.max(dim=0)
                    iou_vals, _ = match_quality_matrix.max(dim=0)
                    matched_labels[(matched_labels == 0)
                                   & (ignore_overlaps_vals > iou_vals)
                                   & (ignore_overlaps_vals >
                                      self.iou_thresholds[0])] = -1
                    matched_labels[(matched_labels != 1)
                                   & (ignore_overlaps_vals > iou_vals)
                                   & (ignore_overlaps_vals <
                                      self.iou_thresholds[0])] = 0
                gt_targets = targets_per_image.gt_classes != -1
                targets_per_image = targets_per_image[gt_targets]
            else:
                match_quality_matrix = pairwise_iou(
                    targets_per_image.gt_boxes,
                    proposals_per_image.proposal_boxes)
                matched_idxs, matched_labels = self.proposal_matcher(
                    match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes)

            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes

            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (only "gt_classes" has been).
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing wastes some compute, because heads
                # like masks, keypoints, etc., will filter the proposals again
                # (by foreground/background, number of keypoints in the image,
                # etc.), so we essentially index the data twice.
                for (trg_name,
                     trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith(
                            "gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name,
                                                trg_value[sampled_targets])
            else:
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros(
                        (len(sampled_idxs), 4)))
                proposals_per_image.gt_boxes = gt_boxes

            num_bg_samples.append(
                (gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])

            # Overlap box select: for each sampled proposal, record the
            # second-highest-IoU gt box and that IoU.
            matched_vals, sorted_idx = match_quality_matrix.sort(
                0, descending=True)
            if matched_vals.size(0) > 1:
                overlap_iou = matched_vals[1, :]
                overlap_gt_idx = sorted_idx[1, :]
            else:
                overlap_iou = matched_vals.new_zeros(matched_vals.size(1))
                overlap_gt_idx = sorted_idx[0, :]

            selected_overlap_iou = overlap_iou[sampled_idxs]
            selected_overlap_gt_idx = overlap_gt_idx[sampled_idxs]
            selected_overlap_gt_boxes = targets_per_image.gt_boxes[
                selected_overlap_gt_idx]

            proposals_per_image.overlap_iou = selected_overlap_iou
            proposals_per_image.overlap_gt_boxes = selected_overlap_gt_boxes

            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))

        return proposals_with_gt
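
For intuition on why the ignore handling above uses IoA rather than IoU: a proposal fully contained in a large ignore region has IoA 1.0 against it even though its IoU is tiny, so containment is detected regardless of the region's size. A small self-contained check (assuming detectron2's Boxes, pairwise_iou and pairwise_ioa):

import torch
from detectron2.structures import Boxes, pairwise_iou, pairwise_ioa

ignore_region = Boxes(torch.tensor([[0.0, 0.0, 100.0, 100.0]]))
proposal = Boxes(torch.tensor([[10.0, 10.0, 20.0, 20.0]]))

print(pairwise_iou(ignore_region, proposal))  # tensor([[0.0100]]): IoU is tiny
print(pairwise_ioa(ignore_region, proposal))  # tensor([[1.0000]]): fully contained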