Example #1
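These snippets follow a maskrcnn-benchmark-style API. A plausible common header for the examples below, with module paths as in upstream maskrcnn-benchmark (a fork such as this one may lay them out differently, so treat the paths as assumptions), would be:

from collections import defaultdict

import numpy as np
import torch

from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou, cat_boxlist
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
    BalancedPositiveNegativeSampler,
)
# project_masks_on_boxes is assumed to be a local helper, as in
# maskrcnn-benchmark's roi_heads/mask_head/loss.py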
def mask_target_opr(proposals, targets, high_threshold, low_threshold,
                    discretization_size):
    """
    Generate mask targets for positive proposals, for computing the mask loss.

    Args:
        proposals: (list[BoxList])
        targets: (list[BoxList])
        high_threshold: (float)
        low_threshold: (float)
        discretization_size: (int)
    """

    matcher = Matcher(high_threshold,
                      low_threshold,
                      allow_low_quality_matches=False)

    # prepare targets
    labels = []
    masks = []
    for proposals_per_image, targets_per_image in zip(proposals, targets):
        # match targets to proposals
        match_quality_matrix = boxlist_iou(targets_per_image,
                                           proposals_per_image)
        matched_idxs = matcher(match_quality_matrix)
        # Mask R-CNN needs "labels" and "masks" fields for creating the targets
        target = targets_per_image.copy_with_fields(["labels", "masks"])
        # get the corresponding GT target for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)

        matched_idxs = matched_targets.get_field("matched_idxs")
        labels_per_image = matched_targets.get_field("labels")
        labels_per_image = labels_per_image.to(dtype=torch.int64)

        # this can probably be removed, but is left here for clarity
        # and completeness
        neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
        labels_per_image[neg_inds] = 0

        # mask scores are only computed on positive samples
        positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)

        segmentation_masks = matched_targets.get_field("masks")
        segmentation_masks = segmentation_masks[positive_inds]

        positive_proposals = proposals_per_image[positive_inds]

        masks_per_image = project_masks_on_boxes(segmentation_masks,
                                                 positive_proposals,
                                                 discretization_size)

        labels.append(labels_per_image)
        masks.append(masks_per_image)

    return labels, masks
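
A minimal usage sketch (not from the original source; the values are illustrative): wiring mask_target_opr into a mask head's loss computation, with Mask R-CNN's usual 28x28 mask resolution and a 0.5 foreground IoU threshold.

labels, mask_targets = mask_target_opr(
    proposals,               # list[BoxList]: sampled RoIs, one BoxList per image
    targets,                 # list[BoxList] carrying "labels" and "masks" fields
    high_threshold=0.5,      # IoU >= 0.5 -> foreground
    low_threshold=0.5,       # IoU < 0.5 -> background; no in-between band
    discretization_size=28,  # side length of the square mask targets
)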
Example #2
def anchor_target_opr(
        anchors, targets, box_coder, high_threshold, low_threshold,
        allow_low_quality_matches=True):
    """
    Generate anchor targets for computing retinanet loss.

    Args:
        anchors: (list[BoxList])
        targets: (list[BoxList])
        box_coder: (BoxCoder)
        high_threshold: (float)
        low_threshold: (float)
    """
    matcher = Matcher(high_threshold, low_threshold,
                      allow_low_quality_matches=allow_low_quality_matches)

    # prepare targets
    labels = []
    regression_targets = []
    for anchors_per_image, targets_per_image in zip(anchors, targets):
        # match targets to anchors
        match_quality_matrix = boxlist_iou(targets_per_image, anchors_per_image)
        matched_idxs = matcher(match_quality_matrix)
        targets_per_image = targets_per_image.copy_with_fields(['labels'])
        matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)

        matched_idxs = matched_targets.get_field("matched_idxs")
        # generate per-anchor classification labels
        labels_per_image = matched_targets.get_field("labels")
        labels_per_image = labels_per_image.to(dtype=torch.float32)

        # Background (negative examples)
        bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
        labels_per_image[bg_indices] = 0

        # discard indices that are between thresholds
        inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
        labels_per_image[inds_to_discard] = -1

        # compute regression targets
        regression_targets_per_image = box_coder.encode(
            matched_targets.bbox, anchors_per_image.bbox
        )

        labels.append(labels_per_image)
        regression_targets.append(regression_targets_per_image)

    return labels, regression_targets
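
An illustrative call for RetinaNet-style targets (not from the original source; the 0.5/0.4 thresholds follow the RetinaNet paper, and the BoxCoder weights are an assumption):

labels, regression_targets = anchor_target_opr(
    anchors,    # list[BoxList], one per image
    targets,    # list[BoxList] carrying a "labels" field
    box_coder,  # e.g. BoxCoder(weights=(10., 10., 5., 5.))
    high_threshold=0.5,
    low_threshold=0.4,
    allow_low_quality_matches=True,
)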
Example #3
def evaluate_box_proposals(
    predictions, dataset, thresholds=None, area="all", limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],    # all
        [0 ** 2, 32 ** 2],     # small
        [32 ** 2, 96 ** 2],    # medium
        [96 ** 2, 1e5 ** 2],   # large
        [96 ** 2, 128 ** 2],   # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],  # 512-inf
    ]
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]

        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]

        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])

        if len(gt_boxes) == 0:
            continue

        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]

        num_pos += len(gt_boxes)

        if len(gt_boxes) == 0:
            continue

        if len(prediction) == 0:
            continue

        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]

        overlaps = boxlist_iou(prediction, gt_boxes)

        _gt_overlaps = torch.zeros(len(gt_boxes))
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)

            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1

        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)

    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
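
A sketch of reporting average recall at 1000 proposals (AR@1000); it assumes `predictions` is a list of BoxList with an "objectness" field and `dataset` is a COCO-style dataset exposing id_to_img_map, get_img_info and coco, as the function expects:

stats = evaluate_box_proposals(predictions, dataset, area="all", limit=1000)
print("AR@1000 = {:.4f} over {} GT boxes".format(stats["ar"].item(), stats["num_pos"]))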
Example #4
def proposal_target_opr(
        proposals, targets, box_coder, high_threshold, low_threshold,
        batch_size_per_image, positive_fraction, return_ious=False,
        return_sample_id=False, return_raw_proposals=False):
    """
    Generate sampled proposal targets for computing the Fast R-CNN loss.

    Args:
        proposals: (list[BoxList])
        targets: (list[BoxList])
        box_coder: (BoxCoder)
        high_threshold: (float)
        low_threshold: (float)
        batch_size_per_image: (int)
        positive_fraction: (float)
        return_ious: (bool)
        return_sample_id: (bool)
        return_raw_proposals: (bool)
    """

    matcher = Matcher(high_threshold, low_threshold,
                      allow_low_quality_matches=False)
    fg_bg_sampler = BalancedPositiveNegativeSampler(
        batch_size_per_image, positive_fraction)

    # prepare targets
    labels = []
    regression_targets = []
    ious = []
    for proposals_per_image, targets_per_image in zip(proposals, targets):
        # match targets to proposals
        match_quality_matrix = boxlist_iou(
            targets_per_image, proposals_per_image)
        matched_idxs = matcher(match_quality_matrix)
        # Fast R-CNN only needs the "labels" field for selecting the targets
        target = targets_per_image.copy_with_fields("labels")
        # get the corresponding GT target for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_ious = match_quality_matrix.t()[
            range(proposals_per_image.bbox.shape[0]), matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)

        matched_idxs = matched_targets.get_field("matched_idxs")
        labels_per_image = matched_targets.get_field("labels")
        labels_per_image = labels_per_image.to(dtype=torch.int64)

        # Label background (below the low threshold)
        bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
        labels_per_image[bg_inds] = 0

        # Label ignore proposals (between low and high thresholds)
        ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
        labels_per_image[ignore_inds] = -1  # -1 is ignored by sampler

        # compute regression targets
        regression_targets_per_image = box_coder.encode(
            matched_targets.bbox, proposals_per_image.bbox
        )

        labels.append(labels_per_image)
        regression_targets.append(regression_targets_per_image)
        ious.append(matched_ious)

    sampled_pos_inds, sampled_neg_inds = fg_bg_sampler(labels)
    proposals = list(proposals)
    # attach the corresponding labels and regression_targets to each image's proposals
    for labels_per_image, regression_targets_per_image, ious_per_image, \
        proposals_per_image in zip(labels, regression_targets, ious, proposals):
        proposals_per_image.add_field("labels", labels_per_image)
        proposals_per_image.add_field(
            "regression_targets", regression_targets_per_image
        )
        if return_ious:
            proposals_per_image.add_field("ious", ious_per_image)

    if return_sample_id:
        sample_id = []
    if return_raw_proposals:
        raw_proposals = proposals.copy()
    # keep, for each image, only the proposals that were sampled
    # (positive or negative) by the fg_bg_sampler
    for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
    ):
        img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
        proposals_per_image = proposals[img_idx][img_sampled_inds]
        proposals[img_idx] = proposals_per_image

        if return_sample_id:
            sample_id.append(img_sampled_inds)

    if return_sample_id:
        if return_raw_proposals:
            return proposals, sample_id, raw_proposals
        else:
            return proposals, sample_id
    else:
        if return_raw_proposals:
            return proposals, raw_proposals
        else:
            return proposals
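
An illustrative Fast R-CNN sampling call (not from the original source; the values mirror common defaults: 512 RoIs per image, 25% positives, a 0.5 IoU threshold for both foreground and background):

proposals = proposal_target_opr(
    proposals, targets, box_coder,
    high_threshold=0.5, low_threshold=0.5,
    batch_size_per_image=512, positive_fraction=0.25,
)
# each returned BoxList now carries "labels" and "regression_targets"
# fields for its sampled RoIs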
Example #5
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Calculate precision and recall based on evaluation code of PASCAL VOC.
    This function calculates precision and recall of
    predicted bounding boxes obtained from a dataset which has :math:`N`
    images.
    The code is based on the evaluation code used in PASCAL VOC Challenge.
   """
    n_pos = defaultdict(int)
    score = defaultdict(list)
    match = defaultdict(list)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.numpy()
        pred_label = pred_boxlist.get_field("labels").numpy()
        pred_score = pred_boxlist.get_field("scores").numpy()
        gt_bbox = gt_boxlist.bbox.numpy()
        gt_label = gt_boxlist.get_field("labels").numpy()
        gt_difficult = gt_boxlist.get_field("difficult").numpy()

        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            # sort by score
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]

            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]

            n_pos[l] += np.logical_not(gt_difficult_l).sum()
            score[l].extend(pred_score_l)

            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                match[l].extend((0, ) * pred_bbox_l.shape[0])
                continue

            # VOC evaluation treats box coordinates as inclusive integer
            # pixel indices, hence the +1 on the max coordinates.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou

            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:
                if gt_idx >= 0:
                    if gt_difficult_l[gt_idx]:
                        match[l].append(-1)
                    else:
                        if not selec[gt_idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                    selec[gt_idx] = True
                else:
                    match[l].append(0)

    n_fg_class = max(n_pos.keys()) + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class

    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)

        order = score_l.argsort()[::-1]
        match_l = match_l[order]

        tp = np.cumsum(match_l == 1)
        fp = np.cumsum(match_l == 0)

        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]

    return prec, rec
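
The per-class precision/recall lists are usually reduced to average precision. A minimal sketch of the standard continuous-interpolation VOC AP (an illustration, not part of the original snippet):

def calc_detection_voc_ap(prec, rec):
    """Compute per-class VOC AP from the output of calc_detection_voc_prec_rec."""
    n_fg_class = len(prec)
    ap = np.empty(n_fg_class)
    for l in range(n_fg_class):
        if prec[l] is None or rec[l] is None:
            ap[l] = np.nan  # class never annotated
            continue
        # pad with sentinels, then make precision monotonically non-increasing
        mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
        mrec = np.concatenate(([0], rec[l], [1]))
        mpre = np.maximum.accumulate(mpre[::-1])[::-1]
        # sum the rectangular areas under the precision-recall curve
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap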
Example #6
def anchor_target_opr(
        anchors, targets, box_coder, high_threshold, low_threshold,
        batch_size_per_image, positive_fraction):
    """
    Generate anchor targets for computing RPN loss.

    Args:
        anchors: (list[BoxList])
        targets: (list[BoxList])
        box_coder: (BoxCoder)
        high_threshold: (float)
        low_threshold: (float)
        batch_size_per_image: (int)
        positive_fraction: (float)
    """
    matcher = Matcher(
        high_threshold, low_threshold, allow_low_quality_matches=True)
    fg_bg_sampler = BalancedPositiveNegativeSampler(
        batch_size_per_image, positive_fraction)

    anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
    # prepare targets
    labels = []
    regression_targets = []
    for anchors_per_image, targets_per_image in zip(anchors, targets):
        # match targets to anchors
        match_quality_matrix = boxlist_iou(targets_per_image, anchors_per_image)
        matched_idxs = matcher(match_quality_matrix)
        targets_per_image = targets_per_image.copy_with_fields([])
        matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)

        matched_idxs = matched_targets.get_field("matched_idxs")
        # generate rpn labels
        labels_per_image = matched_idxs >= 0
        labels_per_image = labels_per_image.to(dtype=torch.float32)

        # Background (negative examples)
        bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
        labels_per_image[bg_indices] = 0

        # discard anchors that go out of the boundaries of the image
        labels_per_image[~anchors_per_image.get_field("visibility")] = -1

        # discard indices that are between thresholds
        inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
        labels_per_image[inds_to_discard] = -1

        # compute regression targets
        regression_targets_per_image = box_coder.encode(
            matched_targets.bbox, anchors_per_image.bbox
        )

        labels.append(labels_per_image)
        regression_targets.append(regression_targets_per_image)

    sampled_pos_inds, sampled_neg_inds = fg_bg_sampler(labels)
    sampled_pos_inds = torch.nonzero(
        torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
    sampled_neg_inds = torch.nonzero(
        torch.cat(sampled_neg_inds, dim=0)).squeeze(1)

    sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

    labels = torch.cat(labels, dim=0)
    regression_targets = torch.cat(regression_targets, dim=0)

    return labels, regression_targets, sampled_inds, sampled_pos_inds
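
An illustrative RPN call (not from the original source; the values mirror common RPN defaults: foreground IoU >= 0.7, background IoU < 0.3, 256 sampled anchors per image with up to half positive):

labels, regression_targets, sampled_inds, sampled_pos_inds = anchor_target_opr(
    anchors,    # list (per image) of lists (per feature level) of BoxList
    targets,    # list[BoxList]
    box_coder,  # e.g. BoxCoder(weights=(1., 1., 1., 1.))
    high_threshold=0.7, low_threshold=0.3,
    batch_size_per_image=256, positive_fraction=0.5,
)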