Example #1
    def forward(self, anchors, objectness, box_regression, targets=None):
        """
        Arguments:
            anchors: list[list[BoxList]]
            objectness: list[tensor]
            box_regression: list[tensor]
            targets: list[BoxList], optional

        Returns:
            boxlists (list[BoxList]): the post-processed anchors, after
                applying box decoding and NMS
        """
        sampled_boxes = []
        num_levels = len(objectness)
        anchors = list(zip(*anchors))
        for a, o, b in zip(anchors, objectness, box_regression):
            sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))

        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]

        if num_levels > 1:
            boxlists = self.select_over_all_levels(boxlists)

        # append ground-truth bboxes to proposals
        if self.training and targets is not None:
            boxlists = self.add_gt_proposals(boxlists, targets)

        return boxlists
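
Usage note (not from the original code): the regrouping above relies on transposing a nested list with zip(*). A minimal, standalone sketch of that pattern, with hypothetical level and image counts:

# sampled_boxes is indexed [level][image]; zip(*) transposes it to [image][level],
# after which each image's per-level BoxLists can be merged (cat_boxlist above).
sampled_boxes = [[f"im{i}_lvl{l}" for i in range(2)] for l in range(3)]  # 3 levels, 2 images
per_image = list(zip(*sampled_boxes))
assert per_image[0] == ("im0_lvl0", "im0_lvl1", "im0_lvl2")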
Example #2
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        logits = boxlist.get_field("logits").reshape(-1, num_classes)
        features = boxlist.get_field("features")

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            features_j = features[inds]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class.add_field("features", features_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
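
The global cap at the end of filter_results uses torch.kthvalue to find the score threshold that keeps the top detections_per_img detections. A standalone sketch of that step (hypothetical scores, only torch assumed):

import torch

scores = torch.tensor([0.9, 0.1, 0.8, 0.3, 0.7])
max_per_image = 3
# the k-th smallest score such that exactly `max_per_image` scores are >= it
image_thresh, _ = torch.kthvalue(scores, scores.numel() - max_per_image + 1)
keep = torch.nonzero(scores >= image_thresh).squeeze(1)
# keep -> tensor([0, 2, 4]): the three highest-scoring detections (ties may keep more)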
Example #3
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class = boxlist_nms(
                    boxlist_for_class, self.nms_thresh,
                    score_field="scores"
                )
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field(
                    "labels", torch.full((num_labels,), j,
                                         dtype=torch.int64,
                                         device=scores.device)
                )
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
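
boxlist_nms above wraps a standard IoU-based non-maximum suppression over a BoxList. A standalone sketch of the underlying operation using torchvision.ops.nms (an illustration of the idea, not this project's exact implementation):

import torch
from torchvision.ops import nms

# two heavily overlapping boxes and one separate box, in xyxy format
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [50.0, 50.0, 60.0, 60.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms(boxes, scores, iou_threshold=0.5)
# keep -> tensor([0, 2]): the lower-scoring duplicate of box 0 is suppressed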
Example #4
    def __call__(self, anchors, objectness, box_regression, targets):
        """
        Arguments:
            anchors (list[BoxList])
            objectness (list[Tensor])
            box_regression (list[Tensor])
            targets (list[BoxList])

        Returns:
            objectness_loss (Tensor)
            box_loss (Tensor)
        """
        anchors = [
            cat_boxlist(anchors_per_image) for anchors_per_image in anchors
        ]
        labels, regression_targets = self.prepare_targets(anchors, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds,
                                                   dim=0)).squeeze(1)
        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds,
                                                   dim=0)).squeeze(1)

        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        objectness, box_regression = \
                concat_box_prediction_layers(objectness, box_regression)

        objectness = objectness.squeeze()

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        box_loss = smooth_l1_loss(
            box_regression[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1.0 / 9,
            size_average=False,
        ) / (sampled_inds.numel())

        objectness_loss = F.binary_cross_entropy_with_logits(
            objectness[sampled_inds], labels[sampled_inds])

        return objectness_loss, box_loss
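
smooth_l1_loss here is the project's helper; assuming it implements the usual smooth-L1 (Huber) form with a beta knee, a standalone sketch looks like this:

import torch

def smooth_l1_loss_sketch(input, target, beta=1.0 / 9, size_average=False):
    # 0.5 * x^2 / beta for |x| < beta, |x| - 0.5 * beta otherwise
    diff = torch.abs(input - target)
    loss = torch.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)
    return loss.mean() if size_average else loss.sum()

With size_average=False the summed box loss is then divided by sampled_inds.numel(), as in the __call__ above.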
Example #5
    def __call__(self, anchors, box_cls, box_regression, targets):
        """
        Arguments:
            anchors (list[BoxList])
            box_cls (list[Tensor])
            box_regression (list[Tensor])
            targets (list[BoxList])

        Returns:
            retinanet_cls_loss (Tensor)
            retinanet_regression_loss (Tensor)
        """
        anchors = [
            cat_boxlist(anchors_per_image) for anchors_per_image in anchors
        ]
        labels, regression_targets = self.prepare_targets(anchors, targets)

        N = len(labels)
        box_cls, box_regression = \
            concat_box_prediction_layers(box_cls, box_regression)

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)
        pos_inds = torch.nonzero(labels > 0).squeeze(1)

        retinanet_regression_loss = smooth_l1_loss(
            box_regression[pos_inds],
            regression_targets[pos_inds],
            beta=self.bbox_reg_beta,
            size_average=False,
        ) / (max(1,
                 pos_inds.numel() * self.regress_norm))

        labels = labels.int()

        retinanet_cls_loss = self.box_cls_loss_func(
            box_cls, labels) / (pos_inds.numel() + N)

        return retinanet_cls_loss, retinanet_regression_loss
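
box_cls_loss_func is configured elsewhere; in RetinaNet it is typically a sigmoid focal loss. A standalone sketch of that form using one-hot targets (an assumption about the configured loss; the project's helper may take integer class labels instead):

import torch
import torch.nn.functional as F

def sigmoid_focal_loss_sketch(logits, targets, gamma=2.0, alpha=0.25):
    # logits, targets: (N, num_classes), targets one-hot encoded
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1.0 - p_t) ** gamma * ce).sum()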
Example #6
    def add_gt_proposals(self, proposals, targets):
        """
        Arguments:
            proposals: list[BoxList]
            targets: list[BoxList]
        """
        # Get the device we're operating on
        device = proposals[0].bbox.device

        gt_boxes = [target.copy_with_fields([]) for target in targets]

        # the later cat of bboxes requires all fields to be present for all
        # bboxes, so we need to add the "objectness" field the GT boxes lack
        for gt_box in gt_boxes:
            gt_box.add_field("objectness",
                             torch.ones(len(gt_box), device=device))

        proposals = [
            cat_boxlist((proposal, gt_box))
            for proposal, gt_box in zip(proposals, gt_boxes)
        ]

        return proposals
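
BoxList and cat_boxlist are project structures; the same idea with plain tensors, as an illustration only: the ground-truth boxes are appended to the proposals, with a dummy objectness of 1.0 so that all fields line up for concatenation.

import torch

proposal_boxes = torch.rand(100, 4)   # RPN proposals for one image (xyxy)
proposal_obj = torch.rand(100)        # their objectness scores
gt_boxes = torch.rand(5, 4)           # ground-truth boxes for the same image
gt_obj = torch.ones(5)                # dummy objectness for the appended GT boxes

all_boxes = torch.cat([proposal_boxes, gt_boxes], dim=0)  # (105, 4)
all_obj = torch.cat([proposal_obj, gt_obj], dim=0)        # (105,)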
Example #7
    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.

        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the subsampled proposals
                are returned. During testing, the predicted boxlists are returned
            losses (dict[Tensor]): During training, returns the losses for the
                head. During testing, returns an empty dict.
        """

        if self.training and self.use_gt_boxes:
            # augment proposals with ground-truth boxes
            targets_cp = [target.copy_with_fields(target.fields()) for target in targets]

            with torch.no_grad():
                x = self.box_feature_extractor(features, targets_cp)
                class_logits, box_regression = self.box_predictor(x)

            boxes_per_image = [len(proposal) for proposal in targets_cp]
            target_features = x.split(boxes_per_image, dim=0)
            for proposal, target_feature in zip(targets_cp, target_features):
                proposal.add_field("features", self.box_avgpool(target_feature))
            proposals_gt = self.box_post_processor((class_logits, box_regression), targets_cp, skip_nms=True)
            proposals = [cat_boxlist([proposal, proposal_gt]) for (proposal, proposal_gt) in zip(proposals, proposals_gt)]

        if self.training:
            # During training, Faster R-CNN subsamples the proposals with a
            # fixed positive/negative ratio
            if self.cfg.MODEL.USE_RELPN:
                proposal_pairs, loss_relpn = self.relpn(proposals, targets)
            else:
                proposal_pairs = self.loss_evaluator.subsample(proposals, targets)
        else:
            with torch.no_grad():
                if self.cfg.MODEL.USE_RELPN:
                    proposal_pairs, relnesses = self.relpn(proposals)
                else:
                    proposal_pairs = self.loss_evaluator.subsample(proposals)

        if self.cfg.MODEL.USE_FREQ_PRIOR:
            """
            if use frequency prior, we directly use the statistics
            """
            x = None
            obj_class_logits = None
            _, obj_labels, im_inds = _get_tensor_from_boxlist(proposals, 'labels')
            _, proposal_idx_pairs, im_inds_pairs = _get_tensor_from_boxlist(proposal_pairs, 'idx_pairs')
            rel_inds = _get_rel_inds(im_inds, im_inds_pairs, proposal_idx_pairs)
            pred_class_logits = self.freq_bias.index_with_labels(
                torch.stack((obj_labels[rel_inds[:, 0]],obj_labels[rel_inds[:, 1]],), 1))
        else:
            # extract features that will be fed to the final classifier. The
            # feature_extractor generally corresponds to the pooler + heads

            x, obj_class_logits, pred_class_logits, obj_class_labels, rel_inds = \
                self.rel_predictor(features, proposals, proposal_pairs)
            # TODO(cjrd) each image hits this state -- we'll extract the features somewhere in here
            # import ipdb; ipdb.set_trace()
            if self.use_bias:
                pred_class_logits = pred_class_logits + self.freq_bias.index_with_labels(
                    torch.stack((
                        obj_class_labels[rel_inds[:, 0]],
                        obj_class_labels[rel_inds[:, 1]],
                    ), 1))

        if not self.training:
            result = self.post_processor((pred_class_logits), proposal_pairs, use_freq_prior=self.cfg.MODEL.USE_FREQ_PRIOR)
            
            # ---------------------------------- COLO ADD - extract features for downstream applications (from grcnn.py) ------------------#
            if self.cfg.TEST.SAVE_INTERMEDIATE_FEATURES:
                try:
                    obj_feats, pred_feats = self.rel_predictor.get_transformed_features(features, proposals, proposal_pairs)
                    top_idxs = pred_class_logits.max(1)[0]
                    if top_idxs.numel() >= self.cfg.TEST.INTERMEDIATE_FEATURES_TOPK_RELS:
                        top_rels_idx = top_idxs.topk(self.cfg.TEST.INTERMEDIATE_FEATURES_TOPK_RELS)[1]
                        top_rel_feats = pred_feats[0][top_rels_idx,:]
                        result[0].add_field("top_rel_feats", top_rel_feats)
                except Exception as e:
                    print("Error determining intermediate features: {}".format(e))

            #------------------------------------------------------------------------------------------------------------------------------#

            # TODO investigate this model structure
            # import ipdb; ipdb.set_trace()

            # if self.cfg.MODEL.USE_RELPN:
            #     for res, relness in zip(result, relnesses):
            #         res.add_field("scores", res.get_field("scores") * relness.view(-1, 1))

            return x, result, {}

        loss_obj_classifier = 0
        if obj_class_logits is not None:
            loss_obj_classifier = self.loss_evaluator.obj_classification_loss(proposals, [obj_class_logits])

        if self.cfg.MODEL.USE_RELPN:
            idx = obj_class_labels[rel_inds[:, 0]] * 151 + obj_class_labels[rel_inds[:, 1]]
            freq_prior = self.freq_dist.view(-1, 51)[idx].cuda()
            loss_pred_classifier = self.relpn.pred_classification_loss([pred_class_logits], freq_prior=freq_prior)
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_relpn=loss_relpn,
                     loss_pred_classifier=loss_pred_classifier),
            )
        else:
            loss_pred_classifier = self.loss_evaluator([pred_class_logits])
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_pred_classifier=loss_pred_classifier),
            )
    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.

        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the subsampled proposals
                are returned. During testing, the predicted boxlists are returned
            losses (dict[Tensor]): During training, returns the losses for the
                head. During testing, returns an empty dict.
        """

        if self.training and self.use_gt_boxes:
            # augment proposals with ground-truth boxes
            targets_cp = [
                target.copy_with_fields(target.fields()) for target in targets
            ]

            with torch.no_grad():
                x = self.box_feature_extractor(features, targets_cp)
                class_logits, box_regression = self.box_predictor(x)

            boxes_per_image = [len(proposal) for proposal in targets_cp]
            target_features = x.split(boxes_per_image, dim=0)
            for proposal, target_feature in zip(targets_cp, target_features):
                proposal.add_field("features",
                                   self.box_avgpool(target_feature))
            proposals_gt = self.box_post_processor(
                (class_logits, box_regression), targets_cp, skip_nms=True)
            proposals = [
                cat_boxlist([proposal, proposal_gt])
                for (proposal, proposal_gt) in zip(proposals, proposals_gt)
            ]

        if self.training:
            # During training, Faster R-CNN subsamples the proposals with a
            # fixed positive/negative ratio
            if self.cfg.MODEL.USE_RELPN:
                proposal_pairs, loss_relpn = self.relpn(proposals, targets)
            else:
                proposal_pairs = self.loss_evaluator.subsample(
                    proposals, targets)
        else:
            with torch.no_grad():
                if self.cfg.MODEL.USE_RELPN:
                    proposal_pairs, relnesses = self.relpn(proposals)
                else:
                    proposal_pairs = self.loss_evaluator.subsample(proposals)

        if self.cfg.MODEL.USE_FREQ_PRIOR:
            """
            if use frequency prior, we directly use the statistics
            """
            x = None
            obj_class_logits = None
            _, obj_labels, im_inds = _get_tensor_from_boxlist(
                proposals, 'labels')
            _, proposal_idx_pairs, im_inds_pairs = _get_tensor_from_boxlist(
                proposal_pairs, 'idx_pairs')
            rel_inds = _get_rel_inds(im_inds, im_inds_pairs,
                                     proposal_idx_pairs)
            pred_class_logits = self.freq_bias.index_with_labels(
                torch.stack((
                    obj_labels[rel_inds[:, 0]],
                    obj_labels[rel_inds[:, 1]],
                ), 1))
        else:
            # extract features that will be fed to the final classifier. The
            # feature_extractor generally corresponds to the pooler + heads

            x, obj_class_logits, pred_class_logits, obj_class_labels, rel_inds = \
                self.rel_predictor(features, proposals, proposal_pairs)

            if self.use_bias:
                pred_class_logits = pred_class_logits + self.freq_bias.index_with_labels(
                    torch.stack((
                        obj_class_labels[rel_inds[:, 0]],
                        obj_class_labels[rel_inds[:, 1]],
                    ), 1))

        if not self.training:
            # NOTE: if we have updated object class logits, then we need to update proposals as well!!!
            # if obj_class_logits is not None:
            #     boxes_per_image = [len(proposal) for proposal in proposals]
            #     obj_logits = obj_class_logits
            #     obj_scores, obj_labels = obj_class_logits[:, 1:].max(1)
            #     obj_labels = obj_labels + 1
            #     obj_logits = obj_logits.split(boxes_per_image, dim=0)
            #     obj_scores = obj_scores.split(boxes_per_image, dim=0)
            #     obj_labels = obj_labels.split(boxes_per_image, dim=0)
            #     for proposal, obj_logit, obj_score, obj_label in \
            #         zip(proposals, obj_logits, obj_scores, obj_labels):
            #         proposal.add_field("logits", obj_logit)
            #         proposal.add_field("scores", obj_score)
            #         proposal.add_field("labels", obj_label)
            result = self.post_processor(
                (pred_class_logits),
                proposal_pairs,
                use_freq_prior=self.cfg.MODEL.USE_FREQ_PRIOR)

            # if self.cfg.MODEL.USE_RELPN:
            #     for res, relness in zip(result, relnesses):
            #         res.add_field("scores", res.get_field("scores") * relness.view(-1, 1))

            return x, result, {}

        loss_obj_classifier = 0
        if obj_class_logits is not None:
            loss_obj_classifier = self.loss_evaluator.obj_classification_loss(
                proposals, [obj_class_logits])

        if self.cfg.MODEL.USE_RELPN:
            idx = obj_class_labels[rel_inds[:, 0]] * 151 + obj_class_labels[
                rel_inds[:, 1]]
            freq_prior = self.freq_dist.view(-1, 51)[idx].cuda()
            loss_pred_classifier = self.relpn.pred_classification_loss(
                [pred_class_logits], freq_prior=freq_prior)
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_relpn=loss_relpn,
                     loss_pred_classifier=loss_pred_classifier),
            )
        else:
            loss_pred_classifier = self.loss_evaluator([pred_class_logits])
            return (
                x,
                proposal_pairs,
                dict(loss_obj_classifier=loss_obj_classifier,
                     loss_pred_classifier=loss_pred_classifier),
            )
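
The frequency-prior branch above indexes a flattened table with subj_label * 151 + obj_label and reads off a 51-way predicate distribution (the 151/51 constants appear in the code and match Visual Genome object/predicate counts including background). A standalone sketch of that lookup with a random table and hypothetical class ids:

import torch

num_obj_classes, num_predicates = 151, 51
freq_dist = torch.rand(num_obj_classes, num_obj_classes, num_predicates)

subj_labels = torch.tensor([3, 17])   # hypothetical subject class ids
obj_labels = torch.tensor([58, 3])    # hypothetical object class ids
idx = subj_labels * num_obj_classes + obj_labels
prior = freq_dist.view(-1, num_predicates)[idx]   # (2, 51) per-pair predicate priors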