Example #1
def im_detect_bbox_aug(model, images, device):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is the identity transform; no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist to match the size of the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(
            model, images, scale, max_size, device
        )
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(
                model, images, scale, max_size, device, hflip=True
            )
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))

    return results
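The merging step above works because detections computed at other scales or flips are mapped back onto the original image size before concatenation. A minimal sketch of the BoxList operations this relies on (construct, add_field, resize), assuming the maskrcnn_benchmark API and hypothetical box values:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

boxes = torch.tensor([[10.0, 10.0, 50.0, 80.0],
                      [20.0, 30.0, 60.0, 90.0]])
boxlist = BoxList(boxes, (100, 100), mode="xyxy")       # image size is (width, height)
boxlist.add_field("scores", torch.tensor([0.9, 0.7]))

# Map the detections onto a 2x larger image, as add_preds_t does for
# non-identity transforms; extra fields such as "scores" are carried over.
resized = boxlist.resize((200, 200))
print(resized.bbox)                  # coordinates scaled by 2
print(resized.get_field("scores"))   # tensor([0.9000, 0.7000])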
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.view(-1, 4),
                                          concat_anchors.view(-1, 4))

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness,
                                             image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
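The permute_and_flatten helper called above is not shown in this example; roughly, it reorders the (N, A*C, H, W) head output so that predictions line up with the per-image concatenated anchors. A sketch of what such a helper does (an assumption, check against your copy of the library):

def permute_and_flatten(layer, N, A, C, H, W):
    # (N, A*C, H, W) -> (N, H*W*A, C): one row per anchor, matching the
    # order in which the anchors are concatenated for each image
    layer = layer.view(N, -1, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    return layer.reshape(N, -1, C)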
Example #3
    def get_groundtruth(self, index):
        width, height = self.im_sizes[index, :]
        # get object bounding boxes, labels and relations

        obj_boxes = self.gt_boxes[index].copy()
        obj_labels = self.gt_classes[index].copy()
        obj_relation_triplets = self.relationships[index].copy()

        if self.filter_duplicate_rels:
            # Filter out dupes!
            assert self.split == 'train'
            old_size = obj_relation_triplets.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in obj_relation_triplets:
                all_rel_sets[(o0, o1)].append(r)
            obj_relation_triplets = [(k[0], k[1], np.random.choice(v))
                                     for k, v in all_rel_sets.items()]
            obj_relation_triplets = np.array(obj_relation_triplets)

        obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))

        for i in range(obj_relation_triplets.shape[0]):
            subj_id = obj_relation_triplets[i][0]
            obj_id = obj_relation_triplets[i][1]
            pred = obj_relation_triplets[i][2]
            obj_relations[subj_id, obj_id] = pred

        target = BoxList(obj_boxes, (width, height), mode="xyxy")
        target.add_field("labels", torch.from_numpy(obj_labels))
        target.add_field("pred_labels", torch.from_numpy(obj_relations))
        target.add_field("relation_labels",
                         torch.from_numpy(obj_relation_triplets))
        target.add_field("difficult",
                         torch.from_numpy(obj_labels).clone().fill_(0))
        return target
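The obj_relations matrix built above is a dense (num_boxes, num_boxes) array holding the predicate id at position [subject, object]. A toy illustration with hypothetical triplets:

import numpy as np

# triplets of (subject_idx, object_idx, predicate_id)
obj_relation_triplets = np.array([[0, 1, 3],
                                  [2, 0, 7]])
obj_relations = np.zeros((3, 3))
for s, o, p in obj_relation_triplets:
    obj_relations[s, o] = p
# obj_relations == [[0, 3, 0],
#                   [0, 0, 0],
#                   [7, 0, 0]]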
    def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field.
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detections, #classes), where each row represents a
        list of object detection confidence scores for each of the object classes
        in the dataset (including the background class). `scores[i, j]` corresponds
        to the box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist
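A small shape check of the flattening performed above, with hypothetical sizes: for 2 proposals and 3 classes, row i * num_classes + j of the resulting BoxList is proposal i's box for class j, scored by scores[i, j]:

import torch

num_props, num_classes = 2, 3
boxes = torch.arange(num_props * num_classes * 4, dtype=torch.float32).view(num_props, -1)
scores = torch.rand(num_props, num_classes)

flat_boxes = boxes.reshape(-1, 4)    # shape (6, 4)
flat_scores = scores.reshape(-1)     # shape (6,)

i, j = 1, 2
assert torch.equal(flat_boxes[i * num_classes + j], boxes[i, j * 4:(j + 1) * 4])
assert flat_scores[i * num_classes + j] == scores[i, j]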
Example #5
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class = boxlist_nms(
                    boxlist_for_class, self.nms_thresh,
                    score_field="scores"
                )
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field(
                    "labels", torch.full((num_labels,), j,
                                         dtype=torch.int64,
                                         device=scores.device)
                )
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
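Both this example and the next call boxlist_nms to suppress overlapping boxes within a class. A rough sketch of what that helper does, using torchvision.ops.nms as the backend (the library ships its own compiled kernel and also handles box-mode conversion, so treat this only as an approximation):

import torchvision

def boxlist_nms_sketch(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
    # assumes the boxes are already in "xyxy" mode
    if nms_thresh <= 0:
        return boxlist
    keep = torchvision.ops.nms(boxlist.bbox, boxlist.get_field(score_field), nms_thresh)
    if max_proposals > 0:
        keep = keep[:max_proposals]
    return boxlist[keep]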
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4: (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
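The detections_per_img cap above avoids a full sort: with n detections and a budget of k, the (n - k + 1)-th smallest score is the lowest score still inside the top k, so thresholding with >= keeps k boxes (more if there are ties). A quick numeric check with hypothetical scores:

import torch

cls_scores = torch.tensor([0.9, 0.2, 0.6, 0.8, 0.4])
detections_per_img = 3
image_thresh, _ = torch.kthvalue(cls_scores, cls_scores.numel() - detections_per_img + 1)
keep = torch.nonzero(cls_scores >= image_thresh.item()).squeeze(1)
# image_thresh == 0.6, keep == tensor([0, 2, 3])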
Example #7
    def forward_for_single_feature_map(
            self, anchors, box_cls, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = box_cls.device
        N, _, H, W = box_cls.shape
        A = box_regression.size(1) // 4
        C = box_cls.size(1) // A

        # put in the same format as anchors
        box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
        box_cls = box_cls.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
        box_regression = box_regression.reshape(N, -1, 4)

        num_anchors = A * H * W

        candidate_inds = box_cls > self.pre_nms_thresh

        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        results = []
        for per_box_cls, per_box_regression, per_pre_nms_top_n, \
            per_candidate_inds, per_anchors in zip(
            box_cls,
            box_regression,
            pre_nms_top_n,
            candidate_inds,
            anchors):
            # Sort and select the top-N candidates.
            # TODO: most of this could be moved out of the loop and computed
            # for all images at once.
            # TODO (Yang): not easy to do, because the number of detections
            # differs per image, so this part has to be done per image.
            per_box_cls = per_box_cls[per_candidate_inds]

            per_box_cls, top_k_indices = \
                per_box_cls.topk(per_pre_nms_top_n, sorted=False)

            per_candidate_nonzeros = \
                per_candidate_inds.nonzero()[top_k_indices, :]

            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            per_class += 1

            detections = self.box_coder.decode(
                per_box_regression[per_box_loc, :].view(-1, 4),
                per_anchors.bbox[per_box_loc, :].view(-1, 4)
            )

            boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
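Both heads above turn regression outputs into absolute boxes via self.box_coder.decode. A sketch of the standard R-CNN decoding it implements (deltas shift the anchor center and rescale its size); this assumes unit weights and omits the library's off-by-one pixel convention, so it is an approximation rather than the library's exact code:

import math
import torch

def decode_sketch(deltas, anchors, bbox_xform_clip=math.log(1000.0 / 16)):
    # anchors and outputs are in "xyxy" format; deltas are (dx, dy, dw, dh)
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights

    dx, dy = deltas[:, 0], deltas[:, 1]
    dw = deltas[:, 2].clamp(max=bbox_xform_clip)   # avoid exp overflow
    dh = deltas[:, 3].clamp(max=bbox_xform_clip)

    pred_ctr_x = dx * widths + ctr_x
    pred_ctr_y = dy * heights + ctr_y
    pred_w = torch.exp(dw) * widths
    pred_h = torch.exp(dh) * heights

    return torch.stack([pred_ctr_x - 0.5 * pred_w,
                        pred_ctr_y - 0.5 * pred_h,
                        pred_ctr_x + 0.5 * pred_w,
                        pred_ctr_y + 0.5 * pred_h], dim=1)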
    def forward(self, x, boxes, skip_nms=False):
        """
        Arguments:
            x (tuple[tensor, tensor]): x contains the class logits
                and the box_regression from the model.
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra fields labels and scores
        """
        class_logit, box_regression = x
        class_prob = F.softmax(class_logit, -1)

        # TODO think about a representation of batch of boxes
        image_shapes = [box.size for box in boxes]
        boxes_per_image = [len(box) for box in boxes]
        features = [box.get_field("features") for box in boxes]
        concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)

        if not skip_nms:
            if self.cls_agnostic_bbox_reg:
                box_regression = box_regression[:, -4:]
            proposals = self.box_coder.decode(
                box_regression.view(sum(boxes_per_image), -1), concat_boxes)
            if self.cls_agnostic_bbox_reg:
                proposals = proposals.repeat(1, class_prob.shape[1])
            proposals = proposals.split(boxes_per_image, dim=0)
        else:
            proposals = concat_boxes.split(boxes_per_image, dim=0)

        num_classes = class_prob.shape[1]
        class_prob = class_prob.split(boxes_per_image, dim=0)
        class_logit = class_logit.split(boxes_per_image, dim=0)

        results = []
        idx = 0
        for prob, logit, boxes_per_img, features_per_img, image_shape in zip(
                class_prob, class_logit, proposals, features, image_shapes):
            if not self.bbox_aug_enabled and not skip_nms:  # If bbox aug is enabled, we will do it later
                boxlist = self.prepare_boxlist(boxes_per_img, features_per_img,
                                               prob, logit, image_shape)
                boxlist = boxlist.clip_to_image(remove_empty=False)
                if not self.relation_on:
                    boxlist_filtered = self.filter_results(
                        boxlist, num_classes)
                else:
                    # boxlist_pre = self.filter_results(boxlist, num_classes)
                    boxlist_filtered = self.filter_results_nm(
                        boxlist, num_classes)

                    # to enforce a minimum number of detections per image,
                    # keep halving the confidence threshold until enough survive
                    score_thresh = 0.05
                    while len(boxlist_filtered) < self.min_detections_per_img:
                        score_thresh /= 2.0
                        print(("\nNumber of proposals {} is too small, "
                               "retrying filter_results with score thresh"
                               " = {}").format(len(boxlist_filtered),
                                               score_thresh))
                        boxlist_filtered = self.filter_results_nm(
                            boxlist, num_classes, thresh=score_thresh)
            else:
                boxlist = BoxList(boxes_per_img, image_shape, mode="xyxy")
                boxlist.add_field("scores", prob[:, 1:].max(1)[0])
                boxlist.add_field("logits", logit)
                boxlist.add_field("features", features_per_img)
                boxlist.add_field("labels", boxes[idx].get_field("labels"))
                boxlist.add_field("regression_targets",
                                  boxes[idx].bbox.clone().fill_(0.0))
                boxlist_filtered = boxlist
                idx += 1

            if len(boxlist) == 0:
                raise ValueError("boxlist should not be empty!")

            results.append(boxlist_filtered)
        return results
    def filter_results_nm(self, boxlist, num_classes, thresh=0.05):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS). Similar to Neural-Motif Network
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        logits = boxlist.get_field("logits").reshape(-1, num_classes)
        features = boxlist.get_field("features")

        valid_cls = (scores[:, 1:].max(0)[0] > thresh).nonzero() + 1

        nms_mask = scores.clone()
        nms_mask.zero_()

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in valid_cls.view(-1).cpu():
            scores_j = scores[:, j]
            boxes_j = boxes[:, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class.add_field(
                "idxs",
                torch.arange(0, scores.shape[0]).long())
            # boxlist_for_class = boxlist_nms(
            #     boxlist_for_class, self.nms
            # )
            boxlist_for_class = boxlist_nms(boxlist_for_class, 0.3)
            nms_mask[:, j][boxlist_for_class.get_field("idxs")] = 1

            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(boxlist_for_class)

        dists_all = nms_mask * scores

        # filter duplicate boxes
        scores_pre, labels_pre = dists_all.max(1)
        inds_all = scores_pre.nonzero()
        assert inds_all.dim() != 0
        inds_all = inds_all.squeeze(1)

        labels_all = labels_pre[inds_all]
        scores_all = scores_pre[inds_all]
        features_all = features[inds_all]
        logits_all = logits[inds_all]

        box_inds_all = inds_all * scores.shape[1] + labels_all
        result = BoxList(boxlist.bbox.view(-1, 4)[box_inds_all],
                         boxlist.size,
                         mode="xyxy")
        result.add_field("labels", labels_all)
        result.add_field("scores", scores_all)
        result.add_field("logits", logits_all)
        result.add_field("features", features_all)
        number_of_detections = len(result)

        vs, idx = torch.sort(scores_all, dim=0, descending=True)
        idx = idx[vs > thresh]
        if self.detections_per_img < idx.size(0):
            idx = idx[:self.detections_per_img]
        result = result[idx]
        return result
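The nms_mask * scores product above zeroes out every (box, class) score whose box was suppressed by that class's NMS pass, so the following max(1) picks, for each box, its best surviving class. A toy illustration with hypothetical values:

import torch

scores = torch.tensor([[0.01, 0.70, 0.20],
                       [0.02, 0.10, 0.60]])
nms_mask = torch.tensor([[0., 1., 1.],
                         [0., 0., 1.]])   # box 1 was suppressed for class 1
dists_all = nms_mask * scores
scores_pre, labels_pre = dists_all.max(1)
# box 0 -> class 1 (0.70), box 1 -> class 2 (0.60)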
Example #10
        showarray(v.get_image()[:, :, ::-1], save_dir + "pred_%i.jpg" % i)
        #print('instances:\n', instances)
        #print()
        #print('boxes:\n', instances.pred_boxes)
        #print()
        #print('Shape of features:\n', features.shape)

        #pred_class_logits, pred_attr_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(features)
        #pred_class_probs = torch.nn.functional.softmax(pred_class_logits, -1)[:, :-1]
        #max_probs, max_classes = pred_class_probs.max(-1)
        #print("%d objects are different, it is because the classes-aware NMS process" % (NUM_OBJECTS - torch.eq(instances.pred_classes, max_classes).sum().item()))
        #print("The total difference of score is %0.4f" % (instances.scores - max_probs).abs().sum().item())

        boxes = instances.pred_boxes.tensor
        boxlist = BoxList(boxes.cpu(), (image_w, image_h), mode="xyxy")
        boxlist.add_field("scores", instances.scores.cpu())
        boxlist.add_field("labels", instances.pred_classes.cpu())
        boxlist.add_field("attr_logits", instances.attr_logits.cpu())
        boxlist.add_field("cls_logits", instances.cls_logits.cpu())
        data.append(boxlist)
        #new_entry["boxes"] = instances.pred_boxes.tensor.cpu().numpy()
        #new_entry["box_scores"] = instances.scores.cpu().numpy()
        #new_entry["pred_classes"] = instances.pred_classes.cpu().numpy()
        #new_entry["attr_logits"] = instances.attr_logits.cpu().numpy()
        #new_entry["cls_logits"] = instances.cls_logits.cpu().numpy()
        #np_dict = np.array(list(new_entry.items()))
        #np.savetxt("./data/%i.npy" % i, np_dict)
        #dd.io.save("./data/%i.h5" % i, new_entry, compression="default")
        #data.append(new_entry)
        #with open("./data/%i.json" % i, "w") as f: