def forward(self, x, boxes):
    """
    Arguments:
        x (Tensor): the mask logits
        boxes (list[BoxList]): bounding boxes that are used as
            reference, one for each image

    Returns:
        results (list[BoxList]): one BoxList for each image, containing
            the extra field mask
    """
    mask_prob = x.sigmoid()

    # select masks corresponding to the predicted classes
    num_masks = x.shape[0]
    labels = [bbox.get_field("labels") for bbox in boxes]
    labels = torch.cat(labels)
    index = torch.arange(num_masks, device=labels.device)
    mask_prob = mask_prob[index, labels][:, None]

    boxes_per_image = [len(box) for box in boxes]
    mask_prob = mask_prob.split(boxes_per_image, dim=0)

    if self.masker:
        mask_prob = self.masker(mask_prob, boxes)

    results = []
    for prob, box in zip(mask_prob, boxes):
        bbox = BoxList(box.bbox, box.size, mode="xyxy")
        for field in box.fields():
            bbox.add_field(field, box.get_field(field))
        bbox.add_field("mask", prob)
        results.append(bbox)

    return results
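# Illustrative sketch (not part of the original module): the indexing trick
# mask_prob[index, labels][:, None] above selects, for each detection, the mask
# channel of its predicted class. A minimal stand-alone demo with plain torch:
import torch

num_masks, num_classes, m = 3, 5, 28
mask_logits = torch.randn(num_masks, num_classes, m, m)
labels = torch.tensor([2, 0, 4])                 # hypothetical predicted classes
index = torch.arange(num_masks)
per_class_prob = mask_logits.sigmoid()[index, labels][:, None]
assert per_class_prob.shape == (num_masks, 1, m, m)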
def __call__(self, proposals, source_score, labels, device, return_targets=False):
    gt_boxes = torch.zeros((0, 4), dtype=torch.float, device=device)
    gt_classes = torch.zeros((0, 1), dtype=torch.long, device=device)
    gt_scores = torch.zeros((0, 1), dtype=torch.float, device=device)

    # skip the background class (column 0)
    _prob = source_score[:, 1:].clone()
    _labels = labels[1:]
    positive_classes = _labels.eq(1).nonzero(as_tuple=False)[:, 0]

    # for each image-level positive class, take the highest-scoring proposal
    # as a pseudo ground-truth box
    for c in positive_classes:
        cls_prob = _prob[:, c]
        max_index = torch.argmax(cls_prob)
        gt_boxes = torch.cat((gt_boxes, proposals.bbox[max_index].view(1, -1)), dim=0)
        gt_classes = torch.cat((gt_classes, c.add(1).view(1, 1)), dim=0)
        gt_scores = torch.cat((gt_scores, cls_prob[max_index].view(1, 1)), dim=0)
        _prob[max_index].fill_(0)

    if return_targets:
        gt_boxes = BoxList(gt_boxes, proposals.size, mode=proposals.mode)
        gt_boxes.add_field('labels', gt_classes[:, 0].float())
        # gt_boxes.add_field('difficult', bb)
        return gt_boxes

    if gt_boxes.shape[0] == 0:
        num_rois = len(source_score)
        pseudo_labels = torch.zeros(num_rois, dtype=torch.long, device=device)
        loss_weights = torch.zeros(num_rois, dtype=torch.float, device=device)
    else:
        gt_boxes = BoxList(gt_boxes, proposals.size, mode=proposals.mode)
        overlaps = boxlist_iou(proposals, gt_boxes)
        max_overlaps, gt_assignment = overlaps.max(dim=1)
        pseudo_labels = gt_classes[gt_assignment, 0]
        loss_weights = gt_scores[gt_assignment, 0]

        # Select background RoIs as those with <= FG_IOU_THRESHOLD
        bg_inds = max_overlaps.le(
            cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD).nonzero(as_tuple=False)[:, 0]
        pseudo_labels[bg_inds] = 0

        # PCL_TRICK:
        # ignore_thres = 0.1
        # ignore_inds = max_overlaps.le(ignore_thres).nonzero(as_tuple=False)[:, 0]
        # loss_weights[ignore_inds] = 0

    return pseudo_labels, loss_weights
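# Illustrative sketch (not from the original file): the mining loop above keeps one
# pseudo ground-truth box per image-level positive class, namely the proposal with the
# highest probability for that class. The same selection on toy tensors:
import torch

proposal_boxes = torch.tensor([[0., 0., 10., 10.],
                               [5., 5., 20., 20.],
                               [8., 8., 30., 30.]])
class_prob = torch.tensor([[0.1, 0.7, 0.2],      # per-proposal scores, background column removed
                           [0.6, 0.2, 0.1],
                           [0.3, 0.1, 0.9]])
image_labels = torch.tensor([1, 0, 1])           # classes 0 and 2 are present in the image

for c in image_labels.eq(1).nonzero(as_tuple=False)[:, 0]:
    best = torch.argmax(class_prob[:, c])
    print(int(c) + 1, proposal_boxes[best].tolist(), float(class_prob[best, c]))
# -> label 1 from proposal 1 (0.6) and label 3 from proposal 2 (0.9);
#    labels are shifted by +1 to leave index 0 for the background class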
def get_groundtruth(self, index):
    img_id = self.ids[index]
    anno = ET.parse(self._annopath % img_id).getroot()
    anno = self._preprocess_annotation(anno)

    height, width = anno["im_info"]
    target = BoxList(anno["boxes"], (width, height), mode="xyxy")
    target.add_field("labels", anno["labels"])
    target.add_field("difficult", anno["difficult"])
    return target
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        objectness: tensor of size N, A, H, W
        box_regression: tensor of size N, A * 4, H, W
    """
    device = objectness.device
    N, A, H, W = objectness.shape

    # put in the same format as anchors
    objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
    objectness = objectness.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

    num_anchors = A * H * W

    pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
    objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

    batch_idx = torch.arange(N, device=device)[:, None]
    box_regression = box_regression[batch_idx, topk_idx]

    image_shapes = [box.size for box in anchors]
    concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
    concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

    proposals = self.box_coder.decode(
        box_regression.view(-1, 4), concat_anchors.view(-1, 4)
    )
    proposals = proposals.view(N, -1, 4)

    result = []
    for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
        boxlist = BoxList(proposal, im_shape, mode="xyxy")
        boxlist.add_field("objectness", score)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        boxlist = boxlist_nms(
            boxlist,
            self.nms_thresh,
            max_proposals=self.post_nms_top_n,
            score_field="objectness",
        )
        result.append(boxlist)
    return result
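# Illustrative sketch (assumption: mirrors the repo's permute_and_flatten helper, which
# is not shown here). The helper reorders a head output of shape (N, A*C, H, W) into
# (N, H*W*A, C) so that rows line up with the anchor ordering used by the BoxLists:
import torch

def permute_and_flatten_demo(layer, N, A, C, H, W):
    layer = layer.view(N, A, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)      # N, H, W, A, C
    return layer.reshape(N, -1, C)            # N, H*W*A, C

x = torch.randn(2, 3 * 4, 5, 6)               # N=2, A=3, C=4, H=5, W=6
assert permute_and_flatten_demo(x, 2, 3, 4, 5, 6).shape == (2, 5 * 6 * 3, 4)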
def prepare_boxlist(self, boxes, scores, image_shape):
    """
    Return a BoxList from `boxes` and add the probability scores as the
    extra field `scores`.

    `boxes` has shape (#detections, 4 * #classes), where each row represents
    a list of predicted bounding boxes for each of the object classes in the
    dataset (including the background class). The detections in each row
    originate from the same object proposal.

    `scores` has shape (#detections, #classes), where each row represents a
    list of object detection confidence scores for each of the object classes
    in the dataset (including the background class). `scores[i, j]`
    corresponds to the box at `boxes[i, j * 4:(j + 1) * 4]`.
    """
    boxes = boxes.reshape(-1, 4)
    scores = scores.reshape(-1)
    boxlist = BoxList(boxes, image_shape, mode="xyxy")
    boxlist.add_field("scores", scores)
    return boxlist
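# Illustrative sketch (not from the original file): the reshape above flattens the
# per-proposal, per-class layout into one row per (proposal, class) pair, so that
# scores[i, j] lines up with row i * num_classes + j of the flattened boxes:
import torch

num_proposals, num_classes = 2, 3
boxes = torch.arange(num_proposals * num_classes * 4, dtype=torch.float32).view(num_proposals, num_classes * 4)
scores = torch.rand(num_proposals, num_classes)

flat_boxes = boxes.reshape(-1, 4)        # (num_proposals * num_classes, 4)
flat_scores = scores.reshape(-1)         # (num_proposals * num_classes,)
i, j = 1, 2
assert torch.equal(flat_boxes[i * num_classes + j], boxes[i, j * 4:(j + 1) * 4])
assert flat_scores[i * num_classes + j] == scores[i, j]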
def select_over_all_levels(self, boxlists):
    num_images = len(boxlists)
    results = []
    for i in range(num_images):
        scores = boxlists[i].get_field("scores")
        labels = boxlists[i].get_field("labels")
        boxes = boxlists[i].bbox
        boxlist = boxlists[i]
        result = []
        # skip the background class
        for j in range(1, self.num_classes):
            inds = (labels == j).nonzero().view(-1)

            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms_thresh, score_field="scores"
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels,), j, dtype=torch.int64, device=scores.device),
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.fpn_post_nms_top_n > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.fpn_post_nms_top_n + 1,
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        results.append(result)
    return results
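# Illustrative sketch (not part of the original class): the kthvalue trick above keeps
# the top fpn_post_nms_top_n detections across all classes without a full sort. With n
# detections and a budget of k, the (n - k + 1)-th smallest score is the smallest score
# that survives (ties at the threshold may keep slightly more than k boxes):
import torch

scores = torch.tensor([0.9, 0.1, 0.5, 0.7, 0.3])
top_n = 3
thresh, _ = torch.kthvalue(scores, len(scores) - top_n + 1)
keep = torch.nonzero(scores >= thresh.item()).squeeze(1)
assert keep.tolist() == [0, 2, 3]            # the three highest-scoring detections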
def filter_results(self, boxlist, num_classes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).
    """
    # unwrap the boxlist to avoid additional overhead.
    # if we had multi-class NMS, we could perform this directly on the boxlist
    boxes = boxlist.bbox.reshape(-1, num_classes * 4)
    scores = boxlist.get_field("scores").reshape(-1, num_classes)

    device = scores.device
    result = []
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    inds_all = scores > self.score_thresh
    for j in range(1, num_classes):
        inds = inds_all[:, j].nonzero(as_tuple=False).squeeze(1)
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
        boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
        boxlist_for_class.add_field("scores", scores_j)
        boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
        num_labels = len(boxlist_for_class)
        boxlist_for_class.add_field(
            "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
        )
        result.append(boxlist_for_class)

    result = cat_boxlist(result)
    number_of_detections = len(result)

    # Limit to max_per_image detections **over all classes**
    if number_of_detections > self.detections_per_img > 0:
        cls_scores = result.get_field("scores")
        image_thresh, _ = torch.kthvalue(
            cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
        )
        keep = cls_scores >= image_thresh.item()
        keep = torch.nonzero(keep, as_tuple=False).squeeze(1)
        result = result[keep]
    return result
def forward(self, x, boxes):
    mask_prob = x

    scores = None
    if self.keypointer:
        mask_prob, scores = self.keypointer(x, boxes)

    assert len(boxes) == 1, "Only non-batched inference supported for now"
    boxes_per_image = [box.bbox.size(0) for box in boxes]
    mask_prob = mask_prob.split(boxes_per_image, dim=0)
    scores = scores.split(boxes_per_image, dim=0)

    results = []
    for prob, box, score in zip(mask_prob, boxes, scores):
        bbox = BoxList(box.bbox, box.size, mode="xyxy")
        for field in box.fields():
            bbox.add_field(field, box.get_field(field))
        prob = PersonKeypoints(prob, box.size)
        prob.add_field("logits", score)
        bbox.add_field("keypoints", prob)
        results.append(bbox)

    return results
def forward_for_single_feature_map(self, anchors, box_cls, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        box_cls: tensor of size N, A * C, H, W
        box_regression: tensor of size N, A * 4, H, W
    """
    device = box_cls.device
    N, _, H, W = box_cls.shape
    A = box_regression.size(1) // 4
    C = box_cls.size(1) // A

    # put in the same format as anchors
    box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
    box_cls = box_cls.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
    box_regression = box_regression.reshape(N, -1, 4)

    num_anchors = A * H * W

    candidate_inds = box_cls > self.pre_nms_thresh

    pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
    pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

    results = []
    for per_box_cls, per_box_regression, per_pre_nms_top_n, per_candidate_inds, per_anchors in zip(
        box_cls, box_regression, pre_nms_top_n, candidate_inds, anchors
    ):
        # Sort and select TopN
        # TODO most of this can be made out of the loop for all images.
        # TODO:Yang: Not easy to do. Because the numbers of detections are
        # different in each image. Therefore, this part needs to be done
        # per image.
        per_box_cls = per_box_cls[per_candidate_inds]

        per_box_cls, top_k_indices = per_box_cls.topk(per_pre_nms_top_n, sorted=False)

        per_candidate_nonzeros = per_candidate_inds.nonzero()[top_k_indices, :]

        per_box_loc = per_candidate_nonzeros[:, 0]
        per_class = per_candidate_nonzeros[:, 1]
        per_class += 1

        detections = self.box_coder.decode(
            per_box_regression[per_box_loc, :].view(-1, 4),
            per_anchors.bbox[per_box_loc, :].view(-1, 4),
        )

        boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
        boxlist.add_field("labels", per_class)
        boxlist.add_field("scores", per_box_cls)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        results.append(boxlist)

    return results
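# Illustrative sketch (not from the original file): per_candidate_inds.nonzero() above
# yields (anchor_index, class_index) pairs for every score over the threshold; indexing
# those pairs with the top-k positions recovers which anchor and which class each kept
# score belongs to:
import torch

per_box_cls = torch.tensor([[0.02, 0.60, 0.10],
                            [0.70, 0.01, 0.55]])   # (num_anchors, num_classes)
candidate = per_box_cls > 0.05
kept_scores = per_box_cls[candidate]               # flattened in row-major order
topk_scores, topk_idx = kept_scores.topk(2, sorted=False)
pairs = candidate.nonzero()[topk_idx, :]           # (anchor, class) for each kept score
anchor_idx, class_idx = pairs[:, 0], pairs[:, 1] + 1   # +1 to skip the background label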
def __getitem__(self, idx):
    img, anno = super(COCODataset, self).__getitem__(idx)

    # filter crowd annotations
    # TODO might be better to add an extra field
    if "lvis_v0.5" not in self.ann_file:
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

    if self.proposals is not None:
        img_id = self.ids[idx]
        id_field = 'indexes' if 'indexes' in self.proposals else 'ids'  # compat fix
        roi_idx = self.proposals[id_field].index(img_id)
        rois = self.proposals['boxes'][roi_idx]
        # remove duplicates, clip, remove small boxes, and take top k
        keep = unique_boxes(rois)
        rois = rois[keep, :]
        # scores = scores[keep]
        rois = BoxList(torch.tensor(rois), img.size, mode="xyxy")
        rois = rois.clip_to_image(remove_empty=True)
        rois = remove_small_boxes(boxlist=rois, min_size=2)
        if self.top_k > 0:
            rois = rois[[range(self.top_k)]]
            # scores = scores[:self.top_k]
    else:
        rois = None

    # support unlabeled images
    if anno == [] and 'unlabeled' in self.ann_file:
        boxes = torch.as_tensor([[0, 0, 0, 0]]).reshape(-1, 4)
        target = BoxList(boxes, img.size, mode="xyxy")
        classes = torch.tensor([0])
        target.add_field("labels", classes)
        if self._transforms is not None:
            img, target, rois = self._transforms(img, target, rois)
        target.bbox.fill_(0)
    else:
        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        if anno and "segmentation" in anno[0]:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)

        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        if anno and 'point' in anno[0]:
            click = [obj["point"] for obj in anno]
            click = Click(click, img.size)
            target.add_field("click", click)

        if anno and 'scribble' in anno[0]:
            scribble = [obj["scribble"] for obj in anno]
            # xmin, ymin, xmax, ymax
            scribble_box = []
            for sc in scribble:
                if len(sc[0]) == 0:
                    scribble_box.append([1, 2, 3, 4])
                else:
                    scribble_box.append(
                        [min(sc[0]), min(sc[1]), max(sc[0]), max(sc[1])])
            scribble_box = torch.tensor(scribble_box)
            scribble_box = torch.as_tensor(scribble_box).reshape(-1, 4)  # guard against no boxes
            scribble_target = BoxList(scribble_box, img.size, mode="xyxy")
            target.add_field("scribble", scribble_target)

        if anno and 'use_as' in anno[0]:
            tag_to_ind = {'tag': 0, 'point': 1, 'scribble': 2, 'box': 3}
            use_as = [tag_to_ind[obj['use_as']] for obj in anno]
            use_as = torch.tensor(use_as)
            target.add_field("use_as", use_as)

        target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target, rois = self._transforms(img, target, rois)

    return img, target, rois, idx
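# Illustrative sketch (not part of the dataset class): COCO annotations store boxes as
# (x, y, w, h), and BoxList(..., mode="xywh").convert("xyxy") above turns them into
# corner coordinates. A simplified version of that conversion with plain torch (the
# repo's BoxList may additionally apply a one-pixel offset for legacy reasons):
import torch

xywh = torch.tensor([[10., 20., 30., 40.]])        # x, y, width, height
xmin, ymin, w, h = xywh.unbind(dim=1)
xyxy = torch.stack((xmin, ymin, xmin + w, ymin + h), dim=1)
assert xyxy.tolist() == [[10., 20., 40., 60.]]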
def im_detect_bbox_aug(model, images, device, rois=None):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is the identity transform, no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist to match the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device, rois=rois
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device, rois=rois
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(model, images, scale, max_size, device, rois=rois)
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(
                model, images, scale, max_size, device, hflip=True, rois=rois
            )
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        if cfg.TEST.BBOX_AUG.HEUR == 'UNION':
            bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
            scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        elif cfg.TEST.BBOX_AUG.HEUR == 'AVG':
            bbox = torch.mean(
                torch.stack([boxlist_t.bbox for boxlist_t in boxlist_ts]), dim=0
            )
            scores = torch.mean(
                torch.stack([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts]), dim=0
            )
        else:
            raise ValueError('TEST.BBOX_AUG.HEUR must be UNION or AVG')
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(
            post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES)
        )
    return results
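# Illustrative sketch (not from the original file): the two merge heuristics above on
# toy tensors. UNION concatenates all augmented detections for the later NMS step; AVG
# assumes the augmentations return the same number of boxes in the same order and
# averages coordinates and scores element-wise:
import torch

boxes_a = torch.tensor([[0., 0., 10., 10.]])
boxes_b = torch.tensor([[2., 2., 12., 12.]])
scores_a = torch.tensor([0.8])
scores_b = torch.tensor([0.6])

union_boxes = torch.cat([boxes_a, boxes_b])                        # (2, 4)
union_scores = torch.cat([scores_a, scores_b])                     # (2,)

avg_boxes = torch.mean(torch.stack([boxes_a, boxes_b]), dim=0)     # [[1., 1., 11., 11.]]
avg_scores = torch.mean(torch.stack([scores_a, scores_b]), dim=0)  # [0.7]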