def get_groundtruth(self, index):
    width, height = self.im_sizes[index, :]
    # get object bounding boxes, labels and relations
    obj_boxes = self.gt_boxes[index].copy()
    obj_labels = self.gt_classes[index].copy()
    obj_relation_triplets = self.relationships[index].copy()

    if self.filter_duplicate_rels:
        # Filter out dupes!
        assert self.split == 'train'
        old_size = obj_relation_triplets.shape[0]
        all_rel_sets = defaultdict(list)
        for (o0, o1, r) in obj_relation_triplets:
            all_rel_sets[(o0, o1)].append(r)
        obj_relation_triplets = [(k[0], k[1], np.random.choice(v))
                                 for k, v in all_rel_sets.items()]
        obj_relation_triplets = np.array(obj_relation_triplets)

    obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))

    for i in range(obj_relation_triplets.shape[0]):
        subj_id = obj_relation_triplets[i][0]
        obj_id = obj_relation_triplets[i][1]
        pred = obj_relation_triplets[i][2]
        obj_relations[subj_id, obj_id] = pred

    target = BoxList(obj_boxes, (width, height), mode="xyxy")
    target.add_field("labels", torch.from_numpy(obj_labels))
    target.add_field("pred_labels", torch.from_numpy(obj_relations))
    target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
    target.add_field("difficult", torch.from_numpy(obj_labels).clone().fill_(0))
    return target
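
# Hedged usage sketch (not part of the class above): how the ground-truth fields
# produced by get_groundtruth fit together. `dataset` stands for any instance
# exposing the method above; field names match the add_field calls.
def _inspect_groundtruth(dataset, index):
    target = dataset.get_groundtruth(index)
    labels = target.get_field("labels")             # per-box class indices
    rel_matrix = target.get_field("pred_labels")    # [num_boxes, num_boxes], 0 = no relation
    triplets = target.get_field("relation_labels")  # [num_rels, 3] rows of (subj_idx, obj_idx, predicate)
    assert rel_matrix.shape[0] == len(target)
    return labels, rel_matrix, triplets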
def im_detect_bbox_aug(model, images, device):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is identity transform, no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist as the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(
            model, images, scale, max_size, device
        )
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(
                model, images, scale, max_size, device, hflip=True
            )
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(
            post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES)
        )

    return results
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        objectness: tensor of size N, A, H, W
        box_regression: tensor of size N, A * 4, H, W
    """
    device = objectness.device
    N, A, H, W = objectness.shape

    # put in the same format as anchors
    objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
    objectness = objectness.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

    num_anchors = A * H * W

    pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
    objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

    batch_idx = torch.arange(N, device=device)[:, None]
    box_regression = box_regression[batch_idx, topk_idx]

    image_shapes = [box.size for box in anchors]
    concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
    concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

    proposals = self.box_coder.decode(
        box_regression.view(-1, 4), concat_anchors.view(-1, 4)
    )
    proposals = proposals.view(N, -1, 4)

    result = []
    for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
        boxlist = BoxList(proposal, im_shape, mode="xyxy")
        boxlist.add_field("objectness", score)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        boxlist = boxlist_nms(
            boxlist,
            self.nms_thresh,
            max_proposals=self.post_nms_top_n,
            score_field="objectness",
        )
        result.append(boxlist)
    return result
def __getitem__(self, index):
    """
    get dataset item
    """
    # get image
    ref = self.refer.Refs[self.ref_ids[index]]
    img_id = ref["image_id"]
    ann_id = ref["ann_id"]
    img_path = os.path.join(self.refer.IMAGE_DIR, self.refer.Imgs[img_id]["file_name"])
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # cv2 images are (height, width, channels)
    height, width = img.shape[0], img.shape[1]

    ref_expr = "\n".join([s["raw"] for s in ref["sentences"]])

    # get the referent bounding box
    referent_box = [self.refer.Anns[ann_id]["bbox"]]
    target_raw = BoxList(referent_box, (width, height), mode="xyxy")
    if self.transforms is not None:
        img, target = self.transforms(img, target_raw)
    else:
        img, target = img, target_raw
    target.add_field("ref_sents", [s["raw"] for s in ref["sentences"]])
    target.add_field("label", self.refer.Anns[ann_id]["category_id"])
    target = target.clip_to_image(remove_empty=False)

    info = {
        "img_id": img_id,
        "ann_id": ann_id,
        "ref_id": self.ref_ids[index],
        "ref_sents": [s["raw"] for s in ref["sentences"]],
    }

    return img, target, index, info
def get_triplets_as_string(self, top_obj: BoxList, top_pred: BoxPairList) -> List[str]:
    """
    Given top detected objects and top predicted relationships,
    return the triplets in human-readable form.

    :param top_obj: BoxList containing top detected objects
    :param top_pred: BoxPairList containing the top detected triplets
    :return: List of triplets (in decreasing score order)
    """
    # num_detected_objects
    obj_indices = top_obj.get_field("labels")
    # 100 x 2 (indices in obj_indices)
    obj_pairs_indices = top_pred.get_field("idx_pairs")
    # 100 (indices in GLOBAL relationship indices list)
    rel_indices = top_pred.get_field("scores").max(1)[1]
    # 100 x 3
    top_triplets = torch.stack(
        (obj_indices[obj_pairs_indices[:, 0]],
         obj_indices[obj_pairs_indices[:, 1]],
         rel_indices), 1).tolist()

    idx_to_obj = self.data_loader_test.dataset.ind_to_classes
    idx_to_rel = self.data_loader_test.dataset.ind_to_predicates

    # convert integers to labels
    top_triplets_str = []
    for t in top_triplets:
        top_triplets_str.append(idx_to_obj[t[0]] + " " + idx_to_rel[t[2]] + " " + idx_to_obj[t[1]])
    return top_triplets_str
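
# Hedged usage sketch: `visualizer` is an illustrative name for any object exposing
# get_triplets_as_string as defined above; this prints the k highest-scoring
# subject-predicate-object phrases, e.g. "man riding horse".
def _print_top_triplets(visualizer, top_obj, top_pred, k=5):
    for phrase in visualizer.get_triplets_as_string(top_obj, top_pred)[:k]:
        print(phrase)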
def __getitem__(self, index):
    """
    get dataset item
    """
    # get image
    img_id = self.img_ids[index]
    img_path = self.img_paths[index]
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # cv2 images are (height, width, channels)
    height, width = img.shape[0], img.shape[1]

    ref_expr = "\n".join([])

    # no annotations are available here, so use a dummy target bbox
    referent_box = [[34.79, 272.54, 106.72, 80.43]]
    target_raw = BoxList(referent_box, (width, height), mode="xyxy")
    if self.transforms is not None:
        img, target = self.transforms(img, target_raw)
    else:
        img, target = img, target_raw
    target.add_field("ref_sents", [])
    target.add_field("label", None)
    target = target.clip_to_image(remove_empty=False)

    info = {"img_id": img_id}

    return img, target, index, info
def __getitem__(self, index):
    """
    get dataset item
    """
    # get image
    img = Image.fromarray(self._im_getter(index))
    width, height = img.size

    if self.NO_EVAL:
        target_raw = BoxList([[0, 0, 0, 0]], (width, height), mode="xyxy")
        img, target = self.transforms(img, target_raw)
        return img, target, index

    # get object bounding boxes, labels and relations
    obj_boxes = self.gt_boxes[index].copy()
    obj_labels = self.gt_classes[index].copy()
    obj_relation_triplets = self.relationships[index].copy()

    # TODO(cjrd) can we use this logic for inference?
    if self.filter_duplicate_rels:
        # Filter out dupes!
        assert self.split == 'train'
        old_size = obj_relation_triplets.shape[0]
        all_rel_sets = defaultdict(list)
        for (o0, o1, r) in obj_relation_triplets:
            all_rel_sets[(o0, o1)].append(r)
        obj_relation_triplets = [(k[0], k[1], np.random.choice(v))
                                 for k, v in all_rel_sets.items()]
        obj_relation_triplets = np.array(obj_relation_triplets)

    obj_relations = np.zeros((obj_boxes.shape[0], obj_boxes.shape[0]))

    for i in range(obj_relation_triplets.shape[0]):
        subj_id = obj_relation_triplets[i][0]
        obj_id = obj_relation_triplets[i][1]
        pred = obj_relation_triplets[i][2]
        obj_relations[subj_id, obj_id] = pred

    target_raw = BoxList(obj_boxes, (width, height), mode="xyxy")
    img, target = self.transforms(img, target_raw)
    target.add_field("labels", torch.from_numpy(obj_labels))
    target.add_field("pred_labels", torch.from_numpy(obj_relations))
    target.add_field("relation_labels", torch.from_numpy(obj_relation_triplets))
    target = target.clip_to_image(remove_empty=False)

    return img, target, index
def prepare_boxlist(self, boxes, scores, image_shape):
    """
    Return a BoxList from `boxes` and add the probability scores as the
    extra field "scores".

    `boxes` has shape (#detections, 4 * #classes), where each row represents
    a list of predicted bounding boxes for each of the object classes in the
    dataset (including the background class). The detections in each row
    originate from the same object proposal.

    `scores` has shape (#detections, #classes), where each row represents a
    list of object detection confidence scores for each of the object classes
    in the dataset (including the background class). `scores[i, j]`
    corresponds to the box at `boxes[i, j * 4:(j + 1) * 4]`.
    """
    boxes = boxes.reshape(-1, 4)
    scores = scores.reshape(-1)
    boxlist = BoxList(boxes, image_shape, mode="xyxy")
    boxlist.add_field("scores", scores)
    return boxlist
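
# Hedged shape sketch (illustrative values only; `post_processor` is assumed to be
# the object providing prepare_boxlist above): after flattening, row i * num_classes + j
# of the BoxList holds the class-j box of proposal i, aligned with its score.
def _demo_prepare_boxlist(post_processor):
    num_proposals, num_classes = 2, 3
    boxes = torch.arange(num_proposals * num_classes * 4, dtype=torch.float32).view(
        num_proposals, num_classes * 4)
    scores = torch.rand(num_proposals, num_classes)
    boxlist = post_processor.prepare_boxlist(boxes, scores, (800, 600))
    assert len(boxlist) == num_proposals * num_classes
    return boxlist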
def filter_results(self, boxlist, num_classes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).
    """
    # unwrap the boxlist to avoid additional overhead.
    # if we had multi-class NMS, we could perform this directly on the boxlist
    boxes = boxlist.bbox.reshape(-1, num_classes * 4)
    scores = boxlist.get_field("scores").reshape(-1, num_classes)
    logits = boxlist.get_field("logits").reshape(-1, num_classes)
    features = boxlist.get_field("features")

    device = scores.device
    result = []
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    inds_all = scores > self.score_thresh
    for j in range(1, num_classes):
        inds = inds_all[:, j].nonzero().squeeze(1)
        scores_j = scores[inds, j]
        features_j = features[inds]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
        boxlist_for_class.add_field("scores", scores_j)
        boxlist_for_class.add_field("features", features_j)
        boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
        num_labels = len(boxlist_for_class)
        boxlist_for_class.add_field(
            "labels",
            torch.full((num_labels,), j, dtype=torch.int64, device=device))
        result.append(boxlist_for_class)

    result = cat_boxlist(result)
    number_of_detections = len(result)

    # Limit to max_per_image detections **over all classes**
    if number_of_detections > self.detections_per_img > 0:
        cls_scores = result.get_field("scores")
        image_thresh, _ = torch.kthvalue(
            cls_scores.cpu(),
            number_of_detections - self.detections_per_img + 1)
        keep = cls_scores >= image_thresh.item()
        keep = torch.nonzero(keep).squeeze(1)
        result = result[keep]
    return result
def forward(self, image_list, feature_maps):
    grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
    anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
    anchors = []
    for i, (image_height, image_width) in enumerate(image_list.image_sizes):
        anchors_in_image = []
        for anchors_per_feature_map in anchors_over_all_feature_maps:
            boxlist = BoxList(
                anchors_per_feature_map, (image_width, image_height), mode="xyxy"
            )
            self.add_visibility_to(boxlist)
            anchors_in_image.append(boxlist)
        anchors.append(anchors_in_image)
    return anchors
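
# Hedged usage sketch: the forward above returns, per image, one BoxList per
# feature-map level, each contributing A * H * W anchors. `anchor_generator` is
# assumed to be the (callable) module defining the forward above.
def _count_anchors(anchor_generator, image_list, feature_maps):
    anchors = anchor_generator(image_list, feature_maps)
    return [[len(boxlist) for boxlist in per_image] for per_image in anchors]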
def select_over_all_levels(self, boxlists):
    num_images = len(boxlists)
    results = []
    for i in range(num_images):
        scores = boxlists[i].get_field("scores")
        labels = boxlists[i].get_field("labels")
        boxes = boxlists[i].bbox
        boxlist = boxlists[i]
        result = []
        # skip the background
        for j in range(1, self.num_classes):
            inds = (labels == j).nonzero().view(-1)

            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms_thresh, score_field="scores"
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels,), j, dtype=torch.int64, device=scores.device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.fpn_post_nms_top_n > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.fpn_post_nms_top_n + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        results.append(result)
    return results
def forward_for_single_feature_map(self, anchors, box_cls, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        box_cls: tensor of size N, A * C, H, W
        box_regression: tensor of size N, A * 4, H, W
    """
    device = box_cls.device
    N, _, H, W = box_cls.shape
    A = box_regression.size(1) // 4
    C = box_cls.size(1) // A

    # put in the same format as anchors
    box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
    box_cls = box_cls.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
    box_regression = box_regression.reshape(N, -1, 4)

    num_anchors = A * H * W

    candidate_inds = box_cls > self.pre_nms_thresh

    pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
    pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

    results = []
    for per_box_cls, per_box_regression, per_pre_nms_top_n, \
            per_candidate_inds, per_anchors in zip(
                box_cls, box_regression, pre_nms_top_n, candidate_inds, anchors):

        # Sort and select TopN
        # TODO most of this can be made out of the loop for all images.
        # TODO:Yang: Not easy to do. Because the numbers of detections are
        # different in each image. Therefore, this part needs to be done
        # per image.
        per_box_cls = per_box_cls[per_candidate_inds]

        per_box_cls, top_k_indices = per_box_cls.topk(per_pre_nms_top_n, sorted=False)

        per_candidate_nonzeros = per_candidate_inds.nonzero()[top_k_indices, :]

        per_box_loc = per_candidate_nonzeros[:, 0]
        per_class = per_candidate_nonzeros[:, 1]
        per_class += 1

        detections = self.box_coder.decode(
            per_box_regression[per_box_loc, :].view(-1, 4),
            per_anchors.bbox[per_box_loc, :].view(-1, 4)
        )

        boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
        boxlist.add_field("labels", per_class)
        boxlist.add_field("scores", per_box_cls)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        results.append(boxlist)

    return results
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Calculate precision and recall based on evaluation code of PASCAL VOC.
    This function calculates precision and recall of predicted bounding boxes
    obtained from a dataset which has :math:`N` images.
    The code is based on the evaluation code used in PASCAL VOC Challenge.
    """
    n_pos = defaultdict(int)
    score = defaultdict(list)
    match = defaultdict(list)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.numpy()
        pred_label = pred_boxlist.get_field("labels").numpy()
        pred_score = pred_boxlist.get_field("scores").numpy()
        gt_bbox = gt_boxlist.bbox.numpy()
        gt_label = gt_boxlist.get_field("labels").numpy()
        gt_difficult = gt_boxlist.get_field("difficult").numpy()

        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            # sort by score
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]

            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]

            n_pos[l] += np.logical_not(gt_difficult_l).sum()
            score[l].extend(pred_score_l)

            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue

            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou

            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:
                if gt_idx >= 0:
                    if gt_difficult_l[gt_idx]:
                        match[l].append(-1)
                    else:
                        if not selec[gt_idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                    selec[gt_idx] = True
                else:
                    match[l].append(0)

    n_fg_class = max(n_pos.keys()) + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class

    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)

        order = score_l.argsort()[::-1]
        match_l = match_l[order]

        tp = np.cumsum(match_l == 1)
        fp = np.cumsum(match_l == 0)

        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]

    return prec, rec
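
# Hedged companion sketch (not part of the function above): an AP computation in the
# style of the standard PASCAL VOC / chainercv evaluation code, consuming the prec/rec
# lists returned above. `use_07_metric` selects 11-point interpolation; numpy is
# assumed to be imported as np, as in the surrounding code.
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
    n_fg_class = len(prec)
    ap = np.empty(n_fg_class)
    for l in range(n_fg_class):
        if prec[l] is None or rec[l] is None:
            ap[l] = np.nan
            continue
        if use_07_metric:
            # 11-point interpolation (VOC2007 style)
            ap[l] = 0
            for t in np.arange(0.0, 1.1, 0.1):
                if np.sum(rec[l] >= t) == 0:
                    p = 0
                else:
                    p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])
                ap[l] += p / 11
        else:
            # integrate the precision envelope over recall
            mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
            mrec = np.concatenate(([0], rec[l], [1]))
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]
            i = np.where(mrec[1:] != mrec[:-1])[0]
            ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap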
def evaluate_box_proposals(predictions, dataset, thresholds=None, area="all", limit=None):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code.
    However, it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0**2, 1e5**2],    # all
        [0**2, 32**2],     # small
        [32**2, 96**2],    # medium
        [96**2, 1e5**2],   # large
        [96**2, 128**2],   # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2],  # 512-inf
    ]
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for image_id, prediction in enumerate(predictions):
        original_id = image_id  # dataset.id_to_img_map[image_id]

        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]

        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert("xyxy")
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])

        if len(gt_boxes) == 0:
            continue

        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]

        num_pos += len(gt_boxes)

        if len(gt_boxes) == 0:
            continue

        if len(prediction) == 0:
            continue

        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]

        overlaps = boxlist_iou(prediction, gt_boxes)

        _gt_overlaps = torch.zeros(len(gt_boxes))
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)

            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1

        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)

    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)

    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
def forward(self, x, boxes, skip_nms=False):
    """
    Arguments:
        x (tuple[tensor, tensor]): x contains the class logits
            and the box_regression from the model.
        boxes (list[BoxList]): bounding boxes that are used as
            reference, one for each image

    Returns:
        results (list[BoxList]): one BoxList for each image, containing
            the extra fields labels and scores
    """
    class_logit, box_regression = x
    class_prob = F.softmax(class_logit, -1)

    # TODO think about a representation of batch of boxes
    image_shapes = [box.size for box in boxes]
    boxes_per_image = [len(box) for box in boxes]
    features = [box.get_field("features") for box in boxes]
    concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)

    if not skip_nms:
        if self.cls_agnostic_bbox_reg:
            box_regression = box_regression[:, -4:]
        proposals = self.box_coder.decode(
            box_regression.view(sum(boxes_per_image), -1), concat_boxes)
        if self.cls_agnostic_bbox_reg:
            proposals = proposals.repeat(1, class_prob.shape[1])
        proposals = proposals.split(boxes_per_image, dim=0)
    else:
        proposals = concat_boxes.split(boxes_per_image, dim=0)

    num_classes = class_prob.shape[1]

    class_prob = class_prob.split(boxes_per_image, dim=0)
    class_logit = class_logit.split(boxes_per_image, dim=0)

    results = []
    idx = 0
    for prob, logit, boxes_per_img, features_per_img, image_shape in zip(
            class_prob, class_logit, proposals, features, image_shapes):
        if not self.bbox_aug_enabled and not skip_nms:
            # If bbox aug is enabled, we will do it later
            boxlist = self.prepare_boxlist(boxes_per_img, features_per_img,
                                           prob, logit, image_shape)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            if not self.relation_on:
                boxlist_filtered = self.filter_results(boxlist, num_classes)
            else:
                # boxlist_pre = self.filter_results(boxlist, num_classes)
                boxlist_filtered = self.filter_results_nm(boxlist, num_classes)

                # to enforce a minimum number of detections per image,
                # repeatedly halve the confidence threshold until enough
                # detections survive the filtering
                score_thresh = 0.05
                while len(boxlist_filtered) < self.min_detections_per_img:
                    score_thresh /= 2.0
                    print(("\nNumber of proposals {} is too small, "
                           "retrying filter_results with score thresh"
                           " = {}").format(len(boxlist_filtered), score_thresh))
                    boxlist_filtered = self.filter_results_nm(
                        boxlist, num_classes, thresh=score_thresh)
        else:
            boxlist = BoxList(boxes_per_img, image_shape, mode="xyxy")
            boxlist.add_field("scores", prob[:, 1:].max(1)[0])
            boxlist.add_field("logits", logit)
            boxlist.add_field("features", features_per_img)
            boxlist.add_field("labels", boxes[idx].get_field("labels"))
            boxlist.add_field("regression_targets",
                              boxes[idx].bbox.clone().fill_(0.0))
            boxlist_filtered = boxlist

        idx += 1

        if len(boxlist) == 0:
            raise ValueError("boxlist should not be empty!")

        results.append(boxlist_filtered)
    return results
def filter_results_nm(self, boxlist, num_classes, thresh=0.05):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS). Similar to Neural-Motif Network.
    """
    # unwrap the boxlist to avoid additional overhead.
    # if we had multi-class NMS, we could perform this directly on the boxlist
    boxes = boxlist.bbox.reshape(-1, num_classes * 4)
    scores = boxlist.get_field("scores").reshape(-1, num_classes)
    logits = boxlist.get_field("logits").reshape(-1, num_classes)
    features = boxlist.get_field("features")

    valid_cls = (scores[:, 1:].max(0)[0] > thresh).nonzero() + 1

    nms_mask = scores.clone()
    nms_mask.zero_()

    device = scores.device
    result = []
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    inds_all = scores > self.score_thresh
    for j in valid_cls.view(-1).cpu():
        scores_j = scores[:, j]
        boxes_j = boxes[:, j * 4:(j + 1) * 4]
        boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
        boxlist_for_class.add_field("scores", scores_j)
        boxlist_for_class.add_field(
            "idxs", torch.arange(0, scores.shape[0]).long())
        # boxlist_for_class = boxlist_nms(
        #     boxlist_for_class, self.nms
        # )
        boxlist_for_class = boxlist_nms(boxlist_for_class, 0.3)
        nms_mask[:, j][boxlist_for_class.get_field("idxs")] = 1
        num_labels = len(boxlist_for_class)
        boxlist_for_class.add_field(
            "labels",
            torch.full((num_labels,), j, dtype=torch.int64, device=device))
        result.append(boxlist_for_class)

    dists_all = nms_mask * scores

    # filter duplicate boxes
    scores_pre, labels_pre = dists_all.max(1)
    inds_all = scores_pre.nonzero()
    assert inds_all.dim() != 0
    inds_all = inds_all.squeeze(1)

    labels_all = labels_pre[inds_all]
    scores_all = scores_pre[inds_all]
    features_all = features[inds_all]
    logits_all = logits[inds_all]

    box_inds_all = inds_all * scores.shape[1] + labels_all
    result = BoxList(boxlist.bbox.view(-1, 4)[box_inds_all],
                     boxlist.size, mode="xyxy")
    result.add_field("labels", labels_all)
    result.add_field("scores", scores_all)
    result.add_field("logits", logits_all)
    result.add_field("features", features_all)
    number_of_detections = len(result)

    vs, idx = torch.sort(scores_all, dim=0, descending=True)
    idx = idx[vs > thresh]
    if self.detections_per_img < idx.size(0):
        idx = idx[:self.detections_per_img]
    result = result[idx]
    return result
v = v.draw_instance_predictions(pred)
showarray(v.get_image()[:, :, ::-1], save_dir + "pred_%i.jpg" % i)

# collect the detections for this image as a BoxList
boxes = instances.pred_boxes.tensor
boxlist = BoxList(boxes.cpu(), (image_w, image_h), mode="xyxy")
boxlist.add_field("scores", instances.scores.cpu())
boxlist.add_field("labels", instances.pred_classes.cpu())
boxlist.add_field("attr_logits", instances.attr_logits.cpu())
boxlist.add_field("cls_logits", instances.cls_logits.cpu())
data.append(boxlist)
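
# Hedged sketch for persisting the collected BoxLists once the loop above finishes;
# `data` and `save_dir` are the variables used above, the output filename is
# illustrative, and torch.save simply pickles the list of BoxList objects.
def _save_boxlists(data, save_dir):
    torch.save(data, save_dir + "boxlists.pth")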