def test_greedy_assignment(self):
    """Greedy matching should not pair detections with their same-sized gts.

    A big and a small square gt are paired with a big and a small detection;
    with greedy_mode on, each detection is expected to match the *other*
    sized ground truth, so det_id never equals gt_id.
    """
    big_gt_mask = self.square_mask.copy()
    small_gt_mask = np.zeros(big_gt_mask.shape, dtype=big_gt_mask.dtype)
    small_gt_mask[900:1101, 900:1101] = True
    ground_truths = [
        GroundTruthInstance(big_gt_mask, 0),
        GroundTruthInstance(small_gt_mask, 0),
    ]
    # Big detection box
    big_det = PBoxDetInst(
        self.square_label_list,
        [700, 700, 1300, 1300],
        [[[10000, 0], [0, 10000]], [[10000, 0], [0, 10000]]])
    # Small detection box
    small_det = PBoxDetInst(
        self.square_label_list,
        [800, 800, 1200, 1200],
        [[[10000, 0], [0, 10000]], [[10000, 0], [0, 10000]]])
    detections = [big_det, small_det]

    evaluator = PDQ(greedy_mode=True)
    evaluator.score([(ground_truths, detections)])

    # Assume that big detection should be matched to small gt and small det to big gt
    for img_det_evals in evaluator._det_evals:
        for det_eval in img_det_evals:
            self.assertNotEqual(det_eval['det_id'], det_eval['gt_id'])
def test_no_detections_for_image_with_small_and_big_gt(self):
    """An undetected image (with one tiny and one big gt) halves the score.

    Image 1 has a perfect box detection; image 2 has two gts (one large,
    one a few pixels) and no detections, so overall PDQ should be 0.5.
    """
    gts1 = [val for val in self.square_gt]
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool
    small_mask = np.zeros(self.img_size, dtype=bool)
    small_mask[500:504, 500:501] = True
    gts2 = [GroundTruthInstance(self.square_mask, 0, 1, 1),
            GroundTruthInstance(small_mask, 0, 1, 2)]
    dets1 = [BBoxDetInst(self.default_label_list, self.gt_box)]
    dets2 = []
    evaluator = PDQ()
    score = evaluator.score([(gts1, dets1), (gts2, dets2)])
    self.assertAlmostEqual(score, 0.5)
def setUp(self):
    """Build a single centred 500x500 square gt on a 2000x2000 image."""
    self.img_size = (2000, 2000)
    self.default_covar = [[1000, 0], [0, 1000]]
    self.default_label_list = [1, 0]
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool
    self.square_mask = np.zeros(self.img_size, dtype=bool)
    self.square_mask[750:1250, 750:1250] = True
    self.square_gt = [GroundTruthInstance(self.square_mask, 0, 0, 0)]
    self.gt_box = self.square_gt[0].bounding_box
def test_no_detections_for_image(self):
    """A second image with a gt but no detections should halve the score."""
    first_img_gts = list(self.square_gt)
    second_img_gts = [GroundTruthInstance(self.square_mask, 0, 1, 1)]
    first_img_dets = [BBoxDetInst(self.default_label_list, self.gt_box)]
    second_img_dets = []

    evaluator = PDQ()
    score = evaluator.score([
        (first_img_gts, first_img_dets),
        (second_img_gts, second_img_dets),
    ])

    self.assertAlmostEqual(score, 0.5)
def setUp(self):
    """Build square and cross gts (and default flags) on a 2000x2000 image."""
    self.img_size = (2000, 2000)
    self.default_covar = [[1000, 0], [0, 1000]]
    self.default_filter_gt = False
    self.default_segment_mode = False
    self.default_greedy_mode = False
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool
    self.square_mask = np.zeros(self.img_size, dtype=bool)
    self.square_mask[750:1250, 750:1250] = True
    self.square_gt = [GroundTruthInstance(self.square_mask, 0)]
    self.square_gt_box = self.square_gt[0].bounding_box
    self.square_label_list = [1, 0]
    # Cross shape: horizontal and vertical bars overlapping at the centre
    self.cross_mask = np.zeros(self.img_size, dtype=bool)
    self.cross_mask[875:1125, 750:1250] = True
    self.cross_mask[750:1250, 875:1125] = True
    self.cross_gt = [GroundTruthInstance(self.cross_mask, 1)]
    self.cross_gt_box = self.cross_gt[0].bounding_box
    self.cross_label_list = [0, 1]
def test_multiple_missed_gts_too_small(self):
    """Tiny missed gts (2x2 boxes) should be ignored, leaving a perfect score."""
    gts = list(self.square_gt)
    base_mask = gts[0].segmentation_mask
    fill_value = np.amax(base_mask)
    # Create small 2x2 boxes which are missed around an edge of the image
    # (buffer of 2 pixels)
    for i in range(9):
        tiny_mask = np.zeros(base_mask.shape, base_mask.dtype)
        tiny_mask[2:4, 2 + i * 4:4 + i * 4] = fill_value
        gts.append(GroundTruthInstance(tiny_mask, 0, 0, i + 1))

    detections = [BBoxDetInst(self.default_label_list, self.gt_box)]
    evaluator = PDQ()
    score = evaluator.score([(gts, detections)])

    self.assertAlmostEqual(score, 1.0)
def test_missed_gts_and_unmatched_detections(self):
    """Ten missed gts and nine unmatched duplicate detections give 1/20.

    One perfect match out of (11 gts + 10 dets - 1 shared match) = 20
    evaluation slots yields a PDQ of 0.05.
    """
    gts = list(self.square_gt)
    base_mask = gts[0].segmentation_mask
    fill_value = np.amax(base_mask)
    # Create small 11x11 boxes which are missed around an edge of the image
    # (buffer of 2 pixels)
    for i in range(10):
        missed_mask = np.zeros(base_mask.shape, base_mask.dtype)
        missed_mask[2:14, 2 + i * 14:14 + i * 14] = fill_value
        gts.append(GroundTruthInstance(missed_mask, 0))

    detections = [BBoxDetInst(self.square_label_list, self.square_gt_box)
                  for _ in range(10)]

    evaluator = PDQ()
    score = evaluator.score([(gts, detections)])

    self.assertAlmostEqual(score, 1 / 20.)
def __iter__(self):
    """Yield a list of GroundTruthInstance objects for each image in the COCO data.

    Images are visited in sorted image-id order (optionally limited to the
    first ``self.n_imgs``). An image with no annotations yields an empty
    list. Annotations without a 'segmentation' entry are skipped (with a
    printed warning), as are annotations whose final mask is all zeros.
    When ``self.bbox_gt`` is set, the gt mask is the filled COCO bounding
    box instead of the segmentation mask.
    """
    coco_annotations = self.coco_obj.imgToAnns
    img_ids = sorted(self.coco_obj.imgs.keys())
    # Create map to transfer from category id to index id (used as class id in our tests)
    ann_idx_map = {cat_id: idx
                   for idx, cat_id in enumerate(sorted(self.coco_obj.cats.keys()))}

    if self.n_imgs is not None:
        img_id_iter = islice(img_ids, 0, self.n_imgs)
    else:
        img_id_iter = iter(img_ids)

    for img_id in img_id_iter:
        if img_id not in coco_annotations.keys():
            yield []
        else:
            # Warn about (and filter out) annotations without segmentations
            for annotation in coco_annotations[img_id]:
                if "segmentation" not in annotation.keys():
                    print("SKIPPED A GT OBJECT!")
            img_annotations = [annotation for annotation in coco_annotations[img_id]
                               if 'segmentation' in annotation.keys()]

            # Extract the class ids for each annotation
            class_ids = [annotation['category_id'] for annotation in img_annotations]
            bboxes = []
            seg_masks = []
            ignores = [annotation.get('ignore', False) for annotation in img_annotations]
            iscrowds = [annotation['iscrowd'] for annotation in img_annotations]
            areas = [annotation['area'] for annotation in img_annotations]

            for annotation in img_annotations:
                # Transform bbox from COCO [x, y, w, h] to [x1, y1, x2, y2].
                # NOTE(review): this mutates the annotation's bbox list in
                # place, matching the original behaviour.
                box = annotation['bbox']
                box[2] += box[0]
                box[3] += box[1]
                bboxes.append(box)

                # Define GT segmentation mask.
                # If segmentation mask is expected to be pixels within bounding
                # box, adjust accordingly.
                seg_mask = self.coco_obj.annToMask(annotation)
                if self.bbox_gt:
                    # np.bool/np.int were deprecated in NumPy 1.20 and removed
                    # in 1.24; use the builtin bool/int instead.
                    eval_mask = np.zeros(seg_mask.shape, dtype=bool)
                    # Use the COCO bounding box (note not exact around segmentation masks).
                    # Round down for lower bounds and round up for upper bounds
                    # to accommodate floats.
                    seg_bbox = np.floor(box).astype(int)
                    seg_bbox[-2:] = np.ceil(box[-2:]).astype(int)
                    # Clamp box to image size to make sure not outside extremes
                    seg_bbox = clamp_bbox(seg_bbox, eval_mask.shape)
                    # Create segmentation mask with bounding box
                    eval_mask[seg_bbox[1]:seg_bbox[3] + 1,
                              seg_bbox[0]:seg_bbox[2] + 1] = True
                    seg_masks.append(eval_mask)
                else:
                    seg_masks.append(seg_mask)

            # Generate ground truth instances from the COCO annotation information.
            # NOTE this skips any annotation with a bad (all-zero) segmentation
            # mask; np.any is equivalent to the old np.amax(mask > 0) check but
            # also handles empty masks without raising.
            yield [
                GroundTruthInstance(
                    segmentation_mask=seg_masks[ann_idx],
                    true_class_label=ann_idx_map[class_ids[ann_idx]],
                    coco_bounding_box=bboxes[ann_idx],
                    coco_ignore=ignores[ann_idx],
                    coco_iscrowd=iscrowds[ann_idx],
                    coco_area=areas[ann_idx]
                )
                for ann_idx in range(len(img_annotations))
                if np.any(seg_masks[ann_idx])
            ]