Example 1
    def evaluate(self, json_file_pred, n_imgs=-1, iou_thres=0.4):
        self.predictions["annotations"] = json_file_pred['annotations']

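        # Build ground-truth and detection COCO objects directly from the
        # in-memory dicts rather than loading JSON files from disk.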
        coco_ds = COCO()
        coco_ds.dataset = self.annotations
        coco_ds.createIndex()
        
        coco_dt = COCO()
        coco_dt.dataset = self.predictions
        coco_dt.createIndex()
        
        imgIds = sorted(coco_ds.getImgIds())
        
        if n_imgs > 0:
            # sample without replacement so no image is evaluated twice
            imgIds = np.random.choice(imgIds, n_imgs, replace=False)

        cocoEval = COCOeval(coco_ds, coco_dt, 'bbox')
        cocoEval.params.imgIds  = imgIds
        cocoEval.params.useCats = True
        cocoEval.params.iouType = "bbox"
        cocoEval.params.iouThrs = np.array([iou_thres])

        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        
        return cocoEval
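
A minimal sketch of how this evaluator might be driven, assuming the surrounding class keeps COCO-style `annotations` and `predictions` dicts; the `evaluator` name and field values below are purely illustrative. Because the detections are indexed via `createIndex()` rather than `loadRes()`, each prediction must already carry `id`, `area` and `score` fields:

json_file_pred = {
    "annotations": [
        {"id": 1, "image_id": 42, "category_id": 1,
         "bbox": [10.0, 20.0, 30.0, 40.0],  # COCO xywh
         "area": 1200.0, "iscrowd": 0, "score": 0.9},
    ]
}
coco_eval = evaluator.evaluate(json_file_pred, n_imgs=-1, iou_thres=0.5)
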
Example 2
 def after_validate(self):
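     # Silence pycocotools' verbose printing during index creation and evaluation.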
     with redirect_stdout(io.StringIO()):
         gt = COCO()
         gt.dataset = self.gt_ds
         gt.createIndex()
         dt = COCO()
         dt.dataset = self.dt_ds
         dt.createIndex()
         coco_eval = COCOeval(gt, dt, iouType='bbox')
         coco_eval.evaluate()
         coco_eval.accumulate()
         coco_eval.summarize()
         self.stats = coco_eval.stats
         self.reset_counters()
Example 3
def visualize(filename, annotations, threshold=0.2):
    for annotation in annotations:
      
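        # Zero out keypoints below the confidence threshold so showAnns()
        # draws only confident joints.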
        keypoints = np.asarray(annotation['keypoints']).reshape(-1, 3) 
        
        low_confidence = keypoints[:, -1] < threshold
        keypoints[low_confidence, :] = [0, 0, 0]
        annotation['keypoints'] = keypoints.reshape(-1).tolist()

    plt.imshow(mpimg.imread(filename))
    plt.axis('off')
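    # Build a bare COCO object that carries only the person-keypoints category,
    # which is all showAnns() needs to look up the skeleton definition.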
    coco = COCO()
    coco.dataset = {
        "categories": [
            {
                "supercategory": "person",
                "id": 1,
                "name": "person",
                "keypoints": ["nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder",
                              "right_shoulder", "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip",
                              "right_hip", "left_knee", "right_knee", "left_ankle", "right_ankle"],
                "skeleton": [[1, 2], [1, 3], [2, 3], [2, 4], [3, 5], [4, 6], [5, 7], [6, 7], [6, 8], [6, 12], [7, 9],
                             [7, 13], [8, 10], [9, 11], [12, 13], [14, 12], [15, 13], [16, 14], [17, 15]]
            }
        ]
    }
    coco.createIndex()
    coco.showAnns(annotations)
    
    plt.savefig('kim02.jpg')
    img = cv2.imread('kim02.jpg', cv2.IMREAD_UNCHANGED)
    cv2.imshow('Result', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 4
def convert_to_coco_api(ds, bbox_fmt='voc'):
    """
    """
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        # to coco format: xmin, ymin, w, h
        if bbox_fmt.lower() == "voc":  # xmin, ymin, xmax, ymax
            bboxes[:, 2:] -= bboxes[:, :2]
        elif bbox_fmt.lower() == "yolo":  # xcen, ycen, w, h
            bboxes[:, :2] = bboxes[:, :2] - bboxes[:, 2:] / 2
        elif bbox_fmt.lower() == "coco":
            pass
        else:
            raise ValueError(f"bounding box format {bbox_fmt} not supported!")
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
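            # (transposing a C-contiguous tensor and transposing it back yields
            # an F-contiguous layout, which pycocotools' RLE encoder expects)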
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
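
A hedged usage sketch for this converter, assuming `dataset` yields (image tensor, target dict) pairs and that detections were exported to a COCO-format results file (the "predictions.json" name is illustrative):

from pycocotools.cocoeval import COCOeval

coco_gt = convert_to_coco_api(dataset, bbox_fmt="voc")
coco_dt = coco_gt.loadRes("predictions.json")  # hypothetical results file
coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the standard 12 COCO metrics
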
Example 5
    def compute(self):
        for valid_batch_num in range(len(self._keypoints_gt_accumulated)):
            self.append_to_results(
                data=self._keypoints_dt_accumulated[valid_batch_num], gt=False)
            self.append_to_results(
                data=self._keypoints_gt_accumulated[valid_batch_num], gt=True)
            self._image_counter += len(
                self._keypoints_gt_accumulated[valid_batch_num])

        coco_gt = COCO()
        coco_gt.dataset = self._keypoints_gt
        coco_gt.createIndex()

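        # loadRes() accepts either a results-file path (used here) or an
        # in-memory list of detection dicts.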
        json_dt_res_file = 'temp.json'

        with open(json_dt_res_file, "w+") as f:
            json.dump(self._keypoints_dt, f)
        coco_dt = coco_gt.loadRes(resFile=json_dt_res_file)

        coco_eval = COCOeval(coco_gt, coco_dt, 'keypoints')
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        stats = coco_eval.stats
        print(stats)
        self.reset_state()

        return stats[0]
Example 6

def _coco_eval(gts, detections, height, width, labelmap=("car", "pedestrian")):
    """Simple helper function wrapping COCO's Python API.

    :param gts: iterable of numpy boxes for the ground truth
    :param detections: iterable of numpy boxes for the detections
    :param height: int, image height
    :param width: int, image width
    :param labelmap: iterable of class labels
    """
    categories = [{
        "id": idx + 1,
        "name": class_name,
        "supercategory": "none"
    } for idx, class_name in enumerate(labelmap)]

    dataset, results = _to_coco_format(gts,
                                       detections,
                                       categories,
                                       height=height,
                                       width=width)

    coco_gt = COCO()
    coco_gt.dataset = dataset
    coco_gt.createIndex()
    coco_pred = coco_gt.loadRes(results)

    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = np.arange(1, len(gts) + 1, dtype=int)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example 7
def tf_data_to_COCO(ds: tf.data.Dataset, class2idx: Mapping[str, int]) -> COCO:
    """Build an in-memory COCO ground-truth object from a tf.data.Dataset."""
    gt_coco: dict = dict(images=[], annotations=[])
    image_id = 1
    annot_id = 1

    # Create COCO categories
    categories = [
        dict(supercategory='instance', id=i, name=n)
        for n, i in class2idx.items()
    ]
    gt_coco['categories'] = categories

    for image, (labels, bbs) in ds.unbatch():
        h, w = image.shape[0:2]
        im_annot, annots = _COCO_gt_annot(image_id, annot_id, (h, w), labels,
                                          bbs)
        gt_coco['annotations'].extend(annots)
        gt_coco['images'].append(im_annot)

        annot_id += len(annots)
        image_id += 1

    gtCOCO = COCO()
    gtCOCO.dataset = gt_coco
    gtCOCO.createIndex()

    return gtCOCO
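
A sketch of invoking this helper, assuming `val_ds` yields (image, (labels, bboxes)) elements and that `_COCO_gt_annot` comes from the surrounding project; the class mapping below is illustrative:

class2idx = {"car": 1, "pedestrian": 2}
coco_gt = tf_data_to_COCO(val_ds, class2idx)
print(coco_gt.getCatIds(), len(coco_gt.getImgIds()))
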
Example 8
 def coco(self):
     """
     :return: a Coco-like object that we can use to evaluate detection!
     """
     anns = []
     for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
         for cls, box in zip(cls_array.tolist(), box_array.tolist()):
             anns.append({
                 'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                 'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                 'category_id': cls,
                 'id': len(anns),
                 'image_id': i,
                 'iscrowd': 0,
             })
     fauxcoco = COCO()
     fauxcoco.dataset = {
         'info': {'description': 'ayy lmao'},
         'images': [{'id': i} for i in range(self.__len__())],
          'categories': [{'supercategory': 'person', 'id': i, 'name': name}
                         for i, name in enumerate(self.ind_to_classes)
                         if name != '__background__'],
         'annotations': anns,
     }
     fauxcoco.createIndex()
     return fauxcoco
Example 9

def coco_api_from_records(records):
    """Create a pycocotools COCO dataset from records."""
    coco_ds = COCO()
    coco_ds.dataset = convert_records_to_coco_style(records)
    coco_ds.createIndex()
    return coco_ds
Example 10
def convert_to_coco_api(ds):
    """

  :param ds:
  :type ds:
  :return:
  :rtype:
  """
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        dataset["images"].append({
            "id": image_id,
            "height": img.shape[-2],
            "width": img.shape[-1]
        })
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = CocoPolyAnnotation(
                image_id=image_id,
                bbox=bboxes[i],
                category_id=labels[i],
                area=areas[i],
                iscrowd=iscrowd[i],
                id=ann_id,
                segmentation=None,
                keypoints=None,
                num_keypoints=None,
            )
            categories.add(labels[i])
            if "masks" in targets:
                ann.segmentation = mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann.keypoints = keypoints[i]
                ann.num_keypoints = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 11
  def evaluate(self):
    """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [12] representing the
        coco-style evaluation metrics.
    """
    if self.filename:
      coco_gt = COCO(self.filename)
    else:
      coco_gt = COCO()
      coco_gt.dataset = self.dataset
      coco_gt.createIndex()

    if self.testdev_dir:
      # Run on test-dev dataset.
      box_result_list = []
      for det in self.detections:
        box_result_list.append({
            'image_id': int(det[0]),
            'category_id': int(det[6]),
            'bbox': np.around(
                det[1:5].astype(np.float64), decimals=2).tolist(),
            'score': float(np.around(det[5], decimals=3)),
        })
      # NOTE: json.encoder.FLOAT_REPR has no effect on Python 3's json module.
      json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
      # Must be in the format of 'detections_test-dev2017_xxx_results'.
      fname = 'detections_test-dev2017_test_results'
      output_path = os.path.join(self.testdev_dir, fname + '.json')
      logging.info('Writing output json file to: %s', output_path)
      with tf.io.gfile.GFile(output_path, 'w') as fid:
        json.dump(box_result_list, fid)
      return np.array([-1.], dtype=np.float32)
    else:
      # Run on validation dataset.
      detections = np.array(self.detections)
      image_ids = list(set(detections[:, 0]))
      coco_dt = coco_gt.loadRes(detections)
      coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
      coco_eval.params.imgIds = image_ids
      coco_eval.evaluate()
      coco_eval.accumulate()
      coco_eval.summarize()
      coco_metrics = coco_eval.stats

      # Get per_class AP, see pycocotools/cocoeval.py:334
      # TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
      # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) by default
      precision = coco_eval.eval['precision'][:, :, :, 0, -1]
      ap_perclass = [0] * 100  # assuming at most 100 classes
      for c in range(precision.shape[-1]):  # iterate over all classes
        precision_c = precision[:, :, c]
        # Only consider values if > -1.
        precision_c = precision_c[precision_c > -1]
        ap_c = np.mean(precision_c) if precision_c.size else -1.
        ap_perclass[c] = ap_c
      coco_metrics = np.concatenate((coco_metrics, ap_perclass))
      return np.array(coco_metrics, dtype=np.float32)
Example 12
def convert_to_coco_api(ds, crop_inference_to_fov=False):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    cropper = tvdt.Cropper() if crop_inference_to_fov else None
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]

        if crop_inference_to_fov:
            img, targets, _ = _crop_all_to_fov(
                images=[img], targets=[targets], cropper=cropper)
            img = img[0]
            targets = targets[0]

        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 13
def convert_to_coco_api(ds, box_threshold):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets[
            "image_id"]  # ofekp: this used to be `targets["image_id"].item()` but image_id is no longer tensor in IMATDataset
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["bbox"]  # YXYX
        bboxes = bboxes[:, [1, 0, 3, 2]]  # XYXY
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()  # XYWH
        # ofekp: this used to be targets['labels'].tolist(); IMATDataset stores
        # the class labels under 'cls' (shifted by one for effdet training)
        labels = targets['cls'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 14
def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in tqdm.tqdm(range(len(ds)),
                             desc="Loading groundtruths for evaluation"):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]

        # DEBUG: if there is at most one key (only `image_id`) then there are
        # no annotations
        if len(targets) <= 1:
            continue

        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 15
def convert_to_coco_api(dataloader):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    dataloader = torch.utils.data.DataLoader(
        dataloader.dataset,
        batch_size=1,
        shuffle=False,
        num_workers=dataloader.num_workers,
        collate_fn=dataloader.collate_fn)
    for img, targets in dataloader:
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img = img[0]
        targets = targets[0]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 16

def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()

    print("Converting to COCO API")
    for img_idx in tqdm.trange(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        if bboxes.nelement() != 0:
            bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        if len(bboxes) == 1 and len(bboxes[0]) == 0:
            num_objs = 0
        else:
            num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]

    coco_ds.dataset = dataset
    coco_ds.createIndex()

    return coco_ds
Example 17
def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds.get_in_coco_format(img_idx)
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict["id"] = image_id
        height, width = targets["orig_size"].tolist()
        img_dict["height"] = height
        img_dict["width"] = width
        dataset["images"].append(img_dict)
        bboxes = targets["boxes"]
        # the boxes are in 0-1 format, in cxcywh format
        # let's convert it into the format expected by COCO api
        bboxes = box_cxcywh_to_xyxy(bboxes)
        bboxes = bboxes * torch.tensor([width, height, width, height],
                                       dtype=torch.float32)
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann["image_id"] = image_id
            ann["bbox"] = bboxes[i]
            ann["category_id"] = labels[i]
            categories.add(labels[i])
            ann["area"] = areas[i]
            ann["iscrowd"] = iscrowd[i]
            ann["id"] = ann_id
            if "masks" in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann["keypoints"] = keypoints[i]
                ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
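
`box_cxcywh_to_xyxy` comes from the surrounding (DETR-style) codebase; a minimal equivalent sketch of the conversion it performs:

import torch

def box_cxcywh_to_xyxy(b: torch.Tensor) -> torch.Tensor:
    # split normalized center-x, center-y, width, height and rebuild corners
    xc, yc, w, h = b.unbind(-1)
    return torch.stack([xc - 0.5 * w, yc - 0.5 * h,
                        xc + 0.5 * w, yc + 0.5 * h], dim=-1)
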
Example 18
    def create_coco_from_dataset(self, dataset):
        coco = COCO()
        coco_dataset = {}
        coco_dataset['annotations'] = self.create_annotations(dataset)
        coco_dataset['images'] = self.create_images()
        coco_dataset['categories'] = self.create_categories()
        coco.dataset = coco_dataset
        coco.createIndex()

        return coco
Example 19
def compile_results(results):
    post_process(results)

    coco = COCO()
    for result in results:
        coco_result = COCO()
        coco_result.dataset = result
        coco_result.createIndex()
        coco = merge_cocos(coco, coco_result)
    return coco
Example 20
    def _make_subset_loader(path):
        # COCO API has an 'unclosed file' warning, so load the JSON manually
        import json

        coco_api = COCO()
        with open(path, 'r') as f:
            dataset = json.load(f)

        coco_api.dataset = dataset
        coco_api.createIndex()
        return coco_api
Example 21

 def _build_coco_dataset(self, coco_annotations, coco_categories, coco_images):
     self._populate_id(coco_annotations)
     coco_gt = COCO()
     coco_gt.dataset = {
         "annotations": coco_annotations,
         "categories": coco_categories,
         "images": coco_images
     }
     coco_gt.createIndex()
     return coco_gt
Example 22
def _create_coco(categories, images, annotations):
    coco = COCO()
    coco.dataset = {
        'categories': categories,
        'images': images,
        'annotations': annotations
    }
    with NoStdout():
        coco.createIndex()
    return coco
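
`NoStdout` is defined elsewhere in that project; a minimal stand-in with the same effect (suppressing pycocotools' prints from createIndex()) could look like this:

import contextlib
import io

@contextlib.contextmanager
def NoStdout():
    # Redirect stdout to a throwaway buffer for the duration of the block.
    with contextlib.redirect_stdout(io.StringIO()):
        yield
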
Example 23
    def add_transplanted_dataset(self, dataset_dir, class_map=None):

        # Load dataset
        ann_filepath = os.path.join(dataset_dir, 'annotations.json')
        with open(ann_filepath, 'r') as f:
            dataset = json.load(f)

        # Map dataset classes
        self.replace_dataset_classes(dataset, class_map)

        taco_alla_coco = COCO()
        taco_alla_coco.dataset = dataset
        taco_alla_coco.createIndex()

        class_ids = sorted(taco_alla_coco.getCatIds())

        # Select images by class
        image_ids = []
        background_id = -1
        for i in class_ids:
            class_name = taco_alla_coco.loadCats(i)[0]["name"]
            if class_name != 'Background':
                image_ids.extend(list(taco_alla_coco.getImgIds(catIds=i)))
                # TODO: Select how many
            else:
                background_id = i
        image_ids = list(set(image_ids))

        if background_id > -1:
            class_ids.remove(background_id)

        # Retrieve list of training image ids
        train_image_ids = [x['id'] for x in self.image_info]

        nr_train_images_so_far = len(train_image_ids)

        # Add images
        transplant_counter = 0
        for i in image_ids:
            if taco_alla_coco.imgs[i]['source_id'] in train_image_ids:
                transplant_counter += 1
                self.add_image("taco",
                               image_id=i + nr_train_images_so_far,
                               path=os.path.join(
                                   dataset_dir,
                                   taco_alla_coco.imgs[i]['file_name']),
                               width=taco_alla_coco.imgs[i]["width"],
                               height=taco_alla_coco.imgs[i]["height"],
                               annotations=taco_alla_coco.loadAnns(
                                   taco_alla_coco.getAnnIds(imgIds=[i],
                                                            catIds=class_ids,
                                                            iscrowd=None)))

        print('Number of transplanted images added: ', transplant_counter, '/',
              len(image_ids))
Example 24
def convert_to_coco_api(ds):
    """
    Adapted from:
    https://github.com/pytorch/vision/blob/main/references/detection/coco_utils.py
    """
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        img_dict = {}

        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets, *_ = ds[img_idx]
        img_dict["height"] = img.shape[-2]
        img_dict["width"] = img.shape[-1]
        image_id = targets["image_id"].item()
        img_dict["id"] = image_id
        dataset["images"].append(img_dict)
        bboxes = targets["boxes"].clone()
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann["image_id"] = image_id
            ann["bbox"] = bboxes[i]
            ann["category_id"] = labels[i]
            categories.add(labels[i])
            ann["area"] = areas[i]
            ann["iscrowd"] = iscrowd[i]
            ann["id"] = ann_id
            if "masks" in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann["keypoints"] = keypoints[i]
                ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Example 25
    def coco(self):
        """
        :return: a COCO-like object that we can use to evaluate detection!
        """
        anns = []

        for index in range(self.__len__()):
            image_id = self.ids[index]
            # H, W = self.image_size
            objs, boxes, masks = [], [], []
            for object_data in self.image_id_to_objects[image_id]:
                objs.append(object_data['category_id'])
                x, y, w, h = object_data['bbox']
                x0 = x
                y0 = y
                x1 = (x + w)
                y1 = (y + h)
                boxes.append([x0, y0, x1, y1])

        # for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
        #   for cls, box in zip(cls_array.tolist(), box_array.tolist()):
            for cls, box in zip(objs, boxes):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': image_id,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person', 'id': i, 'name': name}
                           for i, name in enumerate(self.ind_to_classes)
                           if name != '__image__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco
Example 26
    def evaluate(self, pred_df, n_imgs=-1, iou_thres=0.4):
        """Evaluate predicted results.

        Arguments:
            pred_df: pd.DataFrame of predictions in the competition
                output format.
            n_imgs: int, number of images to use for calculating the
                result. All images are used if `n_imgs` <= 0.
            iou_thres: float, IoU threshold used for the evaluation.

        Returns:
            COCOeval object
        """
        
        if pred_df is not None:
            self.predictions["annotations"] = self.__gen_predictions(pred_df, self.image_ids)

        coco_ds = COCO()
        coco_ds.dataset = self.annotations
        coco_ds.createIndex()
        
        coco_dt = COCO()
        coco_dt.dataset = self.predictions
        coco_dt.createIndex()
        
        imgIds = sorted(coco_ds.getImgIds())
        
        if n_imgs > 0:
            # sample without replacement so no image is evaluated twice
            imgIds = np.random.choice(imgIds, n_imgs, replace=False)

        cocoEval = COCOeval(coco_ds, coco_dt, 'bbox')
        cocoEval.params.imgIds  = imgIds
        cocoEval.params.useCats = True
        cocoEval.params.iouType = "bbox"
        cocoEval.params.iouThrs = np.array([iou_thres])

        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        
        return cocoEval
Example 27
def filter_accepted(coco, accepted=True):
    filtered = COCO()
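    # Deep-copy the source dataset so filtering never mutates the original COCO object.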
    filtered.dataset = copy.deepcopy(coco.dataset)

    annotations = []
    for ann in filtered.dataset["annotations"]:
        task = ann["completed_task"]
        if task["accepted"] == accepted:
            annotations.append(ann)
    filtered.dataset["annotations"] = annotations
    filtered.createIndex()
    return filtered
Example 28
    def convert_to_coco_api(self):
        """
        """
        print("in function convert_to_coco_api...")
        coco_ds = COCO()
        # annotation IDs need to start at 1, not 0, see torchvision issue #1530
        ann_id = 1
        dataset = {'images': [], 'categories': [], 'annotations': []}
        categories = set()
        for img_idx in range(len(self.val_dataset)):
            # find better way to get target
            # targets = ds.get_annotations(img_idx)
            img, targets = self.val_dataset[img_idx]
            image_id = targets["image_id"].item()
            img_dict = {}
            img_dict['id'] = image_id
            img_dict['height'] = img.shape[-2]
            img_dict['width'] = img.shape[-1]
            dataset['images'].append(img_dict)
            bboxes = targets["boxes"]

            bboxes = bboxes.tolist()
            labels = targets['labels'].tolist()
            areas = targets['area'].tolist()
            iscrowd = targets['iscrowd'].tolist()
            # if 'masks' in targets:
            #     masks = targets['masks']
            #     # make masks Fortran contiguous for coco_mask
            #     masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
            # if 'keypoints' in targets:
            #     keypoints = targets['keypoints']
            #     keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
            num_objs = len(bboxes)
            for i in range(num_objs):
                ann = {}
                ann['image_id'] = image_id
                ann['bbox'] = bboxes[i]
                ann['category_id'] = labels[i]
                categories.add(labels[i])
                ann['area'] = areas[i]
                ann['iscrowd'] = iscrowd[i]
                ann['id'] = ann_id
                # if 'masks' in targets:
                #     ann["segmentation"] = coco_mask.encode(masks[i].numpy())
                # if 'keypoints' in targets:
                #     ann['keypoints'] = keypoints[i]
                #     ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
                dataset['annotations'].append(ann)
                ann_id += 1
        dataset['categories'] = [{'id': i} for i in sorted(categories)]
        coco_ds.dataset = dataset
        coco_ds.createIndex()
        return coco_ds
Example 29

def create_coco_api(coco_records) -> COCO:
    """Create COCO dataset api

    Args:
        coco_records: Records in coco style (use convert_records_to_coco_style
            to convert records to coco style).
    """
    coco_ds = COCO()
    coco_ds.dataset = coco_records
    coco_ds.createIndex()

    return coco_ds
Example 30
def filter_by_task(coco, task_type="yesno"):
    filtered = COCO()
    filtered.dataset = copy.deepcopy(coco.dataset)

    annotations = []
    for ann in filtered.dataset["annotations"]:
        task = ann["completed_task"]
        if task["type"] == task_type:
            annotations.append(ann)
    filtered.dataset["annotations"] = annotations
    filtered.createIndex()
    return filtered
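
Since this filter and filter_accepted (Example 27) each return a fresh, re-indexed COCO object, they compose directly; a hedged sketch, assuming `coco` was built from a dataset whose annotations carry the `completed_task` field:

accepted_yesno = filter_by_task(filter_accepted(coco), task_type="yesno")
print(len(accepted_yesno.dataset["annotations"]))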