Code Example #1
File: cocoMetrix.py Project: qtw1998/COCOAPI
    def _produce_result_json(self):
        """return loadRes
        """
        self._save_json(self.result)
        _tmp_result = COCO()
        _tmp_result.dataset['images'] = [img for img in self.coco_gt.dataset['images']]
        self.ann_in_detections = json.load(open(self.result))
        assert type(self.ann_in_detections) == list, 'results is not an array of objects'
        annsImgId_list = [ann['image_id'] for ann in self.ann_in_detections]
        assert set(annsImgId_list) == (set(annsImgId_list) & set(self.coco_gt.getImgIds())), \
                "Results do not correspond to current coco set"
        if 'bbox' in self.ann_in_detections[0] and self.ann_in_detections[0]['bbox'] != []:
            _tmp_result.dataset['categories'] = copy.deepcopy(self.coco_gt.dataset['categories'])
            for id, ann in enumerate(self.ann_in_detections):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]  # box corners (computed but unused below)
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
        _tmp_result.dataset['annotations'] = self.ann_in_detections
        _tmp_result.createIndex()
        return _tmp_result
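A minimal usage sketch (the instance name `metric` is illustrative, and the 'bbox' task is assumed from the fields this method touches); the returned COCO object plugs straight into COCOeval as the detection side:

from pycocotools.cocoeval import COCOeval

coco_dt = metric._produce_result_json()               # detections as a COCO object
coco_eval = COCOeval(metric.coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                                 # prints the 12 COCO metrics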
Code Example #2
            def calc_pred(coco, anns):
                import numpy as np
                import copy

                pred = COCO()
                pred.dataset["images"] = [
                    img for img in coco.dataset["images"]
                ]

                annsImgIds = [ann["image_id"] for ann in anns]
                assert set(annsImgIds) == (set(annsImgIds)
                                           & set(coco.getImgIds()))

                pred.dataset["categories"] = copy.deepcopy(
                    coco.dataset["categories"])
                for id, ann in enumerate(anns):
                    s = ann["keypoints"]
                    x = s[0::3]
                    y = s[1::3]
                    x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                    ann["area"] = (x1 - x0) * (y1 - y0)
                    ann["id"] = id + 1
                    ann["bbox"] = [x0, y0, x1 - x0, y1 - y0]

                pred.dataset["annotations"] = anns
                pred.createIndex()
                return pred
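The slicing above relies on COCO's keypoint layout: a flat list of (x, y, visibility) triplets, so `s[0::3]` selects the x coordinates and `s[1::3]` the y coordinates. A toy check with illustrative values:

s = [10, 20, 2,  30, 40, 2,  50, 60, 1]  # three keypoints as (x, y, v) triplets
assert s[0::3] == [10, 30, 50]  # x coordinates
assert s[1::3] == [20, 40, 60]  # y coordinates
assert s[2::3] == [2, 2, 1]     # visibility flags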
Code Example #3
def tf_data_to_COCO(ds: tf.data.Dataset, class2idx: Mapping[str, int]) -> COCO:

    gt_coco: dict = dict(images=[], annotations=[])
    image_id = 1
    annot_id = 1

    # Create COCO categories
    categories = [
        dict(supercategory='instance', id=i, name=n)
        for n, i in class2idx.items()
    ]
    gt_coco['categories'] = categories

    for image, (labels, bbs) in ds.unbatch():
        h, w = image.shape[0:2]
        im_annot, annots = _COCO_gt_annot(image_id, annot_id, (h, w), labels,
                                          bbs)
        gt_coco['annotations'].extend(annots)
        gt_coco['images'].append(im_annot)

        annot_id += len(annots)
        image_id += 1

    gtCOCO = COCO()
    gtCOCO.dataset = gt_coco
    gtCOCO.createIndex()

    return gtCOCO
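`_COCO_gt_annot` is not shown in this example; a plausible sketch of what it must return, assuming `bbs` holds absolute [x1, y1, x2, y2] boxes (the box format is an assumption, not confirmed by the source):

def _COCO_gt_annot(image_id, annot_id, im_shape, labels, bbs):
    # hypothetical reconstruction: one image dict plus one COCO annotation
    # dict per (label, box) pair
    h, w = im_shape
    im_annot = dict(id=image_id, height=int(h), width=int(w))
    annots = []
    for i, (label, bb) in enumerate(zip(labels, bbs)):
        x1, y1, x2, y2 = [float(v) for v in bb]
        annots.append(dict(id=annot_id + i,
                           image_id=image_id,
                           category_id=int(label),
                           bbox=[x1, y1, x2 - x1, y2 - y1],  # COCO uses [x, y, w, h]
                           area=(x2 - x1) * (y2 - y1),
                           iscrowd=0))
    return im_annot, annots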
Code Example #4
def convert_to_coco_api(ds):
    """

  :param ds:
  :type ds:
  :return:
  :rtype:
  """
    coco_ds = COCO()
    ann_id = 1  # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        dataset["images"].append({
            "id": image_id,
            "height": img.shape[-2],
            "width": img.shape[-1]
        })
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = CocoPolyAnnotation(
                image_id=image_id,
                bbox=bboxes[i],
                category_id=labels[i],
                area=areas[i],
                iscrowd=iscrowd[i],
                id=ann_id,
                segmentation=None,
                keypoints=None,
                num_keypoints=None,
            )
            categories.add(labels[i])
            if "masks" in targets:
                ann.segmentation = mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann.keypoints = keypoints[i]
                ann.num_keypoints = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
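`CocoPolyAnnotation` is not defined in this snippet. Since `createIndex()` indexes annotations like dicts (`ann['image_id']`), one plausible stand-in is a dict subclass with attribute access (a sketch, not the project's actual class):

class CocoPolyAnnotation(dict):
    # hypothetical stand-in: keyword-constructed, dict-indexable for
    # pycocotools, and attribute-assignable (ann.segmentation = ...)
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value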
Code Example #5
File: mAP.py Project: Khoacannotcode/VinAI_2020
    def evaluate(self, json_file_pred, n_imgs=-1, iou_thres=0.4):
        self.predictions["annotations"] = json_file_pred['annotations']

        coco_ds = COCO()
        coco_ds.dataset = self.annotations
        coco_ds.createIndex()
        
        coco_dt = COCO()
        coco_dt.dataset = self.predictions
        coco_dt.createIndex()
        
        imgIds = sorted(coco_ds.getImgIds())
        
        if n_imgs > 0:
            imgIds = np.random.choice(imgIds, n_imgs, replace=False)  # sample distinct image ids

        cocoEval = COCOeval(coco_ds, coco_dt, 'bbox')
        cocoEval.params.imgIds  = imgIds
        cocoEval.params.useCats = True
        cocoEval.params.iouType = "bbox"
        cocoEval.params.iouThrs = np.array([iou_thres])

        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        
        return cocoEval
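A hypothetical call, assuming the predictions file follows the same COCO-style schema as `self.annotations` (the file name and instance name are placeholders):

import json

with open('predictions.json') as f:
    preds = json.load(f)
coco_eval = evaluator.evaluate(preds, n_imgs=500, iou_thres=0.4)
print(coco_eval.stats[0])  # AP at the chosen IoU threshold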
Code Example #6
def split_coco(coco, size=10000):
    images = coco.dataset["images"]
    annotations = coco.dataset["annotations"]
    categories = coco.dataset["categories"]

    random.seed(4)
    random.shuffle(images)
    num = math.ceil(len(images) / size)

    cocos = []
    for i in range(num):
        imgs = []
        anns = []
        for img in images[i * size:(i + 1) * size]:
            imgs.append(img)

            annIds = coco.getAnnIds(imgIds=[img["id"]])
            anns.extend(coco.loadAnns(annIds))

        c = COCO()
        c.dataset["images"] = imgs
        c.dataset["annotations"] = anns
        c.dataset["categories"] = categories
        c.createIndex()
        cocos.append(c)
    return cocos
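A usage sketch with a placeholder annotation file; each shard is a self-contained COCO object:

from pycocotools.coco import COCO

coco = COCO('instances_train2017.json')  # illustrative path
for i, shard in enumerate(split_coco(coco, size=10000)):
    print(f'shard {i}: {len(shard.dataset["images"])} images, '
          f'{len(shard.dataset["annotations"])} annotations')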
Code Example #7
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!
        """
        anns = []
        for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            for cls, box in zip(cls_array.tolist(), box_array.tolist()):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': i,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person', 'id': i, 'name': name}
                           for i, name in enumerate(self.ind_to_classes)
                           if name != '__background__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco
Code Example #8
    def compute(self):
        for valid_batch_num in range(len(self._keypoints_gt_accumulated)):
            self.append_to_results(
                data=self._keypoints_dt_accumulated[valid_batch_num], gt=False)
            self.append_to_results(
                data=self._keypoints_gt_accumulated[valid_batch_num], gt=True)
            self._image_counter += len(
                self._keypoints_gt_accumulated[valid_batch_num])

        coco_gt = COCO()
        coco_gt.dataset = self._keypoints_gt
        coco_gt.createIndex()

        json_dt_res_file = 'temp.json'

        with open(json_dt_res_file, "w+") as f:
            json.dump(self._keypoints_dt, f)
        coco_dt = coco_gt.loadRes(resFile=json_dt_res_file)

        coco_eval = COCOeval(coco_gt, coco_dt, 'keypoints')
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        stats = coco_eval.stats
        print(stats)
        self.reset_state()

        return stats[0]
Code Example #9
File: subcoco_utils.py Project: bguan/mcbbox
        def toCOCO(rec: dict) -> COCO:
            coco = COCO()
            img_id = int(rec.get('image_id',
                                 0))  # could be tensor, cast to int
            coco.dataset['images'] = [{
                'id': img_id,
                'file_name': f'{img_id:012d}.jpg',
                'width': width,
                'height': height
            }]
            coco.dataset['categories'] = []
            coco.dataset['annotations'] = []
            scores = rec.get('scores', [])
            anno_ids = rec.get('ids', [])
            for bi, b in enumerate(listify(rec['boxes'])):
                x, y, w, h = b
                cat_id = int(rec['labels'][bi])
                anno_id = int(anno_ids[bi]) if bi < len(anno_ids) else 0
                s = float(scores[bi]) if bi < len(scores) else 0.
                coco.dataset["annotations"].append({
                    'id': anno_id,
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': b,
                    'iscrowd': 0,
                    'area': w * h,
                    'score': s
                })
                coco.dataset['categories'].append({'id': cat_id})

            coco.createIndex()
            return coco
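`listify` is not shown; a minimal stand-in that tolerates tensors, ndarrays, and plain lists (an assumption about its behavior):

def listify(x):
    # hypothetical helper: normalize tensors/ndarrays/lists to a list of rows
    return x.tolist() if hasattr(x, 'tolist') else list(x)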
Code Example #10
File: main.py Project: codss-ctrl/ddit_python
def visualize(filename, annotations, threshold=0.2):
    for annotation in annotations:
      
        keypoints = np.asarray(annotation['keypoints']).reshape(-1, 3) 
        
        low_confidence = keypoints[:, -1] < threshold
        keypoints[low_confidence, :] = [0, 0, 0]
        annotation['keypoints'] = keypoints.reshape(-1).tolist()

    plt.imshow(mpimg.imread(filename))
    plt.axis('off')
    coco = COCO()
    coco.dataset = {
        "categories": [
            {
                "supercategory": "person",
                "id": 1,
                "name": "person",
                "keypoints": ["nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder",
                              "right_shoulder", "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip",
                              "right_hip", "left_knee", "right_knee", "left_ankle", "right_ankle"],
                "skeleton": [[1, 2], [1, 3], [2, 3], [2, 4], [3, 5], [4, 6], [5, 7], [6, 7], [6, 8], [6, 12], [7, 9],
                             [7, 13], [8, 10], [9, 11], [12, 13], [14, 12], [15, 13], [16, 14], [17, 15]]
            }
        ]
    }
    coco.createIndex()
    coco.showAnns(annotations)
    
    plt.savefig('kim02.jpg')
    img = cv2.imread('kim02.jpg', cv2.IMREAD_UNCHANGED)
    cv2.imshow('Result', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #11
def convert_to_coco_api(ds, bbox_fmt='voc'):
    """
    """
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        # to coco format: xmin, ymin, w, h
        if bbox_fmt.lower() == "voc":  # xmin, ymin, xmax, ymax
            bboxes[:, 2:] -= bboxes[:, :2]
        elif bbox_fmt.lower() == "yolo":  # xcen, ycen, w, h
            bboxes[:, :2] = bboxes[:, :2] - bboxes[:, 2:] / 2
        elif bbox_fmt.lower() == "coco":
            pass
        else:
            raise ValueError(f"bounding box format {bbox_fmt} not supported!")
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
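A quick check of the three input conventions on the same box (illustrative numbers), confirming each branch lands on COCO's [xmin, ymin, w, h]:

import torch

coco_box = torch.tensor([[10., 20., 30., 40.]])  # xmin, ymin, w, h
voc_box = torch.tensor([[10., 20., 40., 60.]])   # xmin, ymin, xmax, ymax
yolo_box = torch.tensor([[25., 40., 30., 40.]])  # xcen, ycen, w, h

voc_box[:, 2:] -= voc_box[:, :2]                         # 'voc' branch
yolo_box[:, :2] = yolo_box[:, :2] - yolo_box[:, 2:] / 2  # 'yolo' branch
assert torch.equal(voc_box, coco_box) and torch.equal(yolo_box, coco_box)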
Code Example #12
def _coco_eval(gts, detections, height, width, labelmap=("car", "pedestrian")):
    """simple helper function wrapping around COCO's Python API
    :params:  gts iterable of numpy boxes for the ground truth
    :params:  detections iterable of numpy boxes for the detections
    :params:  height int
    :params:  width int
    :params:  labelmap iterable of class labels
    """
    categories = [{
        "id": id + 1,
        "name": class_name,
        "supercategory": "none"
    } for id, class_name in enumerate(labelmap)]

    dataset, results = _to_coco_format(gts,
                                       detections,
                                       categories,
                                       height=height,
                                       width=width)

    coco_gt = COCO()
    coco_gt.dataset = dataset
    coco_gt.createIndex()
    coco_pred = coco_gt.loadRes(results)

    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = np.arange(1, len(gts) + 1, dtype=int)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Code Example #13
def coco_api_from_records(records):
    """ Create pycocotools COCO dataset from records
    """
    coco_ds = COCO()
    coco_ds.dataset = convert_records_to_coco_style(records)
    coco_ds.createIndex()
    return coco_ds
Code Example #14
    def evaluate(self):
        """Evaluate the model for one epoch using the provided parameters. 
           Return the epoch's average CIDEr score."""

        # Switch to validation mode
        self.encoder.eval()
        self.decoder.eval()

        cocoRes = COCO()
        anns = []

        # Disable gradient calculation because we are in inference mode
        with torch.no_grad():
            pbar = tqdm(self.val_loader)
            pbar.set_description('evaluating epoch {}'.format(self.epoch))
            for batch in pbar:
                images, img_id = batch[0], batch[3]

                # Move to GPU if CUDA is available
                if torch.cuda.is_available():
                    images = images.cuda()

                # Pass the inputs through the CNN-RNN model
                features = self.encoder(images).unsqueeze(1)
                for i in range(img_id.size()[0]):
                    feat = features[i].unsqueeze(0)
                    outputs = self.decoder.sample_beam_search(feat)
                    sentence = self.clean_sentence(outputs[0])
                    id = img_id[i].item()
                    # print('id: {}, cap: {}'.format(id, sentence))
                    anns.append({'image_id': id, 'caption': sentence})
             
        for id, ann in enumerate(anns):
            ann['id'] = id
    
        cocoRes.dataset['annotations'] = anns
        cocoRes.createIndex()

        cocoEval = COCOEvalCap(self.val_loader.coco_dataset.coco, cocoRes)
        imgIds = set([ann['image_id'] for ann in cocoRes.dataset['annotations']])
        cocoEval.params['image_id'] = imgIds
        cocoEval.evaluate()
        cider = cocoEval.eval['CIDEr']
        old_max = 0
        if len(self.cider) > 0:
            old_max = max(self.cider)

        if len(self.cider) < self.epoch:
            self.cider.append(cider)
        else:
            self.cider[self.epoch-1] = cider
        self.save()
        print("DEBUG: self.epoch: {}, self.cider: {}".format(self.epoch, self.cider))
        if cider > old_max:
            print('CIDEr improved: {:.2f} => {:.2f}'.format(old_max, cider))
            self.save_as(os.path.join("./models", "best-model.pkl"))

        return self.cider[self.epoch-1]
Code Example #15
File: coco_utils.py Project: zhangqinghua1008/NuCLS
def convert_to_coco_api(ds, crop_inference_to_fov=False):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    cropper = tvdt.Cropper() if crop_inference_to_fov else None
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]

        if crop_inference_to_fov:
            img, targets, _ = _crop_all_to_fov(
                images=[img], targets=[targets], cropper=cropper)
            img = img[0]
            targets = targets[0]

        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Code Example #16
def convert_to_coco_api(ds, box_threshold):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"]  # ofekp: this used to be `targets["image_id"].item()` but image_id is no longer a tensor in IMATDataset
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["bbox"]  # YXYX
        bboxes = bboxes[:, [1, 0, 3, 2]]  # XYXY
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()  # XYWH
        # if box_threshold == None:
        #     labels = targets['labels'].tolist()
        # else:
        labels = targets['cls'].tolist()  # ofekp: this used to be just targets['labels'].tolist(); subtracting one since in the dataset we added one for effdet training
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Code Example #17
File: coco_metric.py Project: swapnil3597/automl
  def evaluate(self):
    """Evaluates with detections from all images with COCO API.

    Returns:
      coco_metric: float numpy array with shape [12] representing the
        coco-style evaluation metrics.
    """
    if self.filename:
      coco_gt = COCO(self.filename)
    else:
      coco_gt = COCO()
      coco_gt.dataset = self.dataset
      coco_gt.createIndex()

    if self.testdev_dir:
      # Run on test-dev dataset.
      box_result_list = []
      for det in self.detections:
        box_result_list.append({
            'image_id': int(det[0]),
            'category_id': int(det[6]),
            'bbox': np.around(
                det[1:5].astype(np.float64), decimals=2).tolist(),
            'score': float(np.around(det[5], decimals=3)),
        })
      json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
      # Must be in the format of 'detections_test-dev2017_xxx_results'.
      fname = 'detections_test-dev2017_test_results'
      output_path = os.path.join(self.testdev_dir, fname + '.json')
      logging.info('Writing output json file to: %s', output_path)
      with tf.io.gfile.GFile(output_path, 'w') as fid:
        json.dump(box_result_list, fid)
      return np.array([-1.], dtype=np.float32)
    else:
      # Run on validation dataset.
      detections = np.array(self.detections)
      image_ids = list(set(detections[:, 0]))
      coco_dt = coco_gt.loadRes(detections)
      coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
      coco_eval.params.imgIds = image_ids
      coco_eval.evaluate()
      coco_eval.accumulate()
      coco_eval.summarize()
      coco_metrics = coco_eval.stats

      # Get per_class AP, see pycocotools/cocoeval.py:334
      # TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
      # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) by default
      precision = coco_eval.eval['precision'][:, :, :, 0, -1]
      ap_perclass = [0] * 100  # assuming at most 100 classes.
      for c in range(precision.shape[-1]):  # iterate over all classes
        precision_c = precision[:, :, c]
        # Only consider values if > -1.
        precision_c = precision_c[precision_c > -1]
        ap_c = np.mean(precision_c) if precision_c.size else -1.
        ap_perclass[c] = ap_c
      coco_metrics = np.concatenate((coco_metrics, ap_perclass))
      return np.array(coco_metrics, dtype=np.float32)
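The indexing above follows pycocotools' `eval['precision']` layout (T x R x K x A x M). A sketch of reading a single class's AP at IoU=0.5, assuming the default iouThrs grid where index 0 is 0.5:

iou_50_idx = 0  # default iouThrs = 0.5:0.05:0.95, so index 0 is IoU=0.5
precision_50 = coco_eval.eval['precision'][iou_50_idx, :, :, 0, -1]  # R x K
cls_idx = 0     # position of the class in params.catIds
valid = precision_50[:, cls_idx] > -1
ap50 = precision_50[valid, cls_idx].mean() if valid.any() else -1.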
Code Example #18
def convert_to_coco_api(dataloader):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    dataloader = torch.utils.data.DataLoader(
        dataloader.dataset,
        batch_size=1,
        shuffle=False,
        num_workers=dataloader.num_workers,
        collate_fn=dataloader.collate_fn)
    for img, targets in dataloader:
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img = img[0]
        targets = targets[0]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Code Example #19
def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()
    for img_idx in tqdm.tqdm(range(len(ds)),
                             desc="Loading groundtruths for evaluation"):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]

        # DEBUG: if there is at most one key (only `image_id`) then there are
        # no annotations
        if len(targets) <= 1:
            continue

        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1
    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Code Example #20
def convert_to_coco_api(ds):
    coco_ds = COCO()
    ann_id = 1  # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    dataset = {'images': [], 'categories': [], 'annotations': []}
    categories = set()

    print("Converting to COCO API")
    for img_idx in tqdm.trange(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds[img_idx]
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict['id'] = image_id
        img_dict['height'] = img.shape[-2]
        img_dict['width'] = img.shape[-1]
        dataset['images'].append(img_dict)
        bboxes = targets["boxes"]
        if bboxes.nelement() != 0:
            bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets['labels'].tolist()
        areas = targets['area'].tolist()
        iscrowd = targets['iscrowd'].tolist()
        if 'masks' in targets:
            masks = targets['masks']
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if 'keypoints' in targets:
            keypoints = targets['keypoints']
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        if len(bboxes) == 1 and len(bboxes[0]) == 0:
            num_objs = 0
        else:
            num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann['image_id'] = image_id
            ann['bbox'] = bboxes[i]
            ann['category_id'] = labels[i]
            categories.add(labels[i])
            ann['area'] = areas[i]
            ann['iscrowd'] = iscrowd[i]
            ann['id'] = ann_id
            if 'masks' in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if 'keypoints' in targets:
                ann['keypoints'] = keypoints[i]
                ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
            dataset['annotations'].append(ann)
            ann_id += 1

    dataset['categories'] = [{'id': i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()

    return coco_ds
Code Example #21
File: eval_main.py Project: Linyou/STDet
def evaluate_curve(annFile, resFile, annType, score_thr=0.2, split=10):
    cocoGt = COCO(annFile)
    cocoDt = cocoGt.loadRes(resFile)
    anns = cocoDt.dataset['annotations']
    anns = [ann for ann in anns if ann['score'] >= score_thr]

    # cocoDt = cocoGt.loadRes(anns)
    # cocoEval = COCOeval(cocoGt,cocoDt,annType)
    # cocoEval.evaluate()
    # cocoEval.accumulate()
    # cocoEval.summarize()
    # for i in range(0, 100, 2):
    #     pr = np.mean(cocoEval.eval['precision'][0,i,:,0,2])
    #     print('ACC: {:.3f}, RECALL: {:.3f}'.format(pr, (i + 1) / 100.0))
    # prs = [np.mean(cocoEval.eval['precision'][0,i,:,0,2]) for i in range(0, 101)]
    # x = np.arange(0.0, 1.01, 0.01)
    # plt.xlabel('recall')
    # plt.ylabel('precision')
    # plt.xlim(0, 1.0)
    # plt.ylim(0, 1.01)
    # plt.grid(True)
    # plt.plot(x, prs)
    # plt.show()

    anns = sorted(anns, key=lambda ann: -ann['score'])
    n = int(len(anns) / split)  # step size between operating points
    # cumulative top-scoring prefixes, one per operating point on the curve
    anns_chunks = [anns[0:i + n] for i in range(0, len(anns), n)]
    num_gt = len(cocoGt.anns)
    output = []
    for chunk in anns_chunks:
        tmp_json_dict = {'images': copy.deepcopy(cocoGt.dataset['images']),
                         'categories':  copy.deepcopy(cocoGt.dataset['categories']),
                         'annotations': chunk}
        with open('tmp.json', 'w') as outfile:
            json.dump(tmp_json_dict, outfile)
        cocoDt = COCO('tmp.json')
        cocoDt.dataset['annotations'] = chunk
        cocoDt.createIndex()
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.evaluate()

        num_hit = 0
        K0 = len(cocoEval.params.catIds)
        A0 = len(cocoEval._paramsEval.areaRng)
        I0 = len(cocoEval._paramsEval.imgIds)
        res_list = [cocoEval.evalImgs[k * A0 * I0 + a * I0 + i] 
                    for i in range(0, I0)
                    for a in [1, 2, 3]
                    for k in range(0, K0)]
    res_list = [res for res in res_list if res is not None]
        for res in res_list:
            num_hit += np.count_nonzero(res['gtMatches'][0]) # .5 IOU hits
        num_hit /= 3
        output.append((num_hit / len(chunk), num_hit / num_gt))
    for out in output:
        print('ACC: {:.3f}, RECALL: {:.3f}'.format(out[0], out[1]))
Code Example #22
def convert_to_coco_api(ds):
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets = ds.get_in_coco_format(img_idx)
        image_id = targets["image_id"].item()
        img_dict = {}
        img_dict["id"] = image_id
        height, width = targets["orig_size"].tolist()
        img_dict["height"] = height
        img_dict["width"] = width
        dataset["images"].append(img_dict)
        bboxes = targets["boxes"]
        # the boxes are in 0-1 format, in cxcywh format
        # let's convert it into the format expected by COCO api
        bboxes = box_cxcywh_to_xyxy(bboxes)
        bboxes = bboxes * torch.tensor([width, height, width, height],
                                       dtype=torch.float32)
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann["image_id"] = image_id
            ann["bbox"] = bboxes[i]
            ann["category_id"] = labels[i]
            categories.add(labels[i])
            ann["area"] = areas[i]
            ann["iscrowd"] = iscrowd[i]
            ann["id"] = ann_id
            if "masks" in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann["keypoints"] = keypoints[i]
                ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
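`box_cxcywh_to_xyxy` is not shown here; the usual DETR-style helper is consistent with how it is used above (a sketch):

import torch

def box_cxcywh_to_xyxy(x):
    # split the last dim into centers and extents, then rebuild corners
    x_c, y_c, w, h = x.unbind(-1)
    b = [x_c - 0.5 * w, y_c - 0.5 * h,
         x_c + 0.5 * w, y_c + 0.5 * h]
    return torch.stack(b, dim=-1)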
Code Example #23
    def create_coco_from_dataset(self, dataset):
        coco = COCO()
        coco_dataset = {}
        coco_dataset['annotations'] = self.create_annotations(dataset)
        coco_dataset['images'] = self.create_images()
        coco_dataset['categories'] = self.create_categories()
        coco.dataset = coco_dataset
        coco.createIndex()

        return coco
Code Example #24
    def _build_coco_dataset(self, coco_annotations, coco_categories, coco_images):
        self._populate_id(coco_annotations)
        coco_gt = COCO()
        coco_gt.dataset = {
            "annotations": coco_annotations,
            "categories": coco_categories,
            "images": coco_images
        }
        coco_gt.createIndex()
        return coco_gt
Code Example #25
def _create_coco(categories, images, annotations):
    coco = COCO()
    coco.dataset = {
        'categories': categories,
        'images': images,
        'annotations': annotations
    }
    with NoStdout():
        coco.createIndex()
    return coco
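`NoStdout` is not defined in this snippet; a minimal context manager that would silence `createIndex()`'s progress prints (a sketch, not the project's actual implementation):

import io
import sys

class NoStdout:
    # temporarily swap sys.stdout for an in-memory buffer
    def __enter__(self):
        self._saved, sys.stdout = sys.stdout, io.StringIO()
        return self

    def __exit__(self, exc_type, exc, tb):
        sys.stdout = self._saved
        return False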
Code Example #26
File: extractor.py Project: Abhishek-katta/Cvat
    def _make_subset_loader(path):
        import json

        # load the file manually instead of COCO(path): the COCO API
        # has an 'unclosed file' warning
        coco_api = COCO()
        with open(path, 'r') as f:
            dataset = json.load(f)

        coco_api.dataset = dataset
        coco_api.createIndex()
        return coco_api
Code Example #27
File: results_get.py Project: jefequien/LabelMeTools
def compile_results(results):
    post_process(results)

    coco = COCO()
    for result in results:
        coco_result = COCO()
        coco_result.dataset = result
        coco_result.createIndex()
        coco = merge_cocos(coco, coco_result)
    return coco
Code Example #28
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!
        """
        anns = []

        for index in range(self.__len__()):
            image_id = self.ids[index]
            # H, W = self.image_size
            objs, boxes, masks = [], [], []
            for object_data in self.image_id_to_objects[image_id]:
                objs.append(object_data['category_id'])
                x, y, w, h = object_data['bbox']
                x0 = x
                y0 = y
                x1 = (x + w)
                y1 = (y + h)
                boxes.append([x0, y0, x1, y1])

            # for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            #     for cls, box in zip(cls_array.tolist(), box_array.tolist()):
            for cls, box in zip(objs, boxes):
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    # 'image_id': i,
                    'image_id': image_id,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person', 'id': i, 'name': name}
                           for i, name in enumerate(self.ind_to_classes)
                           if name != '__image__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco
Code Example #29
def convert_to_coco_api(ds):
    """
    Adapted from:
    https://github.com/pytorch/vision/blob/main/references/detection/coco_utils.py
    """
    coco_ds = COCO()
    # annotation IDs need to start at 1, not 0, see torchvision issue #1530
    ann_id = 1
    dataset = {"images": [], "categories": [], "annotations": []}
    categories = set()
    for img_idx in range(len(ds)):
        img_dict = {}

        # find better way to get target
        # targets = ds.get_annotations(img_idx)
        img, targets, *_ = ds[img_idx]
        img_dict["height"] = img.shape[-2]
        img_dict["width"] = img.shape[-1]
        image_id = targets["image_id"].item()
        img_dict["id"] = image_id
        dataset["images"].append(img_dict)
        bboxes = targets["boxes"].clone()
        bboxes[:, 2:] -= bboxes[:, :2]
        bboxes = bboxes.tolist()
        labels = targets["labels"].tolist()
        areas = targets["area"].tolist()
        iscrowd = targets["iscrowd"].tolist()
        if "masks" in targets:
            masks = targets["masks"]
            # make masks Fortran contiguous for coco_mask
            masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
        if "keypoints" in targets:
            keypoints = targets["keypoints"]
            keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
        num_objs = len(bboxes)
        for i in range(num_objs):
            ann = {}
            ann["image_id"] = image_id
            ann["bbox"] = bboxes[i]
            ann["category_id"] = labels[i]
            categories.add(labels[i])
            ann["area"] = areas[i]
            ann["iscrowd"] = iscrowd[i]
            ann["id"] = ann_id
            if "masks" in targets:
                ann["segmentation"] = coco_mask.encode(masks[i].numpy())
            if "keypoints" in targets:
                ann["keypoints"] = keypoints[i]
                ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
            dataset["annotations"].append(ann)
            ann_id += 1
    dataset["categories"] = [{"id": i} for i in sorted(categories)]
    coco_ds.dataset = dataset
    coco_ds.createIndex()
    return coco_ds
Code Example #30
    def add_transplanted_dataset(self, dataset_dir, class_map=None):

        # Load dataset
        ann_filepath = os.path.join(dataset_dir, 'annotations.json')
        dataset = json.load(open(ann_filepath, 'r'))

        # Map dataset classes
        self.replace_dataset_classes(dataset, class_map)

        taco_alla_coco = COCO()
        taco_alla_coco.dataset = dataset
        taco_alla_coco.createIndex()

        class_ids = sorted(taco_alla_coco.getCatIds())

        # Select images by class
        # Add images
        image_ids = []
        background_id = -1
        for i in class_ids:
            class_name = taco_alla_coco.loadCats(i)[0]["name"]
            if class_name != 'Background':
                image_ids.extend(list(taco_alla_coco.getImgIds(catIds=i)))
                # TODO: Select how many
            else:
                background_id = i
        image_ids = list(set(image_ids))

        if background_id > -1:
            class_ids.remove(background_id)

        # Retrieve list of training image ids
        train_image_ids = [x['id'] for x in self.image_info]

        nr_train_images_so_far = len(train_image_ids)

        # Add images
        transplant_counter = 0
        for i in image_ids:
            if taco_alla_coco.imgs[i]['source_id'] in train_image_ids:
                transplant_counter += 1
                self.add_image("taco",
                               image_id=i + nr_train_images_so_far,
                               path=os.path.join(
                                   dataset_dir,
                                   taco_alla_coco.imgs[i]['file_name']),
                               width=taco_alla_coco.imgs[i]["width"],
                               height=taco_alla_coco.imgs[i]["height"],
                               annotations=taco_alla_coco.loadAnns(
                                   taco_alla_coco.getAnnIds(imgIds=[i],
                                                            catIds=class_ids,
                                                            iscrowd=None)))

        print('Number of transplanted images added: ', transplant_counter, '/',
              len(image_ids))
Code Example #31
File: main.py Project: tony32769/mask_rcnn_pytorch
def validate(val_loader, model, i, silence=False):
    batch_time = AverageMeter()
    coco_gt = val_loader.dataset.coco
    coco_pred = COCO()
    coco_pred.dataset['images'] = [img for img in coco_gt.dataset['images']]
    coco_pred.dataset['categories'] = copy.deepcopy(coco_gt.dataset['categories'])
    id = 0

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (inputs, anns) in enumerate(val_loader):

        # forward images one by one (TODO: support batch mode later, or
        # multiprocess)
        for j, input in enumerate(inputs):
            input_anns = anns[j]  # anns of this input
            gt_bbox = np.vstack([ann['bbox'] + [ann['ordered_id']] for ann in input_anns])
            im_info = [[input.size(1), input.size(2),
                        input_anns[0]['scale_ratio']]]
            input_var = Variable(input.unsqueeze(0),
                                 requires_grad=False).cuda()

            cls_prob, bbox_pred, rois = model(input_var, im_info)
            scores, pred_boxes = model.interpret_outputs(cls_prob, bbox_pred, rois, im_info)
            print(scores, pred_boxes)
            # for i in range(scores.shape[0]):


        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    coco_pred.createIndex()
    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = sorted(coco_gt.getImgIds())
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    print('iter: [{0}] '
          'Time {batch_time.avg:.3f} '
          'Val Stats: {1}'
          .format(i, coco_eval.stats,
                  batch_time=batch_time))

    return coco_eval.stats[0]
Code Example #32
File: salicon.py Project: caomw/salicon-api
    def createIndex(self):
        """
        Didn't change the original method, just call super.
        """
        return COCO.createIndex(self)