Example #1
class EvalCoco(object):
    def __init__(self,
                 coco,
                 processor,
                 annotations_inverse,
                 *,
                 max_per_image=20,
                 small_threshold=0.0):
        self.coco = coco
        self.processor = processor
        self.annotations_inverse = annotations_inverse
        self.max_per_image = max_per_image
        self.small_threshold = small_threshold

        self.predictions = []
        self.image_ids = []
        self.eval = None
        self.decoder_time = 0.0

    def stats(self, predictions=None, image_ids=None):
        from pycocotools.cocoeval import COCOeval
        if predictions is None:
            predictions = self.predictions
        if image_ids is None:
            image_ids = self.image_ids

        cat_ids = self.coco.getCatIds(catNms=['person'])
        print('cat_ids', cat_ids)

        coco_eval = self.coco.loadRes(predictions)

        self.eval = COCOeval(self.coco, coco_eval, iouType='keypoints')
        self.eval.params.catIds = cat_ids

        if image_ids is not None:
            print('image ids', image_ids)
            self.eval.params.imgIds = image_ids
        self.eval.evaluate()
        self.eval.accumulate()
        self.eval.summarize()
        return self.eval.stats

    @staticmethod
    def view_keypoints(image_cpu, annotations, gt):
        highlight = [5, 7, 9, 11, 13, 15]
        keypoint_painter = show.KeypointPainter(highlight=highlight)
        skeleton_painter = show.KeypointPainter(show_box=False,
                                                color_connections=True,
                                                markersize=1,
                                                linewidth=6)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])

        with show.canvas() as ax:
            ax.set_axis_off()
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            skeleton_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])

        instances_gt = None
        if gt:
            instances_gt = np.stack([a['keypoints'] for a in gt])

            # for test: overwrite prediction with true values
            # instances = instances_gt.copy()[:1]

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.keypoints(ax,
                                       instances_gt,
                                       skeleton=COCO_PERSON_SKELETON)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            show.white_screen(ax)
            keypoint_painter.keypoints(ax,
                                       instances_gt,
                                       color='lightgrey',
                                       skeleton=COCO_PERSON_SKELETON)
            keypoint_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])

    def from_predictions(self,
                         predictions,
                         meta,
                         debug=False,
                         gt=None,
                         image_cpu=None,
                         verbose=False,
                         category_id=1):
        image_id = int(meta['image_id'])
        self.image_ids.append(image_id)

        if debug:
            self.view_keypoints(image_cpu, predictions, gt)

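        # map decoder predictions back into original image coordinates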
        predictions = self.annotations_inverse(predictions, meta)
        if self.small_threshold:
            predictions = [
                pred for pred in predictions
                if pred.scale(v_th=0.01) >= self.small_threshold
            ]
        if len(predictions) > self.max_per_image:
            predictions = predictions[:self.max_per_image]
        image_annotations = []
        for pred in predictions:
            # avoid visible keypoints becoming invisible due to rounding
            v_mask = pred.data[:, 2] > 0.0
            pred.data[v_mask, 2] = np.maximum(0.01, pred.data[v_mask, 2])

            keypoints = np.around(pred.data, 2)
            keypoints[:, 2] = 2.0
            image_annotations.append({
                'image_id': image_id,
                'category_id': category_id,
                'keypoints': keypoints.reshape(-1).tolist(),
                'score': max(0.01, pred.score()),
            })

        # force at least one annotation per image (for pycocotools)
        if not image_annotations:
            image_annotations.append({
                'image_id': image_id,
                'category_id': category_id,
                'keypoints': np.zeros((17 * 3, )).tolist(),
                'score': 0.01,
            })

        if debug:
            self.stats(image_annotations, [image_id])
            if verbose:
                print('detected', image_annotations, len(image_annotations))
                oks = self.eval.computeOks(image_id, category_id)
                oks[oks < 0.5] = 0.0
                print('oks', oks)
                print(
                    'evaluate',
                    self.eval.evaluateImg(image_id, category_id, (0, 1e5**2),
                                          20))
            print(meta)

        self.predictions += image_annotations

    def write_predictions(self, filename):
        predictions = [
            {k: v for k, v in annotation.items()
             if k in ('image_id', 'category_id', 'keypoints', 'score')}
            for annotation in self.predictions
        ]
        with open(filename + '.pred.json', 'w') as f:
            json.dump(predictions, f)
        print('wrote {}'.format(filename + '.pred.json'))
        with zipfile.ZipFile(filename + '.zip', 'w') as myzip:
            myzip.write(filename + '.pred.json', arcname='predictions.json')
        print('wrote {}'.format(filename + '.zip'))
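
A minimal driver sketch for this class, assuming annotations_inverse maps decoder annotations back to original image coordinates and each prediction object exposes data, score() and scale(); val_loader and decode below are placeholders:

from pycocotools.coco import COCO

coco_gt = COCO('person_keypoints_val2017.json')          # ground-truth file (placeholder path)
eval_coco = EvalCoco(coco_gt, processor=None,
                     annotations_inverse=annotations_inverse)

for image, meta in val_loader:                           # placeholder data loader
    predictions = decode(image)                          # placeholder decoder call
    eval_coco.from_predictions(predictions, meta)

eval_coco.stats()                                        # prints the COCO keypoint AP/AR summary
eval_coco.write_predictions('val-run')                   # writes val-run.pred.json and val-run.zip
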
Example #2
class EvalCoco(object):
    def __init__(self, coco, processor, keypoint_sets_inverse, skeleton=None):
        self.coco = coco
        self.processor = processor
        self.keypoint_sets_inverse = keypoint_sets_inverse
        self.skeleton = skeleton or COCO_PERSON_SKELETON

        self.predictions = []
        self.image_ids = []
        self.eval = None
        self.decoder_time = 0.0

    def stats(self, predictions=None, image_ids=None):
        from pycocotools.cocoeval import COCOeval
        if predictions is None:
            predictions = self.predictions
        if image_ids is None:
            image_ids = self.image_ids

        cat_ids = self.coco.getCatIds(catNms=['person'])
        print('cat_ids', cat_ids)

        coco_eval = self.coco.loadRes(predictions)

        self.eval = COCOeval(self.coco, coco_eval, iouType='keypoints')
        self.eval.params.catIds = cat_ids

        if image_ids is not None:
            print('image ids', image_ids)
            self.eval.params.imgIds = image_ids
        self.eval.evaluate()
        self.eval.accumulate()
        self.eval.summarize()
        return self.eval.stats

    def view_keypoints(self, image_cpu, annotations, gt):
        highlight = [5, 7, 9, 11, 13, 15]
        keypoint_painter = show.KeypointPainter(skeleton=self.skeleton, highlight=highlight)
        skeleton_painter = show.KeypointPainter(skeleton=self.skeleton,
                                                show_box=False, color_connections=True,
                                                markersize=1, linewidth=6)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.annotations(ax, annotations)

        with show.canvas() as ax:
            ax.set_axis_off()
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            skeleton_painter.annotations(ax, [ann for ann in annotations if ann.score() > 0.1])

        instances_gt = None
        if gt:
            instances_gt = np.stack([a['keypoints'] for a in gt])

            # for test: overwrite prediction with true values
            # instances = instances_gt.copy()[:1]

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.keypoints(ax, instances_gt)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            show.white_screen(ax)
            keypoint_painter.keypoints(ax, instances_gt, color='lightgrey')
            keypoint_painter.annotations(ax, [ann for ann in annotations if ann.score() > 0.01])

    def from_fields(self, fields, meta,
                    debug=False, gt=None, image_cpu=None, verbose=False,
                    category_id=1):
        if image_cpu is not None:
            self.processor.set_cpu_image(None, image_cpu)

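        # decode network fields into keypoint annotations (decoder time tracked separately)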
        start = time.time()
        annotations = self.processor.annotations(fields, meta)[:20]
        self.decoder_time += time.time() - start

        if isinstance(meta, (list, tuple)):
            meta = meta[0]

        image_id = int(meta['image_id'])
        self.image_ids.append(image_id)

        if debug:
            self.view_keypoints(image_cpu, annotations, gt)

        instances, scores = self.processor.keypoint_sets_from_annotations(annotations)
        instances = self.keypoint_sets_inverse(instances, meta)
        image_annotations = []
        for instance, score in zip(instances, scores):
            keypoints = np.around(instance, 2)
            keypoints[:, 2] = 2.0
            image_annotations.append({
                'image_id': image_id,
                'category_id': category_id,
                'keypoints': keypoints.reshape(-1).tolist(),
                'score': score,
            })

        # force at least one annotation per image (for pycocotools)
        if not image_annotations:
            image_annotations.append({
                'image_id': image_id,
                'category_id': category_id,
                'keypoints': np.zeros((17*3,)).tolist(),
                'score': 0.0,
            })

        if debug:
            self.stats(image_annotations, [image_id])
            if verbose:
                print('detected', image_annotations, len(image_annotations))
                oks = self.eval.computeOks(image_id, category_id)
                oks[oks < 0.5] = 0.0
                print('oks', oks)
                print('evaluate', self.eval.evaluateImg(image_id, category_id, (0, 1e5 ** 2), 20))
            print(meta)

        self.predictions += image_annotations

    def write_predictions(self, filename):
        predictions = [
            {k: v for k, v in annotation.items()
             if k in ('image_id', 'category_id', 'keypoints', 'score')}
            for annotation in self.predictions
        ]
        with open(filename + '.json', 'w') as f:
            json.dump(predictions, f)
        print('wrote {}'.format(filename + '.json'))
        with zipfile.ZipFile(filename + '.zip', 'w') as myzip:
            myzip.write(filename + '.json', arcname='predictions.json')
        print('wrote {}'.format(filename + '.zip'))
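
A similar sketch for this older interface, where processor is assumed to be the project's decoder (providing set_cpu_image, annotations and keypoint_sets_from_annotations) and keypoint_sets_inverse its coordinate-inverse function; field_loader is a placeholder:

from pycocotools.coco import COCO

coco_gt = COCO('person_keypoints_val2017.json')
eval_coco = EvalCoco(coco_gt, processor, keypoint_sets_inverse)

for image_cpu, fields, meta in field_loader:             # placeholder source of network fields
    eval_coco.from_fields(fields, meta, image_cpu=image_cpu)

print('decoder time: {:.1f}s'.format(eval_coco.decoder_time))
eval_coco.stats()
eval_coco.write_predictions('val-run')                   # writes val-run.json and val-run.zip
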
Example #3
class MapEvaluator:
    def __init__(self, gt_json, cat_ids):
        # Eval options
        self.iou_threshs = [.5]
        self.max_dets = [1, 10, 100]

        self.cat_ids = cat_ids

        self.coco_gt = COCO(gt_json)

    def _init_coco_eval(self, dt_json_data):
        coco_dt = self.coco_gt.loadRes(dt_json_data)

        self.coco_eval = COCOeval(self.coco_gt, coco_dt, 'bbox')
        self.coco_eval.params.iouThrs = np.array(self.iou_threshs)
        self.coco_eval.params.maxDets = self.max_dets
        self.coco_eval.params.catIds = self.cat_ids

    def do_eval(self, dt_json_data):
        self._init_coco_eval(dt_json_data)

        self.coco_eval.evaluate()
        self.coco_eval.accumulate()

    def do_eval_and_print(self, dt_json_data):
        self._init_coco_eval(dt_json_data)

        self.coco_eval.evaluate()
        self.coco_eval.accumulate()
        self.coco_eval.summarize()

    def get_avg_precision_recall(self, t=0, a=0, m=-1):
        p = self.coco_eval.eval['precision'][t, :, :, a, m]
        r = self.coco_eval.eval['recall'][t, :, a, m]

        # Removing -1 entries for classes that have no GT objects.
        p = p[:, np.where(r > -1)[0]]
        r = r[r > -1]
        assert (np.all(p >= 0.) and np.all(r >= 0.))

        ap = p.mean(axis=1)
        ar = r.mean()

        return ap, ar

    def evaluateImg(self, imgId, catId, aRng=(-np.inf, np.inf), maxDet=100):
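        # restrict matching to a single image and category and return the
        # detection ids together with their matched GT ids per IoU threshold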
        p = self.coco_eval.params
        # force bounding-box IoU evaluation
        p.iouType = 'bbox'
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))

        p.maxDets = sorted(p.maxDets)
        self.coco_eval.params = p

        self.coco_eval._prepare()

        self.coco_eval.ious = {
            (imgId, catId): self.coco_eval.computeIoU(imgId, catId)
        }
        img_eval = self.coco_eval.evaluateImg(imgId, catId, aRng, maxDet)
        inds = img_eval['dtIds']
        matches = img_eval['dtMatches']

        return inds, matches
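
A minimal sketch of running this evaluator; the file names are placeholders and detections.json is assumed to be a standard COCO results list with image_id, category_id, bbox and score per entry:

import json

evaluator = MapEvaluator('instances_val2017.json', cat_ids=[1])

with open('detections.json') as f:
    detections = json.load(f)

evaluator.do_eval_and_print(detections)

# precision averaged over classes at each recall threshold, and mean recall,
# both at the single configured IoU threshold of 0.5
ap, ar = evaluator.get_avg_precision_recall()

# per-image matching detail for one (placeholder) image id and category
det_ids, matches = evaluator.evaluateImg(imgId=42, catId=1)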