Example #1
 def _do_python_eval(self, _coco):
     coco_dt = _coco.loadRes(self._result_file)
     coco_eval = COCOeval(_coco, coco_dt)
     coco_eval.params.useSegm = False  # legacy flag; newer pycocotools takes iouType in the COCOeval constructor
     coco_eval.evaluate()
     coco_eval.accumulate()
     self._print_detection_metrics(coco_eval)
Example #2
def evaluate():
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
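Many of the examples below read metrics straight out of cocoEval.stats after summarize(). For iouType 'bbox' or 'segm' that array always holds twelve numbers in a fixed order; a minimal reference sketch, assuming cocoEval has been summarized as above (the label strings are descriptive only, not pycocotools API):

COCO_STATS_LABELS = [
    'AP@[.50:.95]', 'AP@.50', 'AP@.75',         # stats[0:3]: average precision
    'AP small', 'AP medium', 'AP large',        # stats[3:6]: AP by object area
    'AR@1', 'AR@10', 'AR@100',                  # stats[6:9]: average recall by maxDets
    'AR small', 'AR medium', 'AR large',        # stats[9:12]: AR by object area
]
stats_by_name = dict(zip(COCO_STATS_LABELS, cocoEval.stats))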
Example #3
def coco_evaluate(json_dataset, res_file, image_ids):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
Example #4
def _do_segmentation_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
Example #5
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #6
def cocoval(detected_json):
    eval_json = config.eval_json
    eval_gt = COCO(eval_json)

    eval_dt = eval_gt.loadRes(detected_json)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')

    # cocoEval.params.imgIds = eval_gt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
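The commented-out line above is the default behaviour: params.imgIds starts out as every image in the ground truth. Setting it explicitly restricts evaluation to a subset; a small sketch reusing eval_gt and eval_dt from cocoval:

subset_ids = sorted(eval_gt.getImgIds())[:100]  # e.g. only the first 100 validation images
cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')
cocoEval.params.imgIds = subset_ids  # ids must match the 'id' field of the GT images
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()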
Example #7
 def _do_coco_eval(self, dtFile, output_dir):
     """
     Evaluate using COCO API
     """
     if self._image_set == 'train' or self._image_set == 'val':
         cocoGt = self._coco[0]
         cocoDt = COCO(dtFile)
         E = COCOeval(cocoGt, cocoDt)
         E.evaluate()
         E.accumulate()
         E.summarize()
Example #8
 def evaluate_detections(self, all_boxes, output_dir=None):
     resFile = self._write_coco_results_file(all_boxes)
     cocoGt = self._annotations
     cocoDt = cocoGt.loadRes(resFile)
     # running evaluation
     cocoEval = COCOeval(cocoGt,cocoDt)
     # useSegm should default to 0
     #cocoEval.params.useSegm = 0
     cocoEval.evaluate()
     cocoEval.accumulate()
     cocoEval.summarize()
Example #9
    def compute_ap(self):
        coco_res = self.loader.coco.loadRes(self.filename)

        cocoEval = COCOeval(self.loader.coco, coco_res)
        cocoEval.params.imgIds = self.loader.get_filenames()
        cocoEval.params.useSegm = False

        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval
Example #10
 def _do_detection_eval(self, res_file, output_dir):
   ann_type = 'bbox'
   coco_dt = self._COCO.loadRes(res_file)
   coco_eval = COCOeval(self._COCO, coco_dt)
   coco_eval.params.useSegm = (ann_type == 'segm')
   coco_eval.evaluate()
   coco_eval.accumulate()
   self._print_detection_eval_metrics(coco_eval)
   eval_file = osp.join(output_dir, 'detection_results.pkl')
   with open(eval_file, 'wb') as fid:
     pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
   print('Wrote COCO eval results to: {}'.format(eval_file))
Example #11
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    ann_type = 'keypoints'
    imgIds = json_dataset.COCO.getImgIds()
    imgIds.sort()
    coco_dt = json_dataset.COCO.loadRes(res_file)
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
    coco_eval.params.imgIds = imgIds
    coco_eval.evaluate()
    coco_eval.accumulate()
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    coco_eval.summarize()
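_do_keypoint_eval assumes res_file follows the COCO keypoint results format; a minimal sketch of one entry (all values illustrative):

keypoint_result = {
    "image_id": 42,
    "category_id": 1,  # 1 == person in COCO
    # 17 (x, y, visibility) triplets flattened into one 51-element list
    "keypoints": [230.0, 120.5, 2] * 17,
    "score": 0.9,
}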
Example #12
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
Example #13
def evaluate_coco(model, dataset, coco, config, eval_type="bbox", limit=None, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
        
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        if i % 10 == 0:
            print('Processed %d images' % i)
        # Load image
        image = dataset.load_image(image_id)
        # Run detection
        t = time.time()
        r = inference(image, model, config)
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    # Only evaluate for person.
    cocoEval.params.catIds = coco.getCatIds(catNms=['person']) 
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
Example #14
def validate(val_loader, model, i, silence=False):
    batch_time = AverageMeter()
    coco_gt = val_loader.dataset.coco
    coco_pred = COCO()
    coco_pred.dataset['images'] = [img for img in coco_gt.dataset['images']]
    coco_pred.dataset['categories'] = copy.deepcopy(coco_gt.dataset['categories'])
    id = 0

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (inputs, anns) in enumerate(val_loader):  # NOTE: rebinds the function argument i used in the final print

        # forward images one by one (TODO: support batch mode later, or
        # multiprocess)
        for j, input in enumerate(inputs):
            input_anns= anns[j] # anns of this input
            gt_bbox= np.vstack([ann['bbox'] + [ann['ordered_id']] for ann in input_anns])
            im_info= [[input.size(1), input.size(2),
                        input_anns[0]['scale_ratio']]]
            input_var= Variable(input.unsqueeze(0),
                                 requires_grad=False).cuda()

            cls_prob, bbox_pred, rois = model(input_var, im_info)
            scores, pred_boxes = model.interpret_outputs(cls_prob, bbox_pred, rois, im_info)
            print(scores, pred_boxes)
            # for i in range(scores.shape[0]):


        # measure elapsed time
        batch_time.update(time.time() - end)
        end= time.time()

    coco_pred.createIndex()
    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds= sorted(coco_gt.getImgIds())
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    print('iter: [{0}] '
          'Time {batch_time.avg:.3f} '
          'Val Stats: {1}'
          .format(i, coco_eval.stats,
                  batch_time=batch_time))

    return coco_eval.stats[0]
Example #15
def _do_eval(res_file, output_dir, _COCO, classes):
        # This function is borrowed from https://github.com/rbgirshick/fast-rcnn/ and modified
        ann_type = 'bbox'
        coco_dt = _COCO.loadRes(res_file)
        coco_eval = COCOeval(_COCO, coco_dt)
        coco_eval.params.useSegm = (ann_type == 'segm')
        coco_eval.evaluate()
        coco_eval.accumulate()
        _print_eval_metrics(coco_eval,classes)
        # Write the result file
        eval_file = osp.join(output_dir)
        eval_result = {}
        eval_result['precision'] = coco_eval.eval['precision']
        eval_result['recall'] = coco_eval.eval['recall']
        sio.savemat(eval_file,eval_result)
        print('Wrote COCO eval results to: {}'.format(eval_file))
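The precision array saved above is COCOeval's raw accumulator with axes (iouThr, recall, category, areaRng, maxDets), i.e. [TxRxKxAxM]. A small sketch reading per-class AP at IoU=0.5 out of it, assuming coco_eval and classes from _do_eval with classes[0] == '__background__':

import numpy as np

precision = coco_eval.eval['precision']  # shape [T, R, K, A, M]
t50 = int(np.where(np.isclose(coco_eval.params.iouThrs, 0.5))[0][0])
for k, cls_name in enumerate(classes[1:]):  # skip '__background__'
    p = precision[t50, :, k, 0, -1]  # area 'all', largest maxDets
    ap50 = np.mean(p[p > -1]) if (p > -1).any() else float('nan')
    print('{}: AP50 = {:.3f}'.format(cls_name, ap50))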
Example #16
def evaluate_predictions_on_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    import json

    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)

    from pycocotools.cocoeval import COCOeval

    coco_dt = coco_gt.loadRes(str(json_result_file))
    # coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
Example #17
def calc_coco_metrics(coco_annotations, predictions, classes):
  annotations = ObjectDetectorJson.convert_coco_to_toolbox_format(coco_annotations, classes)
  detections = []
  for annotation, prediction in zip(annotations, predictions):
    width, height = annotation['image_size']
    image_id = annotation['image_id']

    for obj_id, obj in enumerate(prediction):
      label = int(obj[1])
      score = float(obj[2])
      if obj_id != 0 and score == 0:  # keep at least one prediction per image (COCO API quirk)
        continue
      bbox = (obj[3:]).tolist()
      bbox[::2] = [width * i for i in bbox[::2]]
      bbox[1::2] = [height * i for i in bbox[1::2]]

      xmin, ymin, xmax, ymax = bbox
      w_bbox = round(xmax - xmin, 1)
      h_bbox = round(ymax - ymin, 1)
      xmin, ymin = round(xmin, 1), round(ymin, 1)

      coco_det = {}
      coco_det['image_id'] = image_id
      coco_det['category_id'] = label
      coco_det['bbox'] = [xmin, ymin, w_bbox, h_bbox]
      coco_det['score'] = score
      detections.append(coco_det)

  coco_dt = coco_annotations.loadRes(detections)
  img_ids = sorted(coco_annotations.getImgIds())
  coco_eval = COCOeval(coco_annotations, coco_dt, 'bbox')
  coco_eval.params.imgIds = img_ids
  coco_eval.evaluate()
  coco_eval.accumulate()
  coco_eval.summarize()

  metrics = {}
  for metric_name, value in zip(METRICS_NAMES, coco_eval.stats):
    metrics[metric_name] = value

  return metrics
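METRICS_NAMES is defined elsewhere in that project; given the zip with coco_eval.stats it presumably names the twelve summary metrics in summarize() order. A plausible definition under that assumption:

METRICS_NAMES = [
    'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl',
    'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl',
]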
Example #18
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    ret['mAP(bbox)'] = cocoEval.stats[0]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        ret['mAP(segm)'] = cocoEval.stats[0]
    return ret
Example #19
    def _update(self):
        """Use coco to get real scores. """
        if self._current_id != len(self._img_ids):
            warnings.warn(
                'Recorded {} out of {} validation images, incomplete results'.format(
                    self._current_id, len(self._img_ids)))
        import json
        try:
            with open(self._filename, 'w') as f:
                json.dump(self._results, f)
        except IOError as e:
            raise RuntimeError("Unable to dump json file, ignored. What(): {}".format(str(e)))

        pred = self.dataset.coco.loadRes(self._filename)
        gt = self.dataset.coco
        # lazy import pycocotools
        try_import_pycocotools()
        from pycocotools.cocoeval import COCOeval
        coco_eval = COCOeval(gt, pred, 'bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        self._coco_eval = coco_eval
        return coco_eval
Example #20
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
    for k in range(6):
        ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        for k in range(6):
            ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
    return ret
Example #21
File: coco.py Project: Hrener/mmdet
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                recalls. If set to a list, the average recall over all IoUs
                will also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str, float]
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        print(result_files)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset are empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            print(cocoDt)
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.imgIds = self.img_ids
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.params.maxDets = list(proposal_nums)
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                metric_items = [
                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000',
                    'AR_l@1000'
                ]
                for i, item in enumerate(metric_items):
                    val = float('{:.3f}'.format(cocoEval.stats[i + 6]))
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    pass  # TODO
                metric_items = [
                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                ]
                for i in range(len(metric_items)):
                    key = '{}_{}'.format(metric, metric_items[i])
                    val = float('{:.3f}'.format(cocoEval.stats[i]))
                    eval_results[key] = val
                eval_results['{}_mAP_copypaste'.format(metric)] = (
                    '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
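A short usage sketch for this mmdet-style evaluate method; results is the per-image list produced by the test loop, and the returned keys follow the '{metric}_{item}' pattern built above:

eval_results = dataset.evaluate(
    results,
    metric=['bbox', 'segm'],  # any subset of allowed_metrics
    classwise=False,
)
print(eval_results['bbox_mAP'], eval_results['segm_mAP'])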
Example #22
class InstanceEvaluator(object):
	def __init__(self, dataset_json, preds_json):
		# load dataset ground truths
		self.dataset = COCO(dataset_json)
		category_ids = self.dataset.getCatIds()
		categories = [x['name'] for x in self.dataset.loadCats(category_ids)]
		self.category_to_id_map = dict(zip(categories, category_ids))
		self.classes = ['__background__'] + categories
		self.num_classes = len(self.classes)

		# load predictions
		self.preds = self.dataset.loadRes(preds_json)
		self.coco_eval = COCOeval(self.dataset, self.preds, 'segm')
		self.coco_eval.params.maxDets = [1, 50, 255]

	def evaluate(self):
		self.coco_eval.evaluate()
		self.coco_eval.accumulate()

	def _summarize(self, ap=1, iouThr=None, areaRng='all', maxDets=255):
		p = self.coco_eval.params
		iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
		titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
		typeStr = '(AP)' if ap==1 else '(AR)'
		iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
		if iouThr is None else '{:0.2f}'.format(iouThr)

		aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
		mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
		if ap == 1:
			# dimension of precision: [TxRxKxAxM]
			s = self.coco_eval.eval['precision']
			# IoU
			if iouThr is not None:
				t = np.where(iouThr == p.iouThrs)[0]
				s = s[t]
			s = s[:,:,:,aind,mind]
		else:
			# dimension of recall: [TxKxAxM]
			s = self.coco_eval.eval['recall']
			if iouThr is not None:
				t = np.where(iouThr == p.iouThrs)[0]
				s = s[t]
			s = s[:,:,aind,mind]
		if len(s[s>-1])==0:
			mean_s = -1
		else:
			mean_s = np.mean(s[s>-1])
		print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
		return mean_s

	def summarize(self, IoU_lo_thres=0.5, IoU_hi_thres=0.95):
		def _get_thr_ind(thr):
			ind = np.where((self.coco_eval.params.iouThrs > thr - 1e-5) &
						   (self.coco_eval.params.iouThrs < thr + 1e-5))[0][0]
			iou_thr = self.coco_eval.params.iouThrs[ind]
			assert np.isclose(iou_thr, thr)
			return ind

		ind_lo = _get_thr_ind(IoU_lo_thres)
		ind_hi = _get_thr_ind(IoU_hi_thres)

		# (iou, recall, cls, area, max_dets)
		precision = self.coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
		ap_mean = np.mean(precision[precision > -1])
		print('* MeanAP: {}'.format(ap_mean))

		print('* Performance by class:')
		ap_by_class = []
		for cls_ind, cls_name in enumerate(self.classes):
			if cls_name == '__background__':
				continue
			cls_precision = self.coco_eval.eval['precision'][ind_lo: (ind_hi + 1), :, cls_ind - 1, 0, 2]
			cls_ap = np.mean(cls_precision[cls_precision > -1])
			ap_by_class.append(cls_ap)
			print('{}, AP: {}'.format(cls_name, cls_ap))
		ap_by_class = np.asarray(ap_by_class)

		print('* Performance at different thresholds:')
		ap_by_thres = np.zeros((12,))
		ap_by_thres[0] = self._summarize(1)
		ap_by_thres[1] = self._summarize(1, iouThr=.5, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[2] = self._summarize(1, iouThr=.75, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[3] = self._summarize(1, areaRng='small', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[4] = self._summarize(1, areaRng='medium', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[5] = self._summarize(1, areaRng='large', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[6] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[0])
		ap_by_thres[7] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[1])
		ap_by_thres[8] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[9] = self._summarize(0, areaRng='small', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[10] = self._summarize(0, areaRng='medium', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[11] = self._summarize(0, areaRng='large', maxDets=self.coco_eval.params.maxDets[2])
		return ap_mean, ap_by_class, ap_by_thres
Example #23
def test(test_model, logger):
    eval_gt = COCO(cfg.gt_path)
    import json
    with open(cfg.det_path, 'r') as f:
        dets = json.load(f)

    test_subset = False
    if test_subset:
        eval_gt.imgs = dict(list(eval_gt.imgs.items())[:100])
        anns = dict()
        for i in eval_gt.imgs:
            for j in eval_gt.getAnnIds(i):
                anns[j] = eval_gt.anns[j]
        eval_gt.anns = anns
    dets = [i for i in dets if i['image_id'] in eval_gt.imgs]

    dets = [i for i in dets if i['category_id'] == 1]
    dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)
    for i in dets:
        i['imgpath'] = 'val2014/COCO_val2014_000000%06d.jpg' % i['image_id']
    img_num = len(np.unique([i['image_id'] for i in dets]))

    use_gtboxes = False
    if use_gtboxes:
        d = COCOJoints()
        coco_train_data, coco_test_data = d.load_data()
        coco_test_data.sort(key=lambda x: x['imgid'])
        for i in coco_test_data:
            i['image_id'] = i['imgid']
            i['score'] = 1.
        dets = coco_test_data

    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[
                img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(id):
        cfg.set_args(args.gpu_ids.split(',')[id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        range = [ranges[id], ranges[id + 1]]
        return test_net(tester, logger, dets, range)

    # func(0)
    # MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    all_res, dump_results = func(0)  #MultiGPUFunc.work()

    # evaluation
    result_path = osp.join(cfg.output_dir, 'results.json')
    with open(result_path, 'w') as f:
        json.dump(dump_results, f)

    eval_dt = eval_gt.loadRes(result_path)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')

    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
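With iouType='keypoints' COCOeval matches detections to ground truth by object keypoint similarity (OKS) rather than box IoU. The per-keypoint falloff constants live in params.kpt_oks_sigmas in recent pycocotools versions; a sketch showing the COCO-person defaults, which only need overriding for non-standard keypoint sets:

import numpy as np

cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')
# Defaults for the 17 COCO person keypoints; length must equal the keypoint count.
cocoEval.params.kpt_oks_sigmas = np.array([
    .26, .25, .25, .35, .35, .79, .79, .72, .72,
    .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()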
Example #24
def evaluate(model, coco, cocoGt, encoder, inv_map, args):
    if args.distributed:
        N_gpu = torch.distributed.get_world_size()
    else:
        N_gpu = 1

    model.eval()
    model.cuda()

    ret = []
    start = time.time()

    # for idx, image_id in enumerate(coco.img_keys):
    for nbatch, (img, img_id, img_size, _, _) in enumerate(coco):
        print("Parsing batch: {}/{}".format(nbatch, len(coco)), end='\r')
        with torch.no_grad():
            inp = img.cuda()
            if args.fp16:
                inp = inp.half()

            # Get predictions
            ploc, plabel = model(inp)
            ploc, plabel = ploc.float(), plabel.float()

            # Handle the batch of predictions produced
            # This is slow, but consistent with old implementation.
            for idx in range(ploc.shape[0]):
                # ease-of-use for specific predictions
                ploc_i = ploc[idx, :, :].unsqueeze(0)
                plabel_i = plabel[idx, :, :].unsqueeze(0)

                try:
                    result = encoder.decode_batch(ploc_i, plabel_i, 0.50,
                                                  200)[0]
                except Exception:
                    # raise
                    print("")
                    print("No object detected in idx: {}".format(idx))
                    continue

                htot, wtot = img_size[0][idx].item(), img_size[1][idx].item()
                loc, label, prob = [r.cpu().numpy() for r in result]
                for loc_, label_, prob_ in zip(loc, label, prob):
                    ret.append([img_id[idx], loc_[0] * wtot, \
                                loc_[1] * htot,
                                (loc_[2] - loc_[0]) * wtot,
                                (loc_[3] - loc_[1]) * htot,
                                prob_,
                                inv_map[label_]])

    # Now we have all predictions from this rank, gather them all together
    # if necessary
    ret = np.array(ret).astype(np.float32)

    # Multi-GPU eval
    if args.distributed:
        # NCCL backend means we can only operate on GPU tensors
        ret_copy = torch.tensor(ret).cuda()
        # Everyone exchanges the size of their results
        ret_sizes = [torch.tensor(0).cuda() for _ in range(N_gpu)]

        torch.cuda.synchronize()
        torch.distributed.all_gather(ret_sizes,
                                     torch.tensor(ret_copy.shape[0]).cuda())
        torch.cuda.synchronize()

        # Get the maximum results size, as all tensors must be the same shape for
        # the all_gather call we need to make
        max_size = 0
        sizes = []
        for s in ret_sizes:
            max_size = max(max_size, s.item())
            sizes.append(s.item())

        # Need to pad my output to max_size in order to use in all_gather
        ret_pad = torch.cat([
            ret_copy,
            torch.zeros(max_size - ret_copy.shape[0], 7,
                        dtype=torch.float32).cuda()
        ])

        # allocate storage for results from all other processes
        other_ret = [
            torch.zeros(max_size, 7, dtype=torch.float32).cuda()
            for i in range(N_gpu)
        ]
        # Everyone exchanges (padded) results

        torch.cuda.synchronize()
        torch.distributed.all_gather(other_ret, ret_pad)
        torch.cuda.synchronize()

        # Now need to reconstruct the _actual_ results from the padded set using slices.
        cat_tensors = []
        for i in range(N_gpu):
            cat_tensors.append(other_ret[i][:sizes[i]][:])

        final_results = torch.cat(cat_tensors).cpu().numpy()
    else:
        # Otherwise full results are just our results
        final_results = ret

    if args.local_rank == 0:
        print("")
        print("Predicting Ended, total time: {:.2f} s".format(time.time() -
                                                              start))

    cocoDt = cocoGt.loadRes(final_results)

    E = COCOeval(cocoGt, cocoDt, iouType='bbox')
    E.evaluate()
    E.accumulate()
    if args.local_rank == 0:
        E.summarize()
        print("Current AP: {:.5f}".format(E.stats[0]))
    else:
        # fix for cocoeval indiscriminate prints
        with redirect_stdout(io.StringIO()):
            E.summarize()

    # put your model in training mode back on
    model.train()

    return E.stats[
        0]  # Average Precision (AP) @[ IoU=0.50:0.95 | area=all | maxDets=100 ]
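loadRes is called here with a NumPy array rather than a json file: pycocotools also accepts an N x 7 ndarray, one detection per row, with columns (image_id, x, y, w, h, score, category_id), which is exactly the layout built into ret above. A minimal sketch:

import numpy as np

dets = np.array([
    [42, 258.15, 41.29, 348.26, 243.78, 0.236, 18],  # one detection row
], dtype=np.float32)
cocoDt = cocoGt.loadRes(dets)  # equivalent to loading a json results file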
Example #25
def coco_eval(result_files,
              result_types,
              coco,
              max_dets=(100, 300, 1000),
              classwise=False,
              outs_file=None):
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
        ]

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    if result_types == ['proposal_fast']:
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    for res_type in result_types:
        if isinstance(result_files, str):
            result_file = result_files
        elif isinstance(result_files, dict):
            result_file = result_files[res_type]
        else:
            raise TypeError('result_files must be a str or dict')
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        if outs_file is not None:
            # don't rebind outs_file to the handle; later iterations reuse the path
            with open(outs_file, 'a') as f:
                f.write(str(cocoEval.stats) + '\n')

        if classwise:
            # Compute per-category AP
            # from https://github.com/facebookresearch/detectron2/blob/03064eb5bafe4a3e5750cc7a16672daf5afe8435/detectron2/evaluation/coco_evaluation.py#L259-L283 # noqa
            precisions = cocoEval.eval['precision']
            catIds = coco.getCatIds()
            # precision has dims (iou, recall, cls, area range, max dets)
            assert len(catIds) == precisions.shape[2]

            results_per_category = []
            for idx, catId in enumerate(catIds):
                # area range index 0: all area ranges
                # max dets index -1: typically 100 per image
                nm = coco.loadCats(catId)[0]
                precision = precisions[:, :, idx, 0, -1]
                precision = precision[precision > -1]
                ap = np.mean(precision) if precision.size else float('nan')
                results_per_category.append(
                    ('{}'.format(nm['name']),
                     '{:0.3f}'.format(float(ap * 100))))

            N_COLS = min(6, len(results_per_category) * 2)
            results_flatten = list(itertools.chain(*results_per_category))
            headers = ['category', 'AP'] * (N_COLS // 2)
            results_2d = itertools.zip_longest(
                *[results_flatten[i::N_COLS] for i in range(N_COLS)])
            table_data = [headers]
            table_data += [result for result in results_2d]
            table = AsciiTable(table_data)
            print(table.table)
Example #26
def evaluate_wider_pedestrian(epoch, dataset, model_new, retinanet_sk,
                              threshold):
    print(
        "\n==> Evaluating wider pedestrian dataset with threshold:{}.".format(
            threshold))
    new_model_1 = model_new._modules['module']
    state_dict_new = new_model_1.state_dict()
    retinanet_sk.load_state_dict(state_dict_new)
    model1 = copy.deepcopy(retinanet_sk).cuda(0)

    model1.eval()

    with torch.no_grad():

        # start collecting results
        results = []
        image_ids = []
        scores_for_upload = []
        for index in range(len(dataset)):
            # if index>50:
            #     break
            data = dataset[index]
            scale = data['scale']
            progress_bar(index, len(dataset), "Evaluating........")

            # run network
            data1 = data['img'].permute(2, 0, 1).cuda(0)
            scores, labels, boxes = model1(data1.float().unsqueeze(dim=0))
            scores = scores.cpu()
            labels = labels.cpu()
            boxes = boxes.cpu()

            # correct boxes for image scale
            boxes /= scale

            # change to (x, y, w, h) (MS COCO standard)
            if boxes.shape[0] > 0:
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]

            # compute predicted labels and scores
            # for box, score, label in zip(boxes[0], scores[0], labels[0]):
            for box_id in range(boxes.shape[0]):
                score = float(scores[box_id])
                label = int(labels[box_id])
                box = boxes[box_id, :]

                # scores are sorted, so we can break
                if score < threshold:
                    break

                # append detection for each positively labeled class
                box_list = box.tolist()
                image_result = {
                    'image_id': dataset.image_ids[index],
                    'category_id': dataset.label_to_coco_label(label),
                    'score': float(score),
                    'bbox': box_list,
                }
                image_name = os.path.basename(dataset.get_image_name(index))
                score_row = "{img} {score:.3f} {xmin:.1f} {ymin:.1f} {w:.1f} {h:.1f}".format(
                    img=image_name,
                    score=score,
                    xmin=box_list[0],
                    ymin=box_list[1],
                    w=box_list[2],
                    h=box_list[3])
                scores_for_upload.append(score_row)
                # append detection to results
                results.append(image_result)

            # append image to list of processed images
            image_ids.append(dataset.image_ids[index])
        from utils.file_utils import save_to_file
        save_to_file('submit_files/scores_validation.txt', scores_for_upload)
        # print progress
        if not len(results):
            return

        # write output
        with open('{}_bbox_results.json'.format(dataset.set_name), 'w') as f:
            json.dump(results, f, indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        from score_pedestrian_detection import get_average_precision_validation

        return get_average_precision_validation()
Example #27
def main(args):
    debug_show = False  # flip to True for step-by-step visualization
    write_image = True
    # create model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = network.__dict__[cfg.model](cfg.output_shape, cfg.num_class, pretrained = True)
    model = torch.nn.DataParallel(model).cuda().to(device)

    # model =model.cuda()

    # img_dir = os.path.join(cur_dir,'/home/yiliu/work/fiberPJ/data/fiber_labeled_data/instance_labeled_file/')

    test_loader = torch.utils.data.DataLoader(
        MscocoMulti_double_only(cfg, train=False),
        batch_size=args.batch*args.num_gpus, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # load trainning weights
    checkpoint_file = os.path.join(args.checkpoint, args.test+'.pth.tar')

    checkpoint = torch.load(checkpoint_file)
    # print("info : '{}'").format(checkpoint['info'])

    model.load_state_dict(checkpoint['state_dict'])

    print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_file, checkpoint['epoch']))

    # change to evaluation mode
    model.eval()

    print('testing...')
    full_result = []
    torch.set_grad_enabled(False)  # a bare torch.no_grad() call is a no-op; disable gradients explicitly

    file_name = []

    final_len_skel = []
    final_num_skel = []
    final_ave_len_skel = []

    final_len_march = []
    final_num_march = []
    final_ave_len_march = []

    # for i, (inputs, targets, meta) in enumerate(test_loader):
    for i, (inputs, image_org, meta) in enumerate(test_loader):
        if i == 0:
            continue

        this_file_name = meta['imgID']
        file_name.append(this_file_name)
        image_org_ = image_org.data.cpu().numpy()[0]
            #-------------------------------------------------------------
        input_var = torch.autograd.Variable(inputs.to(device))
        input_ = input_var.data.cpu().numpy()[0]

            #-----------------------------------------------------------------


        outputs = model(input_var)
        end_point_pred, intersection_point_pred, end_points_short_offsets_pred, intersection_points_short_offsets_pred = outputs


        image_show = np.transpose(input_,(1,2,0))
        # image_show = image_org_ # * 0.6 + image_show * 0.4
        # cv2.imshow('org',image_show)
        # cv2.waitKey(0)
        # image_show = cv2.resize(image_show, )
        intersection_points_short_h = intersection_points_short_offsets_pred[:,0,:,:].cpu().detach().numpy()[0]
        intersection_points_short_v = intersection_points_short_offsets_pred[:,1,:,:].cpu().detach().numpy()[0]
        canvas = np.zeros_like(intersection_points_short_h)
        canvas = visualize_offset(canvas, intersection_points_short_h, intersection_points_short_v)

        if debug_show:
            combined_show = draw_mask(image_show, canvas)
        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
            cv2.imshow('intersection_points_short.png',combined_show*1.0)
            cv2.waitKey(0)



        image_show = np.transpose(input_,(1,2,0))
        control_points_map_pred_h = end_points_short_offsets_pred[:,0,:,:].cpu().detach().numpy()[0]
        control_points_map_pred_v = end_points_short_offsets_pred[:,1,:,:].cpu().detach().numpy()[0]
        canvas = np.zeros_like(control_points_map_pred_h)
        canvas = visualize_offset(canvas, control_points_map_pred_h, control_points_map_pred_v)


        combined_show = draw_mask(image_show, canvas)
        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
        if debug_show:
            cv2.imshow('end_points_short.png',combined_show)
            cv2.waitKey(0)

        mask_pred_numpy = end_point_pred[0,0,:,:].cpu().detach().numpy()
        canvas = np.zeros_like(mask_pred_numpy)

        mask_pred_numpy = (mask_pred_numpy>0.5) * 1.0

        combined_show = draw_mask(image_show, mask_pred_numpy)
        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )

        if debug_show:
            cv2.imshow('end_point.png',combined_show)
            cv2.waitKey(0)



        mask_pred_numpy = intersection_point_pred[0,0,:,:].cpu().detach().numpy()
        canvas = np.zeros_like(mask_pred_numpy)

        mask_pred_numpy = (mask_pred_numpy>0.4) * 1.0
        combined_show = draw_mask(image_show, mask_pred_numpy)

        # combined_show = image_show * 0.7 + np.tile(np.expand_dims(intersection_point_pred[0,0,:,:].cpu().detach().numpy(), -1),3) * 0.3

        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
        if debug_show:
            cv2.imshow('intersection.png ',combined_show)
            cv2.waitKey(0)

        # import pdb; pdb.set_trace()
        canvas = np.zeros_like(mask_pred_numpy)
        heatmap_control = compute_heatmaps(mask_pred_numpy, intersection_points_short_offsets_pred[0,:,:,:].cpu().detach().numpy())
        if debug_show:
            # combined_show = image_show * 0.7 + np.tile(np.expand_dims(normalized_heatmap_control, -1),3) * 0.3
            combined_show = draw_mask(image_show, heatmap_control)
        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
            # cv2.imshow('bla.png', combined_show * 1.0)
            cv2.waitKey(0)



        heatmap_control = gaussian_filter(heatmap_control, sigma=4)

        normalized_heatmap_control = normalize_include_neg_val(heatmap_control)



        kp_control = get_keypoints(heatmap_control)
        kp_control_intersections= kp_control
        for k in range(len(kp_control)):  # use k so the outer batch index i is not clobbered
            curr = (kp_control[k]['xy'][0], kp_control[k]['xy'][1])
            cv2.circle(canvas, curr, 3, 1, 1)

        # import pdb; pdb.set_trace()
        combined_show = draw_mask(image_show, canvas)
        if debug_show:

        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
            cv2.imshow('keypoints.png', combined_show * 1.0)
            cv2.waitKey(0)

        if write_image:
            cv2.imwrite('keypoints_write.png',combined_show*255.0)

        canvas = np.zeros_like(mask_pred_numpy)
        heatmap_control = compute_heatmaps(end_point_pred[0,0,:,:].cpu().detach().numpy(), end_points_short_offsets_pred[0,:,:,:].cpu().detach().numpy())
        heatmap_control = gaussian_filter(heatmap_control, sigma=5)
        kp_control = get_keypoints(heatmap_control)

        for k in range(len(kp_control)):
            curr = (kp_control[k]['xy'][0], kp_control[k]['xy'][1])
            cv2.circle(canvas, curr, 3, 1, 1)

        # import pdb; pdb.set_trace()
        if debug_show:
            combined_show = draw_mask(image_show, canvas)
        # combined_show =  cv2.resize(combined_show, (512,512), interpolation = cv2.INTER_NEAREST )
            cv2.imshow('end_point.png', combined_show)
            cv2.waitKey(0)
        mask = 1.0 * (rgb2gray(image_show) > 0)
        # cv2.imshow('end_point', mask)
        # cv2.waitKey(0)
        # import pdb; pdb.set_trace()
        import time

        start_time = time.time()
        file_name_to_save = this_file_name[0][:-4]
        skel_len, num_skel, march_len, num_march = fast_march(kp_control_intersections,mask, filename = file_name_to_save, write = True)
        print("--- %s seconds ---" % (time.time() - start_time))

        final_len_skel.append(skel_len * 2)
        final_num_skel.append(num_skel)
        final_ave_len_skel.append(skel_len * 2 / num_skel)

        final_len_march.append(march_len * 2)
        final_num_march.append(num_march)
        final_ave_len_march.append(march_len * 2 / num_march)


    book = xlwt.Workbook()
    sh = book.add_sheet('Sheet 1')
    for row, this_file_name in enumerate(file_name):
        sh.write(row, 0, this_file_name)

    for row, length in enumerate(final_len_skel):
        sh.write(row, 1, float(length))

    for row, num in enumerate(final_num_skel):
        sh.write(row, 2, float(num))

    for row, ave_len in enumerate(final_ave_len_skel):
        sh.write(row, 3, float(ave_len))

    for row, length in enumerate(final_len_march):
        sh.write(row, 4, float(length))

    for row, num in enumerate(final_num_march):
        sh.write(row, 5, float(num))

    for row, ave_len_march in enumerate(final_ave_len_march):
        sh.write(row, 6, float(ave_len_march))

    book.save('test.xls')  # xlwt writes legacy .xls workbooks, not .xlsx

    # evaluate on COCO
    eval_gt = COCO(cfg.ori_gt_path)
    # result_file is assumed to be written earlier in this pipeline (keypoint detections json)
    eval_dt = eval_gt.loadRes(result_file)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Example #28
def evaluate_coco(model,
                  dataset,
                  coco,
                  eval_type="bbox",
                  limit=0,
                  image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick TACO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding TACO image IDs.
    taco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        # r = utils.fuse_instances(r)
        t_prediction += (time.time() - t)

        if not model.config.DETECTION_SCORE_RATIO:
            scores = r["scores"]
        else:
            scores = r["scores"] / (r["full_scores"][:, 0] + 0.0001)

        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, taco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"], scores,
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # utils.compute_confusion_matrix(coco_results, coco)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = taco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
Example #29
def evaluate_on_coco(cfg, resFile):
    annType = "bbox"  # specify type here
    with open(resFile, 'r') as f:
        unsorted_annotations = json.load(f)
    sorted_annotations = list(
        sorted(unsorted_annotations,
               key=lambda single_annotation: single_annotation["image_id"]))
    sorted_annotations = list(
        map(convert_cat_id_and_reorientate_bbox, sorted_annotations))
    reshaped_annotations = defaultdict(list)
    for annotation in sorted_annotations:
        reshaped_annotations[annotation['image_id']].append(annotation)

    with open('temp.json', 'w') as f:
        json.dump(sorted_annotations, f)

    cocoGt = COCO(cfg.gt_annotations_path)
    cocoDt = cocoGt.loadRes('temp.json')

    imgIds = sorted(cocoGt.getImgIds())
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    exit()  # NOTE: exits here; the visualization code below never runs

    with open(cfg.gt_annotations_path, 'r') as f:
        gt_annotation_raw = json.load(f)
        gt_annotation_raw_images = gt_annotation_raw["images"]
        gt_annotation_raw_labels = gt_annotation_raw["annotations"]

    rgb_label = (255, 0, 0)
    rgb_pred = (0, 255, 0)

    for i, image_id in enumerate(reshaped_annotations):
        image_annotations = reshaped_annotations[image_id]
        gt_annotation_image_raw = list(
            filter(lambda image_json: image_json['id'] == image_id,
                   gt_annotation_raw_images))
        gt_annotation_labels_raw = list(
            filter(lambda label_json: label_json['image_id'] == image_id,
                   gt_annotation_raw_labels))
        if len(gt_annotation_image_raw) == 1:
            image_path = os.path.join(cfg.dataset_dir,
                                      gt_annotation_image_raw[0]["file_name"])
            actual_image = Image.open(image_path).convert('RGB')
            draw = ImageDraw.Draw(actual_image)

            for annotation in image_annotations:
                x1_pred, y1_pred, w, h = annotation['bbox']
                x2_pred, y2_pred = x1_pred + w, y1_pred + h
                cls_id = annotation['category_id']
                label = get_class_name(cls_id)
                draw.text((x1_pred, y1_pred), label, fill=rgb_pred)
                draw.rectangle([x1_pred, y1_pred, x2_pred, y2_pred],
                               outline=rgb_pred)
            for annotation in gt_annotation_labels_raw:
                x1_truth, y1_truth, w, h = annotation['bbox']
                x2_truth, y2_truth = x1_truth + w, y1_truth + h
                cls_id = annotation['category_id']
                label = get_class_name(cls_id)
                draw.text((x1_truth, y1_truth), label, fill=rgb_label)
                draw.rectangle([x1_truth, y1_truth, x2_truth, y2_truth],
                               outline=rgb_label)
            actual_image.save("./data/outcome/predictions_{}".format(
                gt_annotation_image_raw[0]["file_name"]))
        else:
            print('please check')
            break
        if (i + 1) % 100 == 0:  # just see first 100
            break
Example #30
    def evaluate(self, model, half=False, distributed=False):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset
        and the results are evaluated by COCO API.
        Args:
            model : model object
        Returns:
            ap50_95 (float) : calculated COCO AP for IoU=0.50:0.95
            ap50 (float) : calculated COCO AP for IoU=0.50
        """
        if isinstance(model, apex.parallel.DistributedDataParallel):
            model = model.module
            distributed=True

        model=model.eval()
        cuda = torch.cuda.is_available()
        if half:
            Tensor = torch.cuda.HalfTensor if cuda else torch.HalfTensor
        else:
            Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        ids = []
        data_dict = []
        img_num = 0

        indices = list(range(self.num_images))
        if distributed:
            dis_indices = indices[distributed_util.get_rank()::distributed_util.get_world_size()]
        else:
            dis_indices = indices
        progress_bar = tqdm if distributed_util.is_main_process() else iter
        # num_classes = 80 if not self.voc else 20
        num_classes = 1 if not self.voc else 20

        inference_time = 0
        nms_time = 0
        n_samples = len(dis_indices) - 10  # the first 10 iterations are warm-up and excluded from timing

        for k, i in enumerate(progress_bar(dis_indices)):
            img, _, info_img, id_ = self.dataset[i]  # load a batch
            info_img = [float(info) for info in info_img]
            id_ = int(id_)
            ids.append(id_)
            with torch.no_grad():
                img = Variable(img.type(Tensor).unsqueeze(0))
                if k > 9:
                    start=time.time()

                if self.vis:
                    outputs,fuse_weights,fused_f = model(img)
                else:
                    outputs = model(img)

                if k > 9:
                    infer_end=time.time()
                    inference_time += (infer_end-start)

                outputs = postprocess(
                    outputs, num_classes, self.confthre, self.nmsthre)

                if k > 9:
                    nms_end=time.time()
                    nms_time +=(nms_end-infer_end)

                if outputs[0] is None:
                    continue
                outputs = outputs[0].cpu().data

            bboxes = outputs[:, 0:4]
            bboxes[:, 0::2] *= info_img[0] / self.img_size[0]
            bboxes[:, 1::2] *= info_img[1] / self.img_size[1]
            bboxes[:, 2] = bboxes[:,2] - bboxes[:,0]
            bboxes[:, 3] = bboxes[:,3] - bboxes[:,1]
            cls = outputs[:, 6]
            scores = outputs[:, 4]* outputs[:,5]
            for ind in range(bboxes.shape[0]):
                label = self.dataset.class_ids[int(cls[ind])]
                A = {"image_id": id_, "category_id": label, "bbox": bboxes[ind].numpy().tolist(),
                 "score": scores[ind].numpy().item(), "segmentation": []} # COCO json format
                data_dict.append(A)
            
            if self.vis:
                o_img,_,_,_  = self.dataset.pull_item(i)
                make_vis('COCO', i, o_img, fuse_weights, fused_f)
                class_names = self.dataset._classes
                make_pred_vis('COCO', i, o_img, class_names, bboxes, cls, scores)

            if DEBUG and distributed_util.is_main_process():
                o_img,_  = self.dataset.pull_item(i)
                class_names = self.dataset._classes
                make_pred_vis('COCO', i, o_img, class_names, bboxes, cls, scores)

        if distributed:
            distributed_util.synchronize()
            data_dict = _accumulate_predictions_from_multiple_gpus(data_dict)
            inference_time = torch.FloatTensor(1).type(Tensor).fill_(inference_time)
            nms_time = torch.FloatTensor(1).type(Tensor).fill_(nms_time)
            n_samples = torch.LongTensor(1).type(Tensor).fill_(n_samples)
            distributed_util.synchronize()
            torch.distributed.reduce(inference_time, dst=0)
            torch.distributed.reduce(nms_time, dst=0)
            torch.distributed.reduce(n_samples, dst=0)
            inference_time = inference_time.item()
            nms_time = nms_time.item()
            n_samples = n_samples.item()

        if not distributed_util.is_main_process():
            return 0, 0


        print('Main process Evaluating...')

        annType = ['segm', 'bbox', 'keypoints']
        a_infer_time = 1000*inference_time / (n_samples)
        a_nms_time= 1000*nms_time / (n_samples)

        print('Average forward time: %.2f ms, Average NMS time: %.2f ms, Average inference time: %.2f ms' %(a_infer_time, \
                a_nms_time, (a_infer_time+a_nms_time)))

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataset.coco
            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
            if self.testset:
                with open('yolov3_2017.json', 'w') as f:
                    json.dump(data_dict, f)
                cocoDt = cocoGt.loadRes('yolov3_2017.json')
            else:
                _, tmp = tempfile.mkstemp()
                with open(tmp, 'w') as f:
                    json.dump(data_dict, f)
                cocoDt = cocoGt.loadRes(tmp)
            cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            return cocoEval.stats[0], cocoEval.stats[1]
        else:
            return 0, 0
Example #31
def test(cfg,
         data_cfg,
         weights=None,
         batch_size=16,
         img_size=416,
         iou_thres=0.5,
         conf_thres=0.001,
         nms_thres=0.5,
         save_json=False,
         model=None):
    if model is None:
        device = torch_utils.select_device()

        # Initialize model
        model = Darknet(cfg, img_size).to(device)

        # Load weights
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
        else:  # darknet format
            _ = load_darknet_weights(model, weights)

        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:
        device = next(model.parameters()).device  # get model device

    # Configure run
    data_cfg = parse_data_cfg(data_cfg)
    nc = int(data_cfg['classes'])  # number of classes
    test_path = data_cfg['valid']  # path to test images
    names = load_classes(data_cfg['names'])  # class names

    # Dataloader
    dataset = LoadImagesAndLabels(test_path, img_size=img_size)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=4,
                            pin_memory=True,
                            collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    coco91class = coco80_to_coco91_class()
    print(('%20s' + '%10s' * 6) %
          ('Class', 'Images', 'Targets', 'P', 'R', 'mAP', 'F1'))
    loss, p, r, f1, mp, mr, map, mf1 = 0., 0., 0., 0., 0., 0., 0., 0.
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc='Computing mAP')):
        targets = targets.to(device)
        imgs = imgs.to(device)

        # Plot images with bounding boxes
        if batch_i == 0 and not os.path.exists('test_batch0.jpg'):
            plot_images(imgs=imgs, targets=targets, fname='test_batch0.jpg')

        # Run model
        inf_out, train_out = model(imgs)  # inference and training outputs

        # Compute loss
        if hasattr(model, 'hyp'):  # if model has loss hyperparameters
            loss_i, _ = compute_loss(train_out, targets, model)
            loss += loss_i.item()

        # Run NMS
        output = non_max_suppression(inf_out,
                                     conf_thres=conf_thres,
                                     nms_thres=nms_thres)

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append(([], torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img_size, box, shapes[si])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for di, d in enumerate(pred):
                    jdict.append({
                        'image_id': image_id,
                        'category_id': coco91class[int(d[6])],
                        'bbox': [float3(x) for x in box[di]],
                        'score': float(d[4])
                    })

            # Assign all predictions as incorrect
            correct = [0] * len(pred)
            if nl:
                detected = []
                tbox = xywh2xyxy(labels[:, 1:5]) * img_size  # target boxes

                # Search for correct predictions
                for i, (*pbox, pconf, pcls_conf, pcls) in enumerate(pred):

                    # Break if all targets already located in image
                    if len(detected) == nl:
                        break

                    # Continue if predicted class not among image classes
                    if pcls.item() not in tcls:
                        continue

                    # Best iou, index between pred and targets
                    iou, bi = bbox_iou(pbox, tbox).max(0)

                    # If iou > threshold and class is correct mark as correct
                    if iou > iou_thres and bi not in detected:  # and pcls == tcls[bi]:
                        correct[i] = 1
                        detected.append(bi)

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in list(zip(*stats))]  # to numpy
    nt = np.bincount(stats[3].astype(np.int64),
                     minlength=nc)  # number of targets per class
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()

    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1), end='\n\n')

    # Print results per class
    if nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Save JSON
    if save_json and map and len(jdict):
        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataset.img_files]
        with open('results.json', 'w') as file:
            json.dump(jdict, file)

        from pycocotools.coco import COCO
        from pycocotools.cocoeval import COCOeval

        # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
        cocoGt = COCO('../coco/annotations/instances_val2014.json'
                      )  # initialize COCO ground truth api
        cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api

        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        map = cocoEval.stats[1]  # update mAP to pycocotools mAP

    # Return results
    return mp, mr, map, mf1, loss / len(dataloader)
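# Reference sketch: a one-step equivalent of the xyxy2xywh-plus-shift idiom
# used above when building the pycocotools JSON. COCO expects boxes as
# [x_min, y_min, width, height], while the model emits [x1, y1, x2, y2].
import torch

def xyxy_to_coco(box):
    """Convert an (N, 4) tensor of [x1, y1, x2, y2] boxes to COCO's
    [x_min, y_min, w, h]; x1, y1 already are the top-left corner."""
    out = box.clone()
    out[:, 2] = box[:, 2] - box[:, 0]  # width
    out[:, 3] = box[:, 3] - box[:, 1]  # height
    return out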
Example #32
def test(
        cfg,
        data,
        weights=None,
        batch_size=16,
        img_size=416,
        conf_thres=0.001,
        iou_thres=0.6,  # for nms
        save_json=False,
        single_cls=False,
        model=None,
        dataloader=None):
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device(opt.device, batch_size=batch_size)
        verbose = opt.task == 'test'

        # Remove previous
        for f in glob.glob('test_batch*.png'):
            os.remove(f)

        # Initialize model
        model = Darknet(cfg, img_size)

        # Load weights
        attempt_download(weights)
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
        else:  # darknet format
            load_darknet_weights(model, weights)

        # Fuse
        model.fuse()
        model.to(device)

        if device.type != 'cpu' and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:  # called by train.py
        device = next(model.parameters()).device  # get model device
        verbose = False

    # Configure run
    data = parse_data_cfg(data)
    nc = 1 if single_cls else int(data['classes'])  # number of classes
    path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names
    iouv = torch.linspace(0.5, 0.95,
                          10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment out this line for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        dataset = LoadImagesAndLabels(path,
                                      img_size,
                                      batch_size,
                                      rect=True,
                                      single_cls=opt.single_cls)
        batch_size = min(batch_size, len(dataset))
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=min([
                                    os.cpu_count(),
                                    batch_size if batch_size > 1 else 0, 8
                                ]),
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@0.5', 'F1')
    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        imgs = imgs.to(
            device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = imgs.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Plot images with bounding boxes
        f = 'test_batch%g.png' % batch_i  # filename
        if batch_i < 1 and not os.path.exists(f):
            plot_images(imgs=imgs, targets=targets, paths=paths, fname=f)

        # Disable gradients
        with torch.no_grad():
            aug = False  # augment https://github.com/ultralytics/yolov3/issues/931
            if aug:
                imgs = torch.cat(
                    (
                        imgs,
                        imgs.flip(3),  # flip-lr
                        torch_utils.scale_img(imgs, 0.7),  # scale
                    ),
                    0)

            # Run model
            t = torch_utils.time_synchronized()
            inf_out, train_out = model(imgs)  # inference and training outputs
            t0 += torch_utils.time_synchronized() - t

            if aug:
                x = torch.split(inf_out, nb, dim=0)
                x[1][..., 0] = width - x[1][..., 0]  # flip lr
                x[2][..., :4] /= 0.7  # scale
                inf_out = torch.cat(x, 1)

            # Compute loss
            if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                loss += compute_loss(train_out, targets,
                                     model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres)  # nms
            t1 += torch_utils.time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id': image_id,
                        'category_id': coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score': round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Print speeds
    if verbose:
        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (
            img_size, img_size, batch_size)  # tuple
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Save JSON
    if save_json and map and len(jdict):
        print('\nCOCO mAP with pycocotools...')
        imgIds = [
            int(Path(x).stem.split('_')[-1])
            for x in dataloader.dataset.img_files
        ]
        with open('results.json', 'w') as file:
            json.dump(jdict, file)

        try:
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval
        except ImportError:
            print(
                'WARNING: missing pycocotools package, can not compute official COCO mAP. See requirements.txt.'
            )

        # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
        cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')
                      [0])  # initialize COCO ground truth api
        cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api

        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        mf1, map = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)

    # Return results
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
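# Reference sketch of the multi-threshold matching above: `ious[j] > iouv`
# marks one prediction correct at every IoU threshold its best match clears,
# and those boolean rows are what ap_per_class() later averages into
# mAP@0.5:0.95.
import torch

iouv = torch.linspace(0.5, 0.95, 10)  # the 10 COCO thresholds
best_iou = torch.tensor(0.62)         # e.g. best IoU with an unmatched target
correct_row = best_iou > iouv         # correct at 0.50/0.55/0.60 only
print(correct_row.int().tolist())     # [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]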
Example #33
def evaluate(generator, model, threshold=0.05):
    """
    Use the pycocotools to evaluate a COCO model on a dataset.

    Args
        generator: The generator for generating the evaluation data.
        model: The model to evaluate.
        threshold: The score threshold to use.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in trange(generator.size(), desc='COCO evaluation: '):
        image = generator.load_image(index)
        src_image = image.copy()
        image_shape = image.shape[:2]
        image_shape = np.array(image_shape)
        image = generator.preprocess_image(image)

        # run network
        detections = model.predict_on_batch([
            np.expand_dims(image, axis=0),
            np.expand_dims(image_shape, axis=0)
        ])[0]

        # change to (x, y, w, h) (MS COCO standard)
        boxes = np.zeros((detections.shape[0], 4), dtype=np.int32)
        # xmin
        boxes[:,
              0] = np.maximum(np.round(detections[:, 1]).astype(np.int32), 0)
        # ymin
        boxes[:,
              1] = np.maximum(np.round(detections[:, 0]).astype(np.int32), 0)
        # w
        boxes[:, 2] = np.minimum(
            np.round(detections[:, 3] - detections[:, 1]).astype(np.int32),
            image_shape[1])
        # h
        boxes[:, 3] = np.minimum(
            np.round(detections[:, 2] - detections[:, 0]).astype(np.int32),
            image_shape[0])
        scores = detections[:, 4]
        class_ids = detections[:, 5].astype(np.int32)
        # compute predicted labels and scores
        for box, score, class_id in zip(boxes, scores, class_ids):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id': generator.image_ids[index],
                'category_id': generator.label_to_coco_label(class_id),
                'score': float(score),
                'bbox': box.tolist(),
            }
            # append detection to results
            results.append(image_result)
            class_name = generator.label_to_name(class_id)
            ret, baseline = cv2.getTextSize(class_name,
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(src_image, (box[0], box[1]),
                          (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 1)
            cv2.putText(src_image, class_name,
                        (box[0], box[1] + box[3] - baseline),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.imshow('image', src_image)
        cv2.waitKey(0)
        # append image to list of processed images
        image_ids.append(generator.image_ids[index])

    if not len(results):
        return

    # write output
    json.dump(results,
              open('{}_bbox_results.json'.format(generator.set_name), 'w'),
              indent=4)
    json.dump(image_ids,
              open('{}_processed_image_ids.json'.format(generator.set_name),
                   'w'),
              indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
        generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
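# Reference sketch (file names are assumptions): both examples above restrict
# evaluation with coco_eval.params.imgIds to the images actually processed.
# If imgIds is left at its default, COCOeval scores every ground-truth image,
# so images without submitted detections count as pure misses and deflate AP
# on partial runs.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('annotations.json')            # assumed annotation file
coco_dt = coco_gt.loadRes('detections.json')  # assumed result file
subset = coco_gt.getImgIds()[:100]            # e.g. a 100-image smoke test

coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.imgIds = subset
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()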
Example #34
def evaluate_coco(generator, model, threshold=0.05):
    # start collecting results
    results = []
    image_ids = []
    for index in range(generator.size()):
        image = generator.load_image(index)
        image_shape = image.shape
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        # run network
        _, _, _, boxes, nms_classification, masks = model.predict_on_batch(
            np.expand_dims(image, axis=0))

        # clip to image shape
        boxes[:, :, 0] = np.maximum(0, boxes[:, :, 0])
        boxes[:, :, 1] = np.maximum(0, boxes[:, :, 1])
        boxes[:, :, 2] = np.minimum(image.shape[1], boxes[:, :, 2])
        boxes[:, :, 3] = np.minimum(image.shape[0], boxes[:, :, 3])

        # correct boxes for image scale
        boxes[0, :, :4] /= scale

        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]

        # compute predicted labels and scores
        for i, j in np.transpose(
                np.where(nms_classification[0, :, :] > threshold)):
            b = boxes[0,
                      i, :].astype(int)  # box (x, y, w, h) as one int vector

            mask = masks[0, i, :, :, j]
            mask = cv2.resize(mask, (int(b[2]), int(b[3])))  # dsize must be int (w, h)
            mask = (mask > 0.5).astype(
                np.uint8)  # binarize for encoding as RLE

            segmentation = np.zeros((image_shape[0], image_shape[1]),
                                    dtype=np.uint8)
            segmentation[b[1]:b[1] + b[3], b[0]:b[0] + b[2]] = mask
            segmentation = mask_utils.encode(np.asfortranarray(segmentation))

            # append boxes for each positively labeled class
            image_result = {
                'image_id': generator.image_ids[index],
                'category_id': generator.label_to_coco_label(j),
                'score': float(nms_classification[0, i, j]),
                'bbox': boxes[0, i, :].tolist(),
                'segmentation': segmentation
            }

            # convert byte to str to write in json (in Python 3)
            if not isinstance(image_result['segmentation']['counts'], str):
                image_result['segmentation']['counts'] = image_result[
                    'segmentation']['counts'].decode()

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[index])

        # print progress
        print('{}/{}'.format(index, generator.size()), end='\r')

    if not len(results):
        return

    # write output
    json.dump(results,
              open('{}_segm_results.json'.format(generator.set_name), 'w'),
              indent=4)
    json.dump(image_ids,
              open('{}_processed_image_ids.json'.format(generator.set_name),
                   'w'),
              indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_segm_results.json'.format(
        generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'segm')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example #35
graph_type = 'class'
    for threshold_index, a_threshold in enumerate(
            threshold_total_count_normal):
        for joint_index, a_value in enumerate(a_threshold):
            if joint_index not in right_mask and graph_type == 'joint':
                continue
            data.append(
                [threshold[threshold_index], joint_index, a_value, 'normal'])
    for threshold_index, a_threshold in enumerate(
            threshold_total_count_abnormal):
        for joint_index, a_value in enumerate(a_threshold):
            if joint_index not in right_mask and graph_type == 'joint':
                continue
            data.append(
                [threshold[threshold_index], joint_index, a_value, 'abnormal'])
    detect_df = pd.DataFrame(data=data,
                             columns=['threshold', 'joint', 'ratio', 'class'])
    sns.set("talk")
    ax = sns.lineplot(x='threshold', y='ratio', data=detect_df,
                      hue=graph_type)  # use hue=joint or class to change fig
    plt.show()

    # now calculate COCO results
    coco_gt = COCO(to_load_list_normal[0])
    coco_dt = coco_gt.loadRes(output_json)
    coco_eval = COCOeval(coco_gt, coco_dt, 'keypoints')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    print('done!')
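# Note on the keypoints run above: with iouType='keypoints', COCOeval matches
# detections to ground truth by OKS (object keypoint similarity) rather than
# box IoU. For a non-COCO skeleton the per-keypoint falloff constants can be
# overridden; the array below holds pycocotools' 17 default COCO sigmas
# (sketch, reusing coco_gt/coco_dt from above).
import numpy as np

coco_eval = COCOeval(coco_gt, coco_dt, 'keypoints')
coco_eval.params.kpt_oks_sigmas = np.array(
    [.26, .25, .25, .35, .35, .79, .79, .72, .72,
     .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()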
Example #36
def main(argv):

    yolov4 = tf.keras.models.load_model(FLAGS.model, compile=False)
    predictor = Predictor(yolov4=yolov4)
    anno = COCO(join(FLAGS.annotation_dir, 'instances_val2017.json'))
    count = 1
    detections = list()  # accumulate detections over all images for loadRes below
    for imgid in anno.getImgIds():
        print("processing (%d/%d)" % (count, len(anno.getImgIds())))
        # predict
        img_info = anno.loadImgs([imgid])[0]
        img = cv2.imread(join(FLAGS.coco_eval_dir, img_info['file_name']))
        boundings = predictor.predict(img).numpy()
        # collect results
        if debug_mode:
            color_map = dict()
            img_gt = img.copy()
        for bounding in boundings:
            detections.append([
                imgid, bounding[0], bounding[1], bounding[2] - bounding[0],
                bounding[3] - bounding[1], bounding[4],
                label_map.index(int(bounding[5]) + 1)
            ])
            if debug_mode:
                if bounding[5].astype('int32') not in color_map:
                    color_map[bounding[5].astype('int32')] = tuple(
                        np.random.randint(low=0, high=256,
                                          size=(3, )).tolist())
                cv2.rectangle(img,
                              tuple(bounding[0:2].astype('int32').tolist()),
                              tuple(bounding[2:4].astype('int32').tolist()),
                              color_map[bounding[5].astype('int32')], 1)
                cv2.putText(
                    img,
                    list(
                        filter(
                            lambda x: x['id'] == label_map.index(
                                int(bounding[5]) + 1),
                            anno.dataset['categories']))[0]['name'],
                    tuple(bounding[0:2].astype('int32').tolist()),
                    cv2.FONT_HERSHEY_PLAIN, 1,
                    color_map[bounding[5].astype('int32')], 2)
        if debug_mode:
            annIds = anno.getAnnIds(imgIds=imgid)
            anns = anno.loadAnns(annIds)
            for ann in anns:
                bbox_x, bbox_y, bbox_w, bbox_h = ann['bbox']
                if label_map[ann['category_id']] - 1 not in color_map:
                    color_map[label_map[ann['category_id']] - 1] = tuple(
                        np.random.randint(low=0, high=256,
                                          size=(3, )).tolist())
                cv2.rectangle(
                    img_gt,
                    (int(bbox_x), int(bbox_y), int(bbox_w), int(bbox_h)),
                    color_map[label_map[ann['category_id']] - 1], 1)
                cv2.putText(
                    img_gt,
                    list(
                        filter(lambda x: x['id'] == ann['category_id'],
                               anno.dataset['categories']))[0]['name'],
                    (int(bbox_x), int(bbox_y)), cv2.FONT_HERSHEY_PLAIN, 1,
                    color_map[label_map[ann['category_id']] - 1], 2)
        if debug_mode:
            stacked = np.concatenate([img, img_gt], axis=0)
            cv2.imshow('detect (up), ground truth (down)', stacked)
            cv2.waitKey()
        count += 1
    cocoDt = anno.loadRes(np.array(detections))
    cocoEval = COCOeval(anno, cocoDt, iouType='bbox')
    cocoEval.params.imgIds = anno.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
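# Unlike the JSON-writing examples, the snippet above hands loadRes() a numpy
# array: pycocotools also accepts an (N, 7) array whose rows are
# [image_id, x_min, y_min, width, height, score, category_id] (converted
# internally by loadNumpyAnnotations). Minimal sketch, with `anno` being the
# COCO ground-truth object from above:
import numpy as np

detections = np.array([
    [42, 258.15, 41.29, 348.26, 243.78, 0.94, 18],  # one detection row
    [42, 10.00, 20.00, 50.00, 60.00, 0.31, 1],
])
cocoDt = anno.loadRes(detections)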
Example #37
def coco_eval(model, coco, cocoGt, encoder, inv_map, threshold,
              epoch, iteration, use_cuda=True):
    from pycocotools.cocoeval import COCOeval
    print("")
    model.eval()
    if use_cuda:
        model.cuda()
    ret = []

    overlap_threshold = 0.50
    nms_max_detections = 200
    ssd_print(key=mlperf_log.NMS_THRESHOLD,
                         value=overlap_threshold, sync=False)
    ssd_print(key=mlperf_log.NMS_MAX_DETECTIONS,
                         value=nms_max_detections, sync=False)

    ssd_print(key=mlperf_log.EVAL_START, value=epoch, sync=False)

    start = time.time()
    for idx, image_id in enumerate(coco.img_keys):
        img, (htot, wtot), _, _ = coco[idx]

        with torch.no_grad():
            print("Parsing image: {}/{}".format(idx+1, len(coco)), end="\r")
            inp = img.unsqueeze(0)
            if use_cuda:
                inp = inp.cuda()
            ploc, plabel = model(inp)

            try:
                result = encoder.decode_batch(ploc, plabel,
                                              overlap_threshold,
                                              nms_max_detections)[0]

            except Exception:
                #raise
                print("")
                print("No object detected in idx: {}".format(idx))
                continue

            loc, label, prob = [r.cpu().numpy() for r in result]
            for loc_, label_, prob_ in zip(loc, label, prob):
                ret.append([image_id, loc_[0]*wtot, \
                                      loc_[1]*htot,
                                      (loc_[2] - loc_[0])*wtot,
                                      (loc_[3] - loc_[1])*htot,
                                      prob_,
                                      inv_map[label_]])
    print("")
    print("Predicting Ended, total time: {:.2f} s".format(time.time()-start))

    cocoDt = cocoGt.loadRes(np.array(ret))

    E = COCOeval(cocoGt, cocoDt, iouType='bbox')
    E.evaluate()
    E.accumulate()
    E.summarize()
    print("Current AP: {:.5f} AP goal: {:.5f}".format(E.stats[0], threshold))

    # put your model back into training mode
    model.train()

    current_accuracy = E.stats[0]
    ssd_print(key=mlperf_log.EVAL_SIZE, value=idx + 1)
    ssd_print(key=mlperf_log.EVAL_ACCURACY,
                         value={"epoch": epoch,
                                "value": current_accuracy},
              sync=False)
    ssd_print(key=mlperf_log.EVAL_ITERATION_ACCURACY,
                         value={"iteration": iteration,
                                "value": current_accuracy},
              sync=False)
    ssd_print(key=mlperf_log.EVAL_TARGET, value=threshold, sync=False)
    ssd_print(key=mlperf_log.EVAL_STOP, value=epoch, sync=False)
    return current_accuracy >= threshold  # Average Precision (AP) @[ IoU=0.50:0.95 | area=all | maxDets=100 ]
Example #38
def infer(model, path, detections_file, resize, max_size, batch_size, mixed_precision=True, is_master=True, world=0,
          annotations=None, use_dali=True, is_validation=False, verbose=True, rotated_bbox=False):
    'Run inference on images from path'

    backend = 'pytorch' if isinstance(model, Model) or isinstance(model, DDP) else 'tensorrt'

    stride = model.module.stride if isinstance(model, DDP) else model.stride

    # Create annotations if none was provided
    if not annotations:
        annotations = tempfile.mktemp('.json')
        images = [{'id': i, 'file_name': f} for i, f in enumerate(os.listdir(path))]
        json.dump({'images': images}, open(annotations, 'w'))

    # TensorRT only supports fixed input sizes, so override input size accordingly
    if backend == 'tensorrt': max_size = max(model.input_size)

    # Prepare dataset
    if verbose: print('Preparing dataset...')
    if rotated_bbox:
        if use_dali: raise NotImplementedError("This repo does not currently support DALI for rotated bbox.")
        data_iterator = RotatedDataIterator(path, resize, max_size, batch_size, stride,
                                            world, annotations, training=False)
    else:
        data_iterator = (DaliDataIterator if use_dali else DataIterator)(
            path, resize, max_size, batch_size, stride,
            world, annotations, training=False)
    
    if verbose: print(data_iterator)

    # Prepare model
    if backend == 'pytorch':
        # If we are doing validation during training,
        # no need to register model with AMP again
        if not is_validation:
            if torch.cuda.is_available(): model = model.cuda()
            model = amp.initialize(model, None,
                                   opt_level='O2' if mixed_precision else 'O0',
                                   keep_batchnorm_fp32=True,
                                   verbosity=0)

        model.eval()

    if verbose:
        print('   backend: {}'.format(backend))
        print('    device: {} {}'.format(
            world, 'cpu' if not torch.cuda.is_available() else 'GPU' if world == 1 else 'GPUs'))
        print('     batch: {}, precision: {}'.format(batch_size,
                                                     'unknown' if backend == 'tensorrt' else 'mixed' if mixed_precision else 'full'))
        print(' BBOX type:', 'rotated' if rotated_bbox else 'axis aligned')
        print('Running inference...')

    results = []
    profiler = Profiler(['infer', 'fw'])
    with torch.no_grad():
        for i, (data, ids, ratios) in enumerate(data_iterator):
            # Forward pass
            profiler.start('fw')
            scores, boxes, classes = model(data, rotated_bbox) #Need to add model size (B, 3, W, H)
            profiler.stop('fw')

            results.append([scores, boxes, classes, ids, ratios])

            profiler.bump('infer')
            if verbose and (profiler.totals['infer'] > 60 or i == len(data_iterator) - 1):
                size = len(data_iterator.ids)
                msg = '[{:{len}}/{}]'.format(min((i + 1) * batch_size,
                                                 size), size, len=len(str(size)))
                msg += ' {:.3f}s/{}-batch'.format(profiler.means['infer'], batch_size)
                msg += ' (fw: {:.3f}s)'.format(profiler.means['fw'])
                msg += ', {:.1f} im/s'.format(batch_size / profiler.means['infer'])
                print(msg, flush=True)

                profiler.reset()

    # Gather results from all devices
    if verbose: print('Gathering results...')
    results = [torch.cat(r, dim=0) for r in zip(*results)]
    if world > 1:
        for r, result in enumerate(results):
            all_result = [torch.ones_like(result, device=result.device) for _ in range(world)]
            torch.distributed.all_gather(list(all_result), result)
            results[r] = torch.cat(all_result, dim=0)

    if is_master:
        # Copy buffers back to host
        results = [r.cpu() for r in results]

        # Collect detections
        detections = []
        processed_ids = set()
        for scores, boxes, classes, image_id, ratios in zip(*results):
            image_id = image_id.item()
            if image_id in processed_ids:
                continue
            processed_ids.add(image_id)

            keep = (scores > 0).nonzero()
            scores = scores[keep].view(-1)
            if rotated_bbox:
                boxes = boxes[keep, :].view(-1, 6)
                boxes[:, :4] /= ratios
            else:
                boxes = boxes[keep, :].view(-1, 4) / ratios
            classes = classes[keep].view(-1).int()

            for score, box, cat in zip(scores, boxes, classes):
                if rotated_bbox:
                    x1, y1, x2, y2, sin, cos = box.data.tolist()
                    theta = np.arctan2(sin, cos)
                    w = x2 - x1 + 1
                    h = y2 - y1 + 1
                    seg = rotate_box([x1, y1, w, h, theta])
                else:
                    x1, y1, x2, y2 = box.data.tolist()
                cat = cat.item()
                if 'annotations' in data_iterator.coco.dataset:
                    cat = data_iterator.coco.getCatIds()[cat]
                this_det = {
                    'image_id': image_id,
                    'score': score.item(),
                    'category_id': cat}
                if rotated_bbox:
                    this_det['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1, theta]
                    this_det['segmentation'] = [seg]
                else:
                    this_det['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1]

                detections.append(this_det)

        if detections:
            # Save detections
            if detections_file and verbose: print('Writing {}...'.format(detections_file))
            detections = {'annotations': detections}
            detections['images'] = data_iterator.coco.dataset['images']
            if 'categories' in data_iterator.coco.dataset:
                detections['categories'] = [data_iterator.coco.dataset['categories']]
            if detections_file:
                json.dump(detections, open(detections_file, 'w'), indent=4)

            # Evaluate model on dataset
            if 'annotations' in data_iterator.coco.dataset:
                if verbose: print('Evaluating model...')
                with redirect_stdout(None):
                    coco_pred = data_iterator.coco.loadRes(detections['annotations'])
                    if rotated_bbox:
                        coco_eval = COCOeval(data_iterator.coco, coco_pred, 'segm')
                    else:
                        coco_eval = COCOeval(data_iterator.coco, coco_pred, 'bbox')
                    coco_eval.evaluate()
                    coco_eval.accumulate()
                coco_eval.summarize()
        else:
            print('No detections!')
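# The example above evaluates rotated boxes with iouType='segm' because
# COCOeval's bbox IoU is axis-aligned; the rotate_box() helper it calls is
# not shown here. A hypothetical stand-in under that assumption: convert
# [x_min, y_min, w, h, theta] to a flat 8-value COCO polygon.
import numpy as np

def rotate_box_sketch(x, y, w, h, theta):
    """Return [x1, y1, ..., x4, y4], rotating the box about its center."""
    cx, cy = x + w / 2.0, y + h / 2.0
    c, s = np.cos(theta), np.sin(theta)
    corners = np.array([[-w / 2.0, -h / 2.0], [w / 2.0, -h / 2.0],
                        [w / 2.0, h / 2.0], [-w / 2.0, h / 2.0]])
    rotated = corners @ np.array([[c, s], [-s, c]])  # row-vector rotation
    return (rotated + [cx, cy]).flatten().tolist()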
Example #39
def evaluate_coco(generator, model, threshold=0.05):
    """ Use the pycocotools to evaluate a COCO model on a dataset.

    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        threshold : The score threshold to use.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in progressbar.progressbar(range(generator.size()), prefix='COCO evaluation: '):
        image = generator.load_image(index)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))

        # correct boxes for image scale
        boxes /= scale

        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]

        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id'    : generator.image_ids[index],
                'category_id' : generator.label_to_coco_label(label),
                'score'       : float(score),
                'bbox'        : box.tolist(),
            }

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[index])

    if not len(results):
        return

    # write output
    json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
Example #40
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 iou_thr_by_class=0.2,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. If not
                specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            iou_thr_by_class (float): IoU threshold used for the classwise
                per-class AP computation. Default: 0.2.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                mAP and recalls. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str, float]
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        bbox_results = []
        embedding_results = []
        for res in results:
            bbox_results.append(res[0])
            embedding_results.append(res[1])

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(bbox_results, jsonfile_prefix)

        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(bbox_results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset is empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.imgIds = self.img_ids
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.params.maxDets = list(proposal_nums)
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                metric_items = [
                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000',
                    'AR_l@1000'
                ]
                for i, item in enumerate(metric_items):
                    val = float('{:.3f}'.format(cocoEval.stats[i + 6]))
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    gt_lst = load_coco_bboxes(cocoGt, is_gt=True)
                    dt_lst = load_coco_bboxes(cocoDt, is_gt=False)
                    evaluator = Evaluator()
                    ret, mAP = evaluator.GetMAPbyClass(
                        gt_lst,
                        dt_lst,
                        method='EveryPointInterpolation',
                        iou_thr=iou_thr_by_class)
                    # Get metric values per each class
                    for metricsPerClass in ret:
                        cl = metricsPerClass['class']
                        ap = metricsPerClass['AP']
                        ap_str = '{0:.3f}'.format(ap)
                        eval_results['class_{}'.format(cl)] = float(ap_str)
                        print('AP: %s (%s)' % (ap_str, cl))
                    mAP_str = '{0:.3f}'.format(mAP)
                    eval_results['mAP'] = float(mAP_str)
                    print('mAP: {}\n'.format(mAP_str))
                metric_items = [
                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                ]
                for i in range(len(metric_items)):
                    key = '{}_{}'.format(metric, metric_items[i])
                    val = float('{:.3f}'.format(cocoEval.stats[i]))
                    eval_results[key] = val
                eval_results['{}_mAP_copypaste'.format(metric)] = (
                    '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
Example #41
import pylab
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
pylab.rcParams['figure.figsize'] = (10.0, 8.0)

annType = 'bbox'

#
annFile = './coco_data/person_instances_val2017.json'
cocoGt = COCO(annFile)

resFile = './trained25987_w_person_2017_result.json'
cocoDt = cocoGt.loadRes(resFile)

# annFile = './coco_data/instances_val2014.json'
# cocoGt = COCO(annFile)
#
#
# resFile = './coco_data/instances_val2014_fakebbox100_results.json'
# cocoDt = cocoGt.loadRes(resFile)

# imgIds = sorted(cocoGt.getImgIds())
# resFile = './corr_wh_pertrain_person_2017.json'
# anns = json.load(open(resFile))
# imgIds = list(set([ele['image_id'] for ele in anns]))

cocoEval = COCOeval(cocoGt, cocoDt, annType)
# cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
Example #42
def evaluate_coco(dataset, model, threshold=0.05):

    model.eval()

    with torch.no_grad():

        # start collecting results
        results = []
        image_ids = []

        for index in range(len(dataset)):
            data = dataset[index]
            scale = data['scale']
            progress_bar(index, len(dataset), "Evaluating........")

            # run network
            scores, labels, boxes = model(data['img'].permute(
                2, 0, 1).cuda().float().unsqueeze(dim=0))
            scores = scores.cpu()
            labels = labels.cpu()
            boxes = boxes.cpu()

            # correct boxes for image scale
            boxes /= scale

            # change to (x, y, w, h) (MS COCO standard)
            if boxes.shape[0] > 0:
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]

            # compute predicted labels and scores
            #for box, score, label in zip(boxes[0], scores[0], labels[0]):
            for box_id in range(boxes.shape[0]):
                score = float(scores[box_id])
                label = int(labels[box_id])
                box = boxes[box_id, :]

                # scores are sorted, so we can break
                if score < threshold:
                    break

                # append detection for each positively labeled class
                image_result = {
                    'image_id': dataset.image_ids[index],
                    'category_id': dataset.label_to_coco_label(label),
                    'score': float(score),
                    'bbox': box.tolist(),
                }

                # append detection to results
                results.append(image_result)

            # append image to list of processed images
            image_ids.append(dataset.image_ids[index])

            # print progress

        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'),
                  indent=4)

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        model.train()

        return
Example #43
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(.5,
                                   0.95,
                                   int(np.round((0.95 - .5) / .05)) + 1,
                                   endpoint=True)
        else:
            iou_thrs = np.array(iou_thrs)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset is empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_10': 12,
                'mAP_30': 13,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')

            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]

                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]

                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                # ap = cocoEval.stats[:6]
                # eval_results[f'{metric}_mAP_copypaste'] = (
                #     f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                #     f'{ap[4]:.3f} {ap[5]:.3f}')
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{cocoEval.stats[0]:.3f} {cocoEval.stats[12]:.3f} '
                    f'{cocoEval.stats[13]:.3f} {cocoEval.stats[1]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
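
The evaluate/accumulate/summarize sequence repeated throughout these examples can be exercised end to end without any dataset on disk. Below is a minimal, self-contained sketch; the single image, box, and category are synthetic values made up for illustration:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# minimal synthetic ground truth: one image, one annotation, one category
coco_gt = COCO()
coco_gt.dataset = {
    'images': [{'id': 1, 'width': 100, 'height': 100}],
    'annotations': [{'id': 1, 'image_id': 1, 'category_id': 1,
                     'bbox': [10, 10, 30, 40], 'area': 1200, 'iscrowd': 0}],
    'categories': [{'id': 1, 'name': 'object'}],
}
coco_gt.createIndex()

# detections in COCO results format: (x, y, w, h) with a top-left origin
coco_dt = coco_gt.loadRes([{'image_id': 1, 'category_id': 1,
                            'bbox': [12, 11, 29, 38], 'score': 0.9}])

coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
print('mAP@0.5:0.95 =', coco_eval.stats[0])
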
Example #44
def evaluate_coco(generator, model, threshold=0.05):
    # start collecting results
    results = []
    image_ids = []
    for i in range(generator.size()):
        image = generator.load_image(i)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        # run network
        _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))

        # clip to image shape
        detections[:, :, 0] = np.maximum(0, detections[:, :, 0])
        detections[:, :, 1] = np.maximum(0, detections[:, :, 1])
        detections[:, :, 2] = np.minimum(image.shape[1], detections[:, :, 2])
        detections[:, :, 3] = np.minimum(image.shape[0], detections[:, :, 3])

        # correct boxes for image scale
        detections[0, :, :4] /= scale

        # change to (x, y, w, h) (MS COCO standard)
        detections[:, :, 2] -= detections[:, :, 0]
        detections[:, :, 3] -= detections[:, :, 1]

        # compute predicted labels and scores
        for detection in detections[0, ...]:
            positive_labels = np.where(detection[4:] > threshold)[0]

            # append detections for each positively labeled class
            for label in positive_labels:
                image_result = {
                    'image_id'    : generator.image_ids[i],
                    'category_id' : generator.label_to_coco_label(label),
                    'score'       : float(detection[4 + label]),
                    'bbox'        : (detection[:4]).tolist(),
                }

                # append detection to results
                results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[i])

        # print progress
        print('{}/{}'.format(i, generator.size()), end='\r')

    if not len(results):
        return

    # write output
    json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
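
Both evaluate_coco implementations above convert network boxes from (x1, y1, x2, y2) to the (x, y, w, h) layout COCO expects by subtracting the top-left corner in place. The same step as a standalone helper:

import numpy as np

def xyxy_to_xywh(boxes):
    """Convert top-left/bottom-right (x1, y1, x2, y2) boxes to COCO (x, y, w, h)."""
    boxes = np.asarray(boxes, dtype=np.float64).copy()
    boxes[:, 2] -= boxes[:, 0]  # width  = x2 - x1
    boxes[:, 3] -= boxes[:, 1]  # height = y2 - y1
    return boxes

print(xyxy_to_xywh([[10.0, 20.0, 50.0, 80.0]]))  # [[10. 20. 40. 60.]]
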
Example #45
def evaluate_coco(generator, model, threshold=0.05):
    """ Use the pycocotools to evaluate a COCO model on a dataset.

    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        threshold : The score threshold to use.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in tqdm(range(len(generator))):
        img, gt_boxes, gt_labels, scale = generator[index]
        # run network
        scores, labels, boxes = model(img.unsqueeze(dim=0).cuda())
        scores = scores.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
        boxes = boxes.detach().cpu().numpy()
        # correct boxes for image scale
        boxes /= scale
        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]

        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id': generator.ids[index],
                'category_id': generator.id2category[label],
                'score': float(score),
                'bbox': box.tolist(),
            }

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.ids[index])

    if not len(results):
        return

    # write output
    json.dump(results, open('coco_bbox_results.json', 'w'), indent=4)
    # json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('coco_bbox_results.json')

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
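
A hypothetical call site for the function above; the generator and model constructors are placeholders, not names from the source:

# hypothetical usage sketch (constructor names are illustrative only):
# generator = CocoDataset(root='data/coco', set_name='val2017')
# model = load_detector('retinanet.pth').cuda().eval()
# stats = evaluate_coco(generator, model, threshold=0.05)
# if stats is not None:
#     print('mAP@0.5:0.95:', stats[0], ' mAP@0.5:', stats[1])
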
Example #46
def coco_evaluate_cvidata(dataset, model, threshold=0.05, valid_class_id=None):
    model.eval()
    with torch.no_grad():
        # start collecting results
        results = []
        image_ids = []
        for index in range(len(dataset)):
            data = dataset[index]
            scale = data['scale']

            # run network
            scores, labels, boxes = model(data['img'].permute(
                2, 0, 1).cuda().float().unsqueeze(dim=0))
            scores = scores.cpu()
            labels = labels.cpu()
            boxes = boxes.cpu()

            # correct boxes for image scale
            boxes /= scale

            if boxes.shape[0] > 0:
                # change to (x, y, w, h) (MS COCO standard)
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]

                # compute predicted labels and scores
                for box_id in range(boxes.shape[0]):
                    score = float(scores[box_id])
                    label = int(labels[box_id])
                    box = boxes[box_id, :]

                    if valid_class_id and label not in valid_class_id:
                        continue

                    # scores are sorted, so we can break
                    if score < threshold:
                        break

                    # append detection for each positively labeled class
                    image_result = {
                        'image_id': dataset.image_ids[index],
                        'category_id': dataset.label_to_cvidata_label(label),
                        'score': float(score),
                        'bbox': box.tolist(),
                    }

                    # append detection to results
                    results.append(image_result)

            # append image to list of processed images
            image_ids.append(dataset.image_ids[index])

            # print progress
            print('{}/{}'.format(index, len(dataset)), end='\r')

        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format('tmp'), 'w'),
                  indent=4)

        # load results in CVIData evaluation tool
        cvidata_true = dataset.cvi_data
        cvidata_pred = cvidata_true.loadRes(
            '{}_bbox_results.json'.format('tmp'))

        # run COCO evaluation
        coco_eval = COCOeval(cvidata_true, cvidata_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        model.train()

        stats_info = '''\
        Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:0.3f}
        Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = {:0.3f}
        Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = {:0.3f}
        Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:0.3f}
        Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:0.3f}
        Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = {:0.3f}
        Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = {:0.3f}\n'''
        return stats_info.format(*coco_eval.stats)
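
The stats_info template above spells out what each entry of coco_eval.stats means; the same mapping, kept as a dictionary for programmatic lookups:

# index -> meaning of COCOeval.stats for the default bbox/segm parameters
COCO_STATS = {
    0: 'AP @ IoU=0.50:0.95 | area=all    | maxDets=100',
    1: 'AP @ IoU=0.50      | area=all    | maxDets=100',
    2: 'AP @ IoU=0.75      | area=all    | maxDets=100',
    3: 'AP @ IoU=0.50:0.95 | area=small  | maxDets=100',
    4: 'AP @ IoU=0.50:0.95 | area=medium | maxDets=100',
    5: 'AP @ IoU=0.50:0.95 | area=large  | maxDets=100',
    6: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=1',
    7: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=10',
    8: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=100',
    9: 'AR @ IoU=0.50:0.95 | area=small  | maxDets=100',
    10: 'AR @ IoU=0.50:0.95 | area=medium | maxDets=100',
    11: 'AR @ IoU=0.50:0.95 | area=large  | maxDets=100',
}
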
Example #47
def test(
        data,
        weights=None,
        batch_size=16,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir='',
        merge=False,
        save_txt=False):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)
        merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
        if save_txt:
            out = Path('inference/output')
            if os.path.exists(out):
                shutil.rmtree(out)  # delete output folder
            os.makedirs(out)  # make new output folder

        # Remove previous
        for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
            os.remove(f)

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95,
                          10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       model.stride.max(),
                                       opt,
                                       hyp=None,
                                       augment=False,
                                       cache=False,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    names = model.names if hasattr(model, 'names') else model.module.names
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss([x.float() for x in train_out], targets,
                                     model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         merge=merge)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0
                                                  ]]  # normalization gain whwh
                x = pred.clone()
                x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4],
                                        shapes[si][0],
                                        shapes[si][1])  # to original
                for *xyxy, conf, cls in x:
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh
                    with open(str(out / Path(paths[si]).stem) + '.txt',
                              'a') as f:
                        f.write(
                            ('%g ' * 5 + '\n') % (cls, *xywh))  # label format

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = Path(paths[si]).stem
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id':
                        int(image_id) if image_id.isnumeric() else image_id,
                        'category_id':
                        coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score':
                        round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(
                        -1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(
                        -1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if batch_i < 1:
            f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i)  # filename
            plot_images(img, targets, paths, str(f), names)  # ground truth
            f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
            plot_images(img, output_to_target(output, width, height), paths,
                        str(f), names)  # predictions

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(
            1)  # [P, R, mAP@0.5, mAP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Save JSON
    if save_json and len(jdict):
        f = 'detections_val2017_%s_results.json' % \
            (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '')  # filename
        print('\nCOCO mAP with pycocotools... saving %s...' % f)
        with open(f, 'w') as file:
            json.dump(jdict, file)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
            cocoGt = COCO(
                glob.glob('../coco/annotations/instances_val*.json')
                [0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes(f)  # initialize COCO pred api
            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # image IDs to evaluate
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print('ERROR: pycocotools unable to run: %s' % e)

    # Return results
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
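
The jdict entries assembled in the save_json branch above follow the plain COCO results format, so writing and reloading them is ordinary JSON handling (the example values are the ones from the comment in the code):

import json

# one detection record in COCO results format, as built for jdict above
record = {'image_id': 42, 'category_id': 18,
          'bbox': [258.15, 41.29, 348.26, 243.78], 'score': 0.236}

with open('detections.json', 'w') as f:
    json.dump([record], f)

with open('detections.json') as f:
    assert json.load(f)[0]['image_id'] == 42
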
Example #48

if __name__ == '__main__':
	# minimal imports and argument parsing added so this fragment runs
	# standalone; the flag names come from the args.* usages below
	import argparse
	from pycocotools.coco import COCO
	from pycocotools.cocoeval import COCOeval

	parser = argparse.ArgumentParser(description='Evaluate COCO-format detections.')
	parser.add_argument('--gt_ann_file', required=True)
	parser.add_argument('--bbox_det_file')
	parser.add_argument('--mask_det_file')
	parser.add_argument('--eval_type', default='both',
	                    choices=('bbox', 'mask', 'both'))
	args = parser.parse_args()

	eval_bbox = (args.eval_type in ('bbox', 'both'))
	eval_mask = (args.eval_type in ('mask', 'both'))

	print('Loading annotations...')
	gt_annotations = COCO(args.gt_ann_file)
	if eval_bbox:
		bbox_dets = gt_annotations.loadRes(args.bbox_det_file)
	if eval_mask:
		mask_dets = gt_annotations.loadRes(args.mask_det_file)

	if eval_bbox:
		print('\nEvaluating BBoxes:')
		bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')
		bbox_eval.evaluate()
		bbox_eval.accumulate()
		bbox_eval.summarize()
	
	if eval_mask:
		print('\nEvaluating Masks:')
		mask_eval = COCOeval(gt_annotations, mask_dets, 'segm')
		mask_eval.evaluate()
		mask_eval.accumulate()
		mask_eval.summarize()



Example #49
class EvalCoco(object):
    def __init__(self,
                 coco,
                 processor,
                 *,
                 max_per_image=20,
                 category_ids=None,
                 iou_type='keypoints',
                 small_threshold=0.0):
        if category_ids is None:
            category_ids = [1]

        self.coco = coco
        self.processor = processor
        self.max_per_image = max_per_image
        self.category_ids = category_ids
        self.iou_type = iou_type
        self.small_threshold = small_threshold

        self.predictions = []
        self.image_ids = []
        self.eval = None
        self.decoder_time = 0.0
        self.nn_time = 0.0

        LOG.debug('max = %d, category ids = %s, iou_type = %s',
                  self.max_per_image, self.category_ids, self.iou_type)

    def stats(self, predictions=None, image_ids=None):
        # from pycocotools.cocoeval import COCOeval
        if predictions is None:
            predictions = self.predictions
        if image_ids is None:
            image_ids = self.image_ids

        coco_dt = self.coco.loadRes(predictions)

        self.eval = COCOeval(self.coco, coco_dt, iouType=self.iou_type)
        LOG.info('cat_ids: %s', self.category_ids)
        if self.category_ids:
            self.eval.params.catIds = self.category_ids

        if image_ids is not None:
            print('image ids', image_ids)
            self.eval.params.imgIds = image_ids
        self.eval.evaluate()
        self.eval.accumulate()
        self.eval.summarize()
        return self.eval.stats

    @staticmethod
    def count_ops(model, height=641, width=641):
        device = next(model.parameters()).device
        dummy_input = torch.randn(1, 3, height, width, device=device)
        gmacs, params = thop.profile(model, inputs=(dummy_input, ))
        LOG.info('GMACs = {0:.2f}, million params = {1:.2f}'.format(
            gmacs / 1e9, params / 1e6))
        return gmacs, params

    @staticmethod
    def view_annotations(meta, predictions, ground_truth):
        annotation_painter = show.AnnotationPainter()
        with open(os.path.join(IMAGE_DIR_VAL, meta['file_name']), 'rb') as f:
            cpu_image = PIL.Image.open(f).convert('RGB')

        with show.image_canvas(cpu_image) as ax:
            annotation_painter.annotations(ax, predictions)

        if ground_truth:
            with show.image_canvas(cpu_image) as ax:
                show.white_screen(ax)
                annotation_painter.annotations(ax, ground_truth, color='grey')
                annotation_painter.annotations(ax, predictions)

    def from_predictions(self, predictions, meta, debug=False, gt=None):
        image_id = int(meta['image_id'])
        self.image_ids.append(image_id)

        predictions = transforms.Preprocess.annotations_inverse(
            predictions, meta)
        if self.small_threshold:
            predictions = [
                pred for pred in predictions
                if pred.scale(v_th=0.01) >= self.small_threshold
            ]
        if len(predictions) > self.max_per_image:
            predictions = predictions[:self.max_per_image]

        if debug:
            gt_anns = []
            for g in gt:
                if 'bbox' in g:
                    gt_anns.append(
                        AnnotationDet(COCO_CATEGORIES).set(
                            g['category_id'] - 1, None, g['bbox']))
                if 'keypoints' in g:
                    gt_anns.append(
                        Annotation(COCO_KEYPOINTS,
                                   COCO_PERSON_SKELETON).set(g['keypoints'],
                                                             fixed_score=None))
            gt_anns = transforms.Preprocess.annotations_inverse(gt_anns, meta)
            self.view_annotations(meta, predictions, gt_anns)

        image_annotations = []
        for pred in predictions:
            pred_data = pred.json_data()
            pred_data['image_id'] = image_id
            pred_data = {
                k: v
                for k, v in pred_data.items()
                if k in ('category_id', 'score', 'keypoints', 'bbox',
                         'image_id')
            }
            image_annotations.append(pred_data)

        # force at least one annotation per image (for pycocotools)
        if not image_annotations:
            image_annotations.append({
                'image_id': image_id,
                'category_id': 1,
                'keypoints': np.zeros((17 * 3, )).tolist(),
                'bbox': [0, 0, 1, 1],
                'score': 0.001,
            })

        if debug:
            self.stats(image_annotations, [image_id])
            LOG.debug(meta)

        self.predictions += image_annotations

    def write_predictions(self, filename):
        predictions = [{
            k: v
            for k, v in annotation.items()
            if k in ('image_id', 'category_id', 'keypoints', 'score')
        } for annotation in self.predictions]
        with open(filename + '.pred.json', 'w') as f:
            json.dump(predictions, f)
        LOG.info('wrote %s.pred.json', filename)
        with zipfile.ZipFile(filename + '.zip', 'w') as myzip:
            myzip.write(filename + '.pred.json', arcname='predictions.json')
        LOG.info('wrote %s.zip', filename)
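
The forced per-image annotation in from_predictions above shows the shape pycocotools expects for keypoint results: 17 keypoints flattened into (x, y, visibility) triples plus a score. The same record in isolation:

import numpy as np

# minimal COCO keypoint result: 17 * (x, y, v) values plus a score
keypoint_record = {
    'image_id': 1,
    'category_id': 1,  # person
    'keypoints': np.zeros(17 * 3).tolist(),
    'score': 0.001,
}
assert len(keypoint_record['keypoints']) == 51
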
Example #50
from fast_rcnn.nms_wrapper import nms
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab


if __name__ == '__main__':


	pylab.rcParams['figure.figsize'] = (10.0, 8.0)

	annType = 'bbox'

	ground_truth = '/mnt/d/BigData/COCO/instances_train-val2014/annotations/instances_val2014.json' 
	generated_result = '/mnt/c/Users/Lavenger/git/py-faster-rcnn/tools/result.json'

	cocoGt = COCO(ground_truth)

	cocoDt = cocoGt.loadRes(generated_result)

	imgIds = sorted(cocoGt.getImgIds())

	cocoEval = COCOeval(cocoGt, cocoDt)
	cocoEval.params.imgIds = imgIds
	cocoEval.params.useSegm = False
	cocoEval.evaluate()
	cocoEval.accumulate()
	cocoEval.summarize()


Example #51
def evaluate_coco(generator, model, threshold=0.05):
    """ Use the pycocotools to evaluate a COCO model on a dataset.

    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        threshold : The score threshold to use.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in progressbar.progressbar(range(generator.size()),
                                         prefix='COCO evaluation: '):
        image = generator.load_image(index)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(
            np.expand_dims(image, axis=0))

        # correct boxes for image scale
        boxes /= scale

        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]

        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id': generator.image_ids[index],
                'category_id': generator.label_to_coco_label(label),
                'score': float(score),
                'bbox': box.tolist(),
            }

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[index])

    if not len(results):
        return

    # write output
    json.dump(results,
              open('{}_bbox_results.json'.format(generator.set_name), 'w'),
              indent=4)
    json.dump(image_ids,
              open('{}_processed_image_ids.json'.format(generator.set_name),
                   'w'),
              indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
        generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
Example #52
def test(data,
         weights=None,
         batch_size=16,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for nms
         save_json=False,
         single_cls=False,
         augment=False,
         model=None,
         dataloader=None,
         fast=False,  # 0 fast, 1 accurate
         verbose=False):
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device(opt.device, batch_size=batch_size)

        # Remove previous
        for f in glob.glob('test_batch*.jpg'):
            os.remove(f)

        # Load model
        google_utils.attempt_download(weights)
        model = torch.load(weights, map_location=device)['model']
        torch_utils.model_info(model)

        # Fuse
        # model.fuse()
        model.to(device)

        if device.type != 'cpu' and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)

        training = False
    else:  # called by train.py
        device = next(model.parameters()).device  # get model device
        training = True

    # Configure run
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    # iouv = iouv[0].view(1)  # comment for [email protected]:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        fast |= conf_thres > 0.001  # enable fast mode
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataset = LoadImagesAndLabels(path,
                                      imgsz,
                                      batch_size,
                                      rect=True,  # rectangular inference
                                      single_cls=opt.single_cls,  # single class mode
                                      pad=0.0 if fast else 0.5)  # padding
        batch_size = min(batch_size, len(dataset))
        nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=nw,
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    _ = model(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None  # run once
    names = model.names if hasattr(model, 'names') else model.module.names
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = imgs.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = torch_utils.time_synchronized()
            inf_out, train_out = model(imgs, augment=augment)  # inference and training outputs
            t0 += torch_utils.time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss(train_out, targets, model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, fast=fast)
            t1 += torch_utils.time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])],
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if batch_i < 1:
            f = 'test_batch%g_gt.jpg' % batch_i  # filename
            plot_images(imgs, targets, paths, f, names)  # ground truth
            f = 'test_batch%g_pred.jpg' % batch_i
            plot_images(imgs, output_to_target(output, width, height), paths, f, names)  # predictions

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, mAP@0.5, mAP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Save JSON
    if save_json and map50 and len(jdict):
        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
        f = 'detections_val2017_%s_results.json' % \
            (weights.split(os.sep)[-1].replace('.pt', '') if weights else '')  # filename
        print('\nCOCO mAP with pycocotools... saving %s...' % f)
        with open(f, 'w') as file:
            json.dump(jdict, file)

        try:
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes(f)  # initialize COCO pred api

            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)
        except:
            print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
                  'See https://github.com/cocodataset/cocoapi/issues/356')

    # Return results
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
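
Example #52 above (like #53 below) converts boxes in two steps: xyxy2xywh produces a center-based (cx, cy, w, h) box, and box[:, :2] -= box[:, 2:] / 2 then shifts the origin to the top-left corner that COCO expects. The composition of the two steps, sketched with numpy:

import numpy as np

def xyxy_to_coco(boxes):
    """(x1, y1, x2, y2) -> COCO (x, y, w, h) via the center-format intermediate."""
    b = np.asarray(boxes, dtype=np.float64)
    wh = b[:, 2:4] - b[:, 0:2]          # width, height
    cxcy = (b[:, 0:2] + b[:, 2:4]) / 2  # box center (the xyxy2xywh step)
    return np.concatenate([cxcy - wh / 2, wh], axis=1)  # shift to top-left

print(xyxy_to_coco([[10.0, 20.0, 50.0, 80.0]]))  # [[10. 20. 40. 60.]]
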
Example #53
def test(
        data,
        weights=None,
        batch_size=32,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir=Path(''),  # for saving images
        save_txt=False,  # for auto-labelling
        save_hybrid=False,  # for hybrid auto-labelling
        save_conf=False,  # save auto-label confidences
        plots=True,
        log_imgs=0):  # number of logged images

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(
            increment_path(Path(opt.project) / opt.name,
                           exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(
            parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95,
                          10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # cap logged images at 100
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       model.stride.max(),
                                       opt,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)

    names = {
        k: v
        for k, v in enumerate(
            model.names if hasattr(model, 'names') else model.module.names)
    }
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:
                loss += compute_loss([x.float() for x in train_out], targets,
                                     model)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width,
                                            height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)
                  ] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         labels=lb)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0],
                         shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0
                                                  ]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh

                    line = (cls, *xywh,
                            conf) if save_conf else (cls,
                                                     *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'),
                              'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{
                    "position": {
                        "minX": xyxy[0],
                        "minY": xyxy[1],
                        "maxX": xyxy[2],
                        "maxY": xyxy[3]
                    },
                    "class_id": int(cls),
                    "box_caption": "%s %.3f" % (names[cls], conf),
                    "scores": {
                        "class_score": conf
                    },
                    "domain": "pixel"
                } for *xyxy, conf, cls in pred.tolist()]
                boxes = {
                    "predictions": {
                        "box_data": box_data,
                        "class_labels": names
                    }
                }  # inference-space
                wandb_images.append(
                    wandb.Image(img[si], boxes=boxes, caption=path.name))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(
                    path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id':
                        image_id,
                        'category_id':
                        coco91class[int(p[5])] if is_coco else int(p[5]),
                        'bbox': [round(x, 3) for x in b],
                        'score':
                        round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0],
                             shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(
                        pred, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(
                        -1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(
                        -1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images,
                   args=(img, targets, paths, f, names),
                   daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images,
                   args=(img, output_to_target(output), paths, f, names),
                   daemon=True).start()

    # W&B logging
    if wandb_images:
        wandb.log({"outputs": wandb_images})

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats,
                                              plot=plots,
                                              save_dir=save_dir,
                                              names=names)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(
            1)  # [P, R, mAP@0.5, mAP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            wandb.log({"Images": wandb_images})
            wandb.log({
                "Validation": [
                    wandb.Image(str(f), caption=f.name)
                    for f in sorted(save_dir.glob('test*.jpg'))
                ]
            })

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights
                 ).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [
                    int(Path(x).stem) for x in dataloader.dataset.img_files
                ]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
Example #54
def eval_mscoco_with_segm(cocoGT, cocoPred):
    # running evaluation (note: despite the function name, this evaluates
    # with the 'keypoints' iouType, i.e. OKS-based keypoint matching)
    cocoEval = COCOeval(cocoGT, cocoPred, "keypoints")
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
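
COCOeval's third argument selects the matching criterion; the snippet above picks 'keypoints'. The three supported values and their default maxDets can be inspected directly:

from pycocotools.cocoeval import COCOeval

# the three iouType values COCOeval supports:
#   'bbox'      - IoU between boxes
#   'segm'      - IoU between (RLE-encoded) masks
#   'keypoints' - OKS (object keypoint similarity) between keypoint sets
for iou_type in ('bbox', 'segm', 'keypoints'):
    params_default = COCOeval(iouType=iou_type).params
    print(iou_type, 'maxDets =', params_default.maxDets)
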
Example #55
    def finalize(self, result_dict, ds=None, output_dir=None):
        result_dict["good"] += self.good
        result_dict["total"] += self.total
        image_ids = []

        if self.use_inv_map:
            # for pytorch
            label_map = {}
            with open(ds.annotation_file) as fin:
                annotations = json.load(fin)
            for cnt, cat in enumerate(annotations["categories"]):
                label_map[cat["id"]] = cnt + 1
            inv_map = {v: k for k, v in label_map.items()}

        detections = []
        for batch in range(0, len(self.results)):
            for idx in range(0, len(self.results[batch])):
                detection = self.results[batch][idx]
                # this is the index into the image list
                image_id = int(detection[0])
                image_ids.append(image_id)
                # map it to the coco image it
                detection[0] = ds.image_ids[image_id]
                height, width = ds.image_sizes[image_id]
                # box comes from model as: ymin, xmin, ymax, xmax
                ymin = detection[1] * height
                xmin = detection[2] * width
                ymax = detection[3] * height
                xmax = detection[4] * width
                # pycoco wants {imageID,x1,y1,w,h,score,class}
                detection[1] = xmin
                detection[2] = ymin
                detection[3] = xmax - xmin
                detection[4] = ymax - ymin
                if self.use_inv_map:
                    cat_id = inv_map.get(int(detection[6]), -1)
                    if cat_id == -1:
                        # FIXME:
                        log.info("finalize can't map category {}".format(
                            int(detection[6])))
                    detection[6] = cat_id
                detections.append(np.array(detection))

        # for debugging
        if output_dir:
            pp = []
            for idx, detection in enumerate(detections):
                pp.append({
                    "image_id":
                    int(detection[0]),
                    "image_loc":
                    ds.get_item_loc(image_ids[idx]),
                    "category_id":
                    int(detection[6]),
                    "bbox": [
                        float(detection[1]),
                        float(detection[2]),
                        float(detection[3]),
                        float(detection[4])
                    ],
                    "score":
                    float(detection[5])
                })
            fname = "{}/{}.json".format(output_dir, result_dict["scenario"])
            with open(fname, "w") as fp:
                json.dump(pp, fp, sort_keys=True, indent=4)

        image_ids = list(set([i[0] for i in detections]))
        self.results = []
        cocoGt = pycoco.COCO(ds.annotation_file)
        cocoDt = cocoGt.loadRes(np.array(detections))
        cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox')
        cocoEval.params.imgIds = image_ids
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        result_dict["mAP"] = cocoEval.stats[0]