def _do_detection_eval(self, res_file):
    """Run COCO bbox evaluation on *res_file* and print a CSV summary.

    Loads the detections into the COCO API, runs the standard
    evaluate / accumulate / summarize pipeline, derives the per-metric
    results table, and prints it in CSV form.
    """
    detections = self._COCO.loadRes(res_file)
    evaluator = COCOeval(self._COCO, detections, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    # Keyed summary expected by print_csv_format.
    summary = OrderedDict()
    summary['bbox'] = self._derive_coco_results(
        evaluator, 'bbox', class_names=self.class_name)
    print_csv_format(summary)
def _do_detection_eval(self, res_file, output_dir):
    """Evaluate bbox detections in *res_file* against the COCO ground truth.

    Prints the evaluation metrics and pickles the COCOeval object to
    ``detection_results.pkl`` inside *output_dir*.
    """
    ann_type = 'bbox'
    detections = self._COCO.loadRes(res_file)
    evaluator = COCOeval(self._COCO, detections)
    # Legacy pycocotools flag: False here, since we evaluate boxes.
    evaluator.params.useSegm = (ann_type == 'segm')
    evaluator.evaluate()
    evaluator.accumulate()
    self._print_detection_eval_metrics(evaluator)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    with open(eval_file, 'wb') as fid:
        pickle.dump(evaluator, fid, pickle.HIGHEST_PROTOCOL)
    print('Wrote COCO eval results to: {}'.format(eval_file))
def _do_detection_eval(self, res_file, output_dir):
    """Evaluate bbox detections, optionally restricted to YouTube-BB classes.

    Args:
        res_file: path to a COCO-format detection results JSON file.
        output_dir: directory the pickled COCOeval object is written to.

    Side effects: prints metrics via ``_print_detection_eval_metrics``
    and writes ``detection_results.pkl`` under *output_dir*.
    """
    ann_type = 'bbox'
    coco_dt = self._COCO.loadRes(res_file)
    coco_eval = COCOeval(self._COCO, coco_dt)
    # The two YouTube-BB variants share identical setup code and differ
    # only in the category-name list, so select the list once instead of
    # duplicating the whole branch body.
    if self.classes == "youtube_bb":
        cat_names = classes_youtubebb
    elif self.classes == "youtube_bb_sub":
        cat_names = classes_youtubebb_sub
    else:
        cat_names = None
    if cat_names is not None:
        coco_eval.params.setDetParams()
        coco_eval.params.catIds = self._COCO.getCatIds(catNms=cat_names)
        coco_eval.params.imgIds = self.image_indexes
        print(len(coco_eval.params.imgIds))
    # Legacy pycocotools flag: False here, since we evaluate boxes.
    coco_eval.params.useSegm = (ann_type == 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    self._print_detection_eval_metrics(coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    with open(eval_file, 'wb') as fid:
        pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
    print('Wrote COCO eval results to: {}'.format(eval_file))
def evaluate_detections(FPS=None):
    """Run COCO evaluation on *resFile* and write a text report.

    Relies on module-level globals (``annType``, ``dataDir``, ``prefix``,
    ``dataType``, ``resFile``, ``output_dir``, ``args``).  The report
    file is named after the overall AP scaled by 10000.

    Args:
        FPS: optional iterable of per-run frames-per-second figures to
            append to the report.
    """
    print('Running demo for *%s* results.' % (annType))
    # Initialize COCO ground-truth API.
    annFile = '%s/annotations/%s_%s.json' % (dataDir, prefix, dataType)
    print(annFile)
    cocoGt = COCO(annFile)
    # Initialize COCO detections API.
    cocoDt = cocoGt.loadRes(resFile)
    imgIds = cocoGt.getImgIds()
    # Run the evaluation over every image.
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    # NOTE(review): stock COCOeval.summarize() returns None; this assumes
    # a patched version returning the 12 standard metrics -- TODO confirm.
    means = cocoEval.summarize()

    report_path = os.path.join(output_dir, str(int(means[0] * 10000)) + '.txt')
    with open(report_path, 'w') as res_file:
        res_file.write('CUDA: ' + str(args.cuda) + '\n')
        res_file.write('model_dir: ' + args.model_dir + '\n')
        res_file.write('iteration: ' + args.iteration + '\n')
        res_file.write('model_name: ' + args.model_name + '\n')
        res_file.write('backbone : ' + args.backbone + '\n')
        if args.backbone in ['RefineDet_VGG']:
            res_file.write('refine : ' + str(args.refine) + '\n')
            res_file.write('deform : ' + str(args.deform) + '\n')
            res_file.write('multi-head : ' + str(args.multihead) + '\n')
        res_file.write('ssd_dim: ' + str(args.ssd_dim) + '\n')
        res_file.write('confidence_threshold: ' + str(args.confidence_threshold) + '\n')
        res_file.write('nms_threshold: ' + str(args.nms_threshold) + '\n')
        res_file.write('top_k: ' + str(args.top_k) + '\n')
        res_file.write('dataset_name: ' + str(args.dataset_name) + '\n')
        res_file.write('set_file_name: ' + str(args.set_file_name) + '\n')
        res_file.write('detection: ' + str(args.detection) + '\n')
        res_file.write('~~~~~~~~~~~~~~~~~\n')
        # The 12 standard COCO metrics, in COCOeval.summarize() order.
        # BUG FIX: the "AR area= small" line previously wrote means[8]
        # (AR all, maxDets=100) instead of means[9].
        labels = [
            'Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
            'Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ]',
            'Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ]',
            'Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
            'Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
            'Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
            'Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
        ]
        for label, value in zip(labels, means):
            res_file.write('{} = {:.4f}\n'.format(label, value))
        if FPS:
            for i, f in enumerate(FPS):
                res_file.write(str(i) + ': FPS = {:.4f}\n'.format(f))