Example #1
def do_python_eval(devkit_path,
                   year,
                   image_set,
                   classes,
                   output_dir='results'):

    devkit_path = "/home/nghiant/dataset/VOCdevkit"
    annopath = os.path.join(devkit_path, 'VOC' + year, 'Annotations',
                            '{:s}.xml')
    imagesetfile = os.path.join(devkit_path, 'VOC' + year, 'ImageSets', 'Main',
                                image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    # print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 annopath,
                                 imagesetfile,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
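Most of the examples in this listing toggle `use_07_metric`: VOC2007 scores AP with 11-point interpolation, while later years integrate the area under the precision-recall curve. For reference, here is a minimal sketch of the `voc_ap` helper these `voc_eval` implementations typically delegate to, following the common py-faster-rcnn formulation; it is illustrative, not the exact code of any repository above.

import numpy as np

def voc_ap(rec, prec, use_07_metric=False):
    """Compute AP from recall/precision arrays (py-faster-rcnn style)."""
    if use_07_metric:
        # 11-point interpolation used by the VOC2007 devkit
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            p = 0. if np.sum(rec >= t) == 0 else np.max(prec[rec >= t])
            ap += p / 11.
    else:
        # area under the monotone precision envelope (VOC2010 and later)
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
    return ap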
Example #2
    def _do_python_eval(self, output_dir='output'):
        annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                                'Annotations', '{:s}.xml')
        imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                    'ImageSets', 'Main',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(filename,
                                     annopath,
                                     imagesetfile,
                                     cls,
                                     cachedir,
                                     ovthresh=0.5,
                                     use_07_metric=use_07_metric)
            aps += [ap]

            #if cls == '1':
            #cls='airplane'
            #if cls == '2':
            #cls='storagebank'
            #if cls == '3':
            #cls='bridge'
            #if cls == '4':
            #cls='boat'
            if cls == '5':
                cls = 'dock'
            #if cls == '6':
            #cls='playground'
            #if cls == '7':
            #cls='huanlu'
            #if cls == '8':
            #cls='helicopter'

            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
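The commented-out branches above map numeric class ids to readable names one `if` at a time. If that remapping is needed, a dictionary lookup is a tidier equivalent; the sketch below only collects the names already present in the commented-out code, and `CLASS_ID_TO_NAME` is otherwise a hypothetical table.

# Hypothetical id-to-name table assembled from the commented-out branches above.
CLASS_ID_TO_NAME = {
    '1': 'airplane', '2': 'storagebank', '3': 'bridge', '4': 'boat',
    '5': 'dock', '6': 'playground', '7': 'huanlu', '8': 'helicopter',
}
display_cls = CLASS_ID_TO_NAME.get(cls, cls)  # fall back to the raw label
print('AP for {} = {:.4f}'.format(display_cls, ap))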
Example #3
    def evaluate(self, dataset):
        if self.data_type == 'voc':

            test_root = os.path.join(dataset.root, 'VOCtest_06-Nov-2007',
                                     'VOCdevkit', 'VOC2007', 'Annotations')
            mAP = voc_eval(test_root, self.det_img_name, self.det_additional,
                           self.det_boxes, self.det_scores, self.det_labels)

        elif self.data_type == 'coco':

            _, tmp = tempfile.mkstemp()
            json.dump(self.results, open(tmp, "w"))

            cocoGt = dataset.coco
            cocoDt = cocoGt.loadRes(tmp)

            # https://github.com/argusswift/YOLOv4-pytorch/blob/master/eval/cocoapi_evaluator.py
            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.

            coco_eval = COCOeval(cocoGt=cocoGt, cocoDt=cocoDt, iouType='bbox')
            coco_eval.params.imgIds = self.img_ids
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()

            mAP = coco_eval.stats[0]
            mAP_50 = coco_eval.stats[1]
        return mAP
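In the COCO branch, `cocoGt.loadRes` expects the temporary JSON file to hold the standard COCO results format, and `coco_eval.stats[0]` / `stats[1]` are AP@[0.50:0.95] and AP@0.50. A sketch of how `self.results` is presumably assembled (field names follow the pycocotools API; the surrounding variable names are illustrative):

# One dict per detection, in the pycocotools results format.
results = []
for img_id, box, label, score in zip(img_ids, boxes_xyxy, labels, scores):
    x1, y1, x2, y2 = box
    results.append({
        'image_id': int(img_id),
        'category_id': int(label),  # must match the dataset's category ids
        'bbox': [float(x1), float(y1), float(x2 - x1), float(y2 - y1)],  # [x, y, w, h]
        'score': float(score),
    })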
Example #4
def eval(imageset):
    if not os.path.exists(eval_dir):
        os.mkdir(eval_dir)

    det_list = [
        os.path.join(result_dir, file) for file in os.listdir(result_dir)
    ]
    det_classes = list()
    for file in det_list:
        # result files are named like '<prefix>_<classname>.txt'
        classname = os.path.splitext(os.path.basename(file))[0].split('_')[-1]
        det_classes.append(classname)
        # turn the file name into a '%s' template that voc_eval can fill in per class
        detpath = file.replace(classname, '%s')

    YOLO_PATH = os.path.abspath("../")
    VOC_PATH = os.path.join(YOLO_PATH, 'data', 'VOCdevkit', imageset[0])

    annopath = os.path.join(VOC_PATH, 'Annotations', '%s.xml')
    imagesetfile = os.path.join(VOC_PATH, 'ImageSets', 'Main',
                                imageset[1] + '.txt')

    MAPList = list()
    for classname in det_classes:
        rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                                 eval_dir)
        print('%s\t AP:%.4f' % (classname, ap))
        MAPList.append(ap)

    Map = np.array(MAPList)
    mean_Map = np.mean(Map)
    print('------ Map: %.4f' % (mean_Map))
Example #5
 def do_python_eval(self):
     """
     python evaluation wrapper
     :return: None
     """
     annopath = os.path.join(self.data_path, 'Annotations', '{:s}.xml')
     imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main',
                                  self.image_set + '.txt')
     cache_dir = os.path.join(self.cache_path, self.name)
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self.year) < 2010 else False
     print 'VOC07 metric? ' + ('Y' if use_07_metric else 'No')
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imageset_file,
                                  cls,
                                  cache_dir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
Example #6
 def _do_python_eval(self, output_dir = 'output'):
     annopath = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'ImageSets',
         'Main',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
          # rec and prec are numpy arrays; convert to str before writing
          file_name = open('/home/cuiguochen/ap/ap.txt', 'w+')
          file_name.write(str(rec))
          file_name.write(str(prec))
          file_name.close()
          print(rec, prec)
          pl.plot(rec, prec, lw=2,
                  label='Precision-recall curve of class {} (area = {:.4f})'.format(cls, ap))
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     pl.xlabel('Recall')
     pl.ylabel('Precision')
     plt.grid(True)
     pl.ylim([0.0, 1.05])
     pl.xlim([0.0, 1.0])
     pl.title('Precision-Recall')
     pl.legend(loc="upper right")
     plt.show()
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #7
    def evaluate(self, path_to_results_dir: str, image_ids: List[str], bboxes: List[List[float]], classes: List[int],
                 probs: List[float]) -> Tuple[float, str]:
        self._write_results(path_to_results_dir, image_ids, bboxes, classes, probs)

        path_to_voc2007_dir = os.path.join(self._path_to_data_dir, 'VOCdevkit', 'VOC2007')
        path_to_main_dir = os.path.join(path_to_voc2007_dir, 'ImageSets', 'Main')
        path_to_annotations_dir = os.path.join(path_to_voc2007_dir, 'Annotations')

        class_to_ap_dict = {}
        for c in range(1, VOC2007Person.num_classes()):
            category = VOC2007Person.LABEL_TO_CATEGORY_DICT[c]
            try:
                path_to_cache_dir = os.path.join('caches', 'voc2007-person')
                os.makedirs(path_to_cache_dir, exist_ok=True)
                _, _, ap = voc_eval(detpath=os.path.join(path_to_results_dir,
                                                         'comp3_det_test_{:s}.txt'.format(category)),
                                    annopath=os.path.join(path_to_annotations_dir, '{:s}.xml'),
                                    imagesetfile=os.path.join(path_to_main_dir, 'test.txt'),
                                    classname=category,
                                    cachedir=path_to_cache_dir,
                                    ovthresh=0.5,
                                    use_07_metric=True)
            except IndexError:
                ap = 0

            class_to_ap_dict[c] = ap

        mean_ap = np.mean([v for k, v in class_to_ap_dict.items()]).item()

        detail = ''
        for c in range(1, VOC2007Person.num_classes()):
            detail += '{:d}: {:s} AP = {:.4f}\n'.format(c, VOC2007Person.LABEL_TO_CATEGORY_DICT[c], class_to_ap_dict[c])

        return mean_ap, detail
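The `try/except IndexError` above (also used in Example #12) guards against classes with no detections at all, which makes the stock `voc_eval` index an empty detection array; both examples simply score such a class as AP = 0. A small wrapper expressing the same convention (the function name is illustrative):

def voc_eval_or_zero(*args, **kwargs):
    # voc_eval can raise IndexError when a class's results file is empty;
    # follow the convention above and report AP = 0 for that class.
    try:
        return voc_eval(*args, **kwargs)
    except IndexError:
        return np.array([0.]), np.array([0.]), 0.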
Example #8
def do_python_eval(image_set,
                   classes,
                   confi_thresh,
                   static_result,
                   output_dir='results'):
    imagesetfile = image_set
    #cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    # use_07_metric = True if int(year) < 2010 else False
    use_07_metric = True
    print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    static_result.write(str(confi_thresh))
    static_result.write("\n")
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template().format(cls)
        rec, prec, ap = voc_eval(filename,
                                 imagesetfile,
                                 cls,
                                 ovthresh=confi_thresh,
                                 use_07_metric=use_07_metric)
        aps += [ap]

        static_result.write("{} AP = {:.4f}  REC = {:.4f}\n".format(
            cls, ap, rec))
        # print('AP for {} = {:.4f}'.format(cls, ap))
        # with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
        #     cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)

    static_result.write('Mean AP = {:.4f}\n'.format(np.mean(aps)))
Example #9
def evaluate_detections(mc, eval_dir, global_step, all_boxes, img_names_raw):
    """Evaluate detection results.
  Args:
    eval_dir: directory to write evaluation logs
    global_step: step of the checkpoint
    all_boxes: all_boxes[cls][image_idx] = N x 5 arrays of 
      [xmin, ymin, xmax, ymax, score]
    img_names_raw: raw name of all images
  Returns:
    aps: array of average precisions.
    names: class names corresponding to each ap
  """
    img_names = [name.split(".")[0] for name in img_names_raw]

    # det_file_dir = os.path.join(
    #     eval_dir, 'detection_files_{:s}'.format(global_step))
    if not os.path.isdir(eval_dir):
        os.mkdir(eval_dir)
    det_file_path_template = os.path.join(eval_dir, '{:s}.txt')

    for cls_idx, cls in enumerate(mc.CLASS_NAMES):
        det_file_name = det_file_path_template.format(cls)
        with open(det_file_name, 'wt') as f:
            for im_idx in xrange(len(img_names)):
                dets = all_boxes[cls_idx][im_idx]
                # VOC expects 1-based indices
                for k in xrange(len(dets)):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                        img_names[im_idx], dets[k][-1], dets[k][0] + 1,
                        dets[k][1] + 1, dets[k][2] + 1, dets[k][3] + 1))

    # Evaluate detection results
    annopath = os.path.join(mc.DATA_PATH, 'VOC2012', 'Annotations', '{:s}.xml')
    # imagesetfile = os.path.join(
    #     mc.DATA_PATH,
    #     'VOC2012',
    #     'ImageSets',
    #     'Main',
    #     'val' + '.txt'
    # )
    cachedir = os.path.join(mc["EVAL_DIR"], 'annotations_cache')
    # remove cache folder before calculating all the aps
    if (os.path.exists(cachedir)):
        rmtree(cachedir)
    aps = []
    for i, cls in enumerate(mc.CLASS_NAMES):
        filename = det_file_path_template.format(cls)
        _, _, ap = voc_eval(filename,
                            annopath,
                            img_names,
                            cls,
                            cachedir,
                            ovthresh=0.5,
                            use_07_metric=False)
        aps += [ap]
        # print ('{:s}: AP = {:.4f}'.format(cls, ap))

    # print ('Mean AP = {:.4f}'.format(np.mean(aps)))
    return aps, None
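Example #9 also shows the file format `voc_eval` consumes: one text file per class, one detection per line as `<image_id> <confidence> <xmin> <ymin> <xmax> <ymax>`, with 1-based pixel coordinates. A standalone sketch of such a writer (names are illustrative):

def write_voc_detections(path, detections):
    """detections: iterable of (image_id, score, xmin, ymin, xmax, ymax) in 0-based coords."""
    with open(path, 'wt') as f:
        for image_id, score, x1, y1, x2, y2 in detections:
            # the VOC devkit expects 1-based coordinates, hence the +1 offsets
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                image_id, score, x1 + 1, y1 + 1, x2 + 1, y2 + 1))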
Example #10
    def _do_python_eval(self, output_dir='output'):
        image_performance = {}
        annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                                'Annotations', '{:s}.xml')
        imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                    'ImageSets', 'Main',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(filename,
                                     annopath,
                                     imagesetfile,
                                     cls,
                                     cachedir,
                                     ovthresh=0.5,
                                     use_07_metric=use_07_metric,
                                     image_performance=image_performance)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        bad_pred_images = []
        for image in image_performance:
            gts = image_performance[image][0]
            preds = image_performance[image][1]
            counter = 0
            for gt in gts:
                if gt in preds:
                    counter += 1
            bad_pred_images.append(
                (float(counter) / len(gts), counter, len(gts), image))
        print sorted(bad_pred_images)

        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #11
    def _do_python_eval(self, output_dir = 'output'):
        image_performance = {}
        annopath = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'ImageSets',
            'Main',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric, image_performance=image_performance)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        bad_pred_images = []
        for image in image_performance:
            gts = image_performance[image][0]
            preds = image_performance[image][1]
            counter = 0
            for gt in gts:
                if gt in preds:
                    counter += 1
            bad_pred_images.append((float(counter) / len(gts), counter, len(gts), image))
        print sorted(bad_pred_images)

        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #12
    def evaluate(self, model: Model) -> Dict[int, float]:
        all_image_ids, all_detection_bboxes, all_detection_labels, all_detection_probs = [], [], [], []

        with torch.no_grad():
            for batch_index, (image_id_batch, image_batch, scale_batch, _,
                              _) in enumerate(tqdm(self.dataloader)):
                image_id = image_id_batch[0]
                image = image_batch[0].cuda()
                scale = scale_batch[0].item()

                forward_input = Model.ForwardInput.Eval(image)
                forward_output: Model.ForwardOutput.Eval = model.eval().forward(forward_input)

                detection_bboxes = forward_output.detection_bboxes / scale
                detection_labels = forward_output.detection_labels
                detection_probs = forward_output.detection_probs

                all_detection_bboxes.extend(detection_bboxes.tolist())
                all_detection_labels.extend(detection_labels.tolist())
                all_detection_probs.extend(detection_probs.tolist())
                all_image_ids.extend([image_id] * len(detection_bboxes))

        self._write_results(all_image_ids, all_detection_bboxes,
                            all_detection_labels, all_detection_probs)

        path_to_voc2007_dir = os.path.join(self._path_to_data_dir, 'VOCdevkit',
                                           'VOC2007')
        path_to_main_dir = os.path.join(path_to_voc2007_dir, 'ImageSets',
                                        'Main')
        path_to_annotations_dir = os.path.join(path_to_voc2007_dir,
                                               'Annotations')

        label_to_ap_dict = {}
        for c in range(1, Model.NUM_CLASSES):
            category = Dataset.LABEL_TO_CATEGORY_DICT[c]
            try:
                _, _, ap = voc_eval(
                    detpath=os.path.join(
                        self._path_to_results_dir,
                        'comp3_det_test_{:s}.txt'.format(category)),
                    annopath=os.path.join(path_to_annotations_dir, '{:s}.xml'),
                    imagesetfile=os.path.join(path_to_main_dir, 'test.txt'),
                    classname=category,
                    cachedir='cache',
                    ovthresh=0.5,
                    use_07_metric=True)
            except IndexError:
                ap = 0

            label_to_ap_dict[c] = ap

        return label_to_ap_dict
Example #13
 def _do_python_eval(self, output_dir=None):
     results = {}
     annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                             'Annotations', '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if output_dir is not None and not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric,
                                  use_diff=self.config['use_diff'])
         aps += [ap]
         results[f"AP-{cls}"] = (rec, prec, ap)
         print(('AP for {} = {:.4f}'.format(cls, ap)))
         if output_dir is not None:
             with open(os.path.join(output_dir, cls + '_pr.pkl'),
                       'wb') as f:
                 pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print(('Mean AP = {:.4f}'.format(np.mean(aps))))
     results["Mean AP"] = np.mean(aps)
     print('~~~~~~~~')
     '''
 print('Results:')
 for ap in aps:
   print(('{:.3f}'.format(ap)))
 print(('{:.3f}'.format(np.mean(aps))))
 print('~~~~~~~~')
 print('')
 print('--------------------------------------------------------------')
 print('Results computed with the **unofficial** Python eval code.')
 print('Results should be very close to the official MATLAB eval code.')
 print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
 print('-- Thanks, The Management')
 print('--------------------------------------------------------------')
 '''
     return results
Example #14
def do_python_eval(devkit_path,
                   year,
                   image_set,
                   classes,
                   output_dir='results'):
    annopath = os.path.join(
        devkit_path,
        year,  # 'VOC' + year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        devkit_path,
        year,  # 'VOC' + year,
        'ImageSets',
        'Main',
        image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 annopath,
                                 imagesetfile,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
Example #15
    def _do_python_eval(self, output_dir='output'):
        annopath = os.path.join(
            self._devkit_path,
            'ICDAR' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'ICDAR' + self._year,
            'ImageSets',
            'Main',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        recs = [] ### ADDED
        precs = [] ### ADDED
        nposes = [] ### ADDED
        tps=[] ### ADDED
        fps=[] ### ADDED

        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)

            rec, prec, ap, npos, tp, fp = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric)  ### more return values ADDED: rec, prec, ap, f1, npos, tp, fp
            tps += [tp[-1]] ### ADDED
            fps += [fp[-1]] ### ADDED
            nposes += [npos] ### ADDED
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)

        print('tps is {}'.format(tps))  ### ADDED
        print('nposes is {}'.format(nposes))  ### ADDED
        print('fps is {}'.format(fps))  ### ADDED
        recs = np.sum(tps) / float(np.sum(nposes))  ### ADDED
        precs = np.sum(tps) / np.maximum(np.sum(tps) + np.sum(fps), np.finfo(np.float64).eps)  ### ADDED
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('Recall = {:.4f}'.format(recs))  ### ADDED
        print('Precision = {:.4f}'.format(precs))  ### ADDED
        print('F1 = {:.4f}'.format((2 * recs * precs) / (recs + precs)))  ### ADDED
Example #16
 def _do_python_eval(self, save_name, result_name, output_dir = 'output'):
     annopath = os.path.join(
         self._devkit_path,
         'data',
         'DIRE',
         'Annotations',
         '{:s}.txt')
     imagesetfile = os.path.join(
         self._devkit_path,
         'data',
         'DIRE',
         'ImageSets',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     new_result_file = os.path.join(self._devkit_path, 'result', 
                                 self.name, save_name, result_name)
     aps = []; recs=[]
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True
     print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template(save_name, result_name).format(cls)
         #pdb.set_trace()
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
         recs += [rec[-1]] # gabriel
         print('AP = {:.4f}, maxRec = {:.4f} for {}'.format(ap, rec[-1], cls)) #gabriel
         #print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
         #with open(os.path.join(new_result_file, 'new_result_1sd.txt'), 'a') as f:
         #    f.write('AP for {} = {:.4f}\n'.format(cls, ap))
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('Mean maxRecall = {:.4f}'.format(np.mean(recs))) # gabriel
     #with open(os.path.join(new_result_file, 'new_result_1sd.txt'), 'a') as f:
     #    f.write('Mean AP = {:.4f}'.format(np.mean(aps)))
     #print('~~~~~~~~')
     #print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
Example #17
def do_python_eval(data_path, det_file_path, classes, det_dir, output_dir, ovthresh):
    anno_path = os.path.join(data_path, 'Annotations', '{:s}.xml')
    test_file = os.path.join(data_path, 'test.txt')
    cache_dir = os.path.join('../result/evaluation')
    aps = []
    # set the evaluation rules
    year = 2007
    use_07_metric = True if int(year) < 2010 else False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        rec, prec, ap = voc_eval(
            det_file_path, anno_path, test_file, cls, cache_dir, ovthresh,
            use_07_metric=use_07_metric)
        aps += [ap]
        pl.plot(rec, prec, lw=2,
                label='Precision-recall curve of class {} (area = {:.4f})'.format(cls, ap))
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)

    # ------------rough drawing of PR-Curve------------
    # pl.xlabel('Recall')
    # pl.ylabel('Precision')
    # plt.grid(True)
    # pl.ylim([0.0, 1.05])
    # pl.xlim([0.0, 1.0])
    # pl.title('Precision-Recall')
    # pl.legend(loc="upper right")
    # plt.show()

    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('MAP:{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
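Examples #6 and #17 mix the legacy `pylab` alias (`pl`) with `matplotlib.pyplot` (`plt`). The same per-class precision-recall plot can be drawn with `matplotlib.pyplot` alone; this sketch assumes `rec`, `prec`, `ap`, and `cls` from the loop above:

import matplotlib.pyplot as plt

plt.plot(rec, prec, lw=2,
         label='Precision-recall curve of class {} (area = {:.4f})'.format(cls, ap))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.grid(True)
plt.title('Precision-Recall')
plt.legend(loc='upper right')
plt.show()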
Example #18
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                             'Annotations', '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
          # filename: all detected boxes for this class
          # annopath: /data/zhenghuanxin/py-faster-rcnn1/data/VOCdevkit2007/VOC2007/Annotations/{:s}.xml
          # imagesetfile: list of test images
          # cls: name of the current class
          # cachedir: /data/zhenghuanxin/py-faster-rcnn1/data/VOCdevkit2007/annotations_cache
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #19
def main(img_list, det_files, ann_dir):
    """
  """
    stats = OrderedDict()
    for det_file in det_files:
        class_name = os.path.basename(det_file).split('.')[0].split('_')[-1]
        cache_dir = './cache'
        rec, prec, ap = voc_eval(det_file, ann_dir, img_list, class_name,
                                 cache_dir)
        stats[class_name] = {
            'recall': np.around(rec, decimals=2),
            'prec': np.around(prec, decimals=2),
            'ap': np.around(ap, decimals=2)
        }
    print_stats(stats)
    plot_stats(stats)
Example #20
 def _do_python_eval(self, output_dir = 'output'):
     annopath = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'ImageSets',
         'Main',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         #govind: It's calling a function which will give Precision, Recall and 
         # average precision if we pass it the _results_file
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     #govind: Computing Mean average precision
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #21
def generate_aps(results_root="res", epoch=0):
    results_dir = results_root + "/{}.txt"
    output_path = results_root + "/perf_" + str(epoch) + ".csv"
    anno_path = "./data/VOC2007/Annotations/{}.xml"
    # imageset_path = "VOCdevkit/VOC2007/ImageSets/Main/test.txt"
    class_names = [
        "aeroplane", "bicycle", "boat", "bottle", "bus", "car", "chair",
        "diningtable", "motorbike", "sofa", "train", "tvmonitor"
    ]
    recs = []
    precs = []
    aps = []
    tested_class = []
    for class_name in class_names:
        imageset_path = "./data/pascal3d+/Imagesets/{}.txt".format(class_name)
        cachedir = "./data"
        # cache_file = os.path.join(cachedir, "annots.pkl")
        # if os.path.exists(cache_file):
        #     os.remove(cache_file)
        if not os.path.exists(results_dir.format(class_name)):
            log.warning("missing data: {}".format(class_name))
            rec, prec, ap = 0., 0., 0.
        else:
            rec, prec, ap = voc_eval(results_dir,
                                     anno_path,
                                     imageset_path,
                                     class_name,
                                     cachedir,
                                     use_07_metric=True)
        recs.append(rec)
        precs.append(prec)
        aps.append(ap)
        tested_class.append(class_name)

    mAP = np.array(aps).mean()
    header = ",".join([*class_names, "mean"]) + '\n'
    data = ",".join(["{}".format(ap)
                     for ap in aps] + ["{}".format(mAP)]) + '\n'
    with open(output_path, "w") as f:
        f.write(header)
        f.write(data)
    # print(header+data)
    for i, test_c in enumerate(tested_class):
        print("{} accuracy: {:.2f}%".format(test_c, aps[i] * 100))
    print("mean accuracy: {:.2f}%".format(100 * np.array(aps).mean()))
    print('--------------------------------------------------')
Example #22
    def _do_python_eval(self, output_dir = 'output'):
        annopath = os.path.join(
            self._devkit_path,
            'data',
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            # todo
            'data',
            'sets',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # todo
        # The PASCAL VOC metric changed in 2010
        use_07_metric = self.config['old_pascal_metric']
        # todo
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue

            filename = self._get_nyud_results_file_template().format(cls)
            print filename
            rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #23
    def _do_python_eval(self, output_dir='output'):
        rootpath = os.path.join(self.root, 'VOC' + self._year)
        name = self.image_set[0][1]
        annopath = os.path.join(rootpath, 'Annotations', '{:s}.xml')
        imagesetfile = os.path.join(rootpath, 'ImageSets', 'Main',
                                    name + '.txt')
        cachedir = os.path.join(self.root, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        #print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if output_dir is not None and not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        # read list of images
        with open(imagesetfile, 'r') as f:
            lines = f.readlines()
        imagenames = [x.strip() for x in lines]
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec('./VOCdevkit/VOC2007/Annotations/' +
                                        str(imagename) + '.xml')

        for i, cls in enumerate(VOC_CLASSES):

            if cls == '__background__':
                continue

            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(filename,
                                     cls,
                                     recs=recs,
                                     imagenames=imagenames,
                                     ovthresh=0.5,
                                     use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            if output_dir is not None:
                with open(os.path.join(output_dir, cls + '_pr.pkl'),
                          'wb') as f:
                    pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        #for ap in aps:
        #print('{:.3f}'.format(ap))
        #print('{:.3f}'.format(np.mean(aps)))
        return aps, np.mean(aps)
Example #24
def run_voc_eval(annopath, imagesetfile, year, image_set, classes, output_dir):
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = 'voc_{}_{}_{}.txt'.format(
            year, image_set, cls)
        filepath = os.path.join(output_dir, filename)
        rec, prec, ap = voc_eval(filepath, annopath, imagesetfile, cls,
                                 output_dir, ovthresh=0.5, use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
Example #25
 def _do_python_eval(self, output_dir='output'):
     anno_path = os.path.join(self._devkit_path, 'Annotations', '{:s}.xml')
     imageset_file = os.path.join(self._devkit_path, 'ImageSets', 'Main',
                                  self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     recs = []
     precs = []
     # The Pascal VOC metric changed in 2010
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  anno_path,
                                  imageset_file,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5)
         aps += [ap]
         recs += [rec[-1]]
         precs += [prec[-1]]
         print('AP for {} = {:.4f}'.format(cls, ap))
          with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
              # convert to plain Python types so json can serialize them
              json.dump({'rec': np.asarray(rec).tolist(),
                         'prec': np.asarray(prec).tolist(),
                         'ap': float(ap)}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('Mean REC = {:.4f}'.format(np.mean(recs)))
     print('Mean PREC = {:.4f}'.format(np.mean(precs)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #26
    def do_python_eval(self, output_dir='output'):
        annopath = os.path.join(
            self._data_path,
            'VOC2012',
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._data_path,
            'VOC2012',
            'ImageSets',
            'Main',
            self._imageset + '.txt')
        cachedir = os.path.join(self._data_path, 'annotations_cache')
        aps = []

        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            filename = os.path.join(output_dir, 'cls_test_{}.txt'.format(cls))
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=False)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)

        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #27
def do_python_eval(devkit_path, year, image_set, classes, output_dir = 'results'):
    annopath = os.path.join(
        devkit_path,
        'VOC' + year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        devkit_path,
        'VOC' + year,
        'ImageSets',
        'Main',
        image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
Example #28
 def _do_python_eval(self, output_dir='output'):
     annopath = r'C:\Users\dreamer\Desktop\repositories\Object_Detection\faster_rcnn_pytorch_annotation\data\VOCdevkit2012\VOC2012\Annotations\{:s}.xml'
     imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     print("imagesetfile:", imagesetfile)
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
      for i, cls in enumerate(self._classes):  # evaluate one class at a time
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #29
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                             'Annotations', '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
      ious = np.linspace(.5,
                         0.95,
                         int(np.round((0.95 - .5) / .05)) + 1,
                         endpoint=True)
     apss = []
     for ov in ious:
         aps = []
         for i, cls in enumerate(self._classes):
             if cls == '__background__':
                 continue
             filename = self._get_voc_results_file_template().format(cls)
             rec, prec, ap = voc_eval(filename,
                                      annopath,
                                      imagesetfile,
                                      cls,
                                      cachedir,
                                      ovthresh=ov,
                                      use_07_metric=use_07_metric)
             aps += [ap]
             print('AP for {} = {:.4f}'.format(cls, ap))
             with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
                 cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
         apss.append(aps)
     print('mmAP {:.4f}'.format(np.mean(apss)))
      print('mAP@0.5 {:.4f}'.format(np.mean(apss[0])))
      print('mAP@0.7 {:.4f}'.format(
          np.mean(apss[np.where(ious == .70)[0][0]])))
      print('mAP@0.75 {:.4f}'.format(
          np.mean(apss[np.where(ious == .75)[0][0]])))
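The per-threshold lookups above compare floats with `==`, which is fragile for values such as 0.7 produced by `np.linspace`. A sketch that keys the summary by rounded threshold instead, assuming the `ious` and `apss` built in the loop:

      # Mean AP per IoU threshold, keyed by the (rounded) threshold value.
      per_iou_map = {round(float(ov), 2): float(np.mean(aps)) for ov, aps in zip(ious, apss)}
      print('mAP@0.5  {:.4f}'.format(per_iou_map[0.5]))
      print('mAP@0.75 {:.4f}'.format(per_iou_map[0.75]))
      print('mAP@[.5:.95] {:.4f}'.format(np.mean(list(per_iou_map.values()))))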
Example #30
 def do_python_eval(self):
     """
     python evaluation wrapper
     :return: None
     """
     annopath = os.path.join(self.data_path, 'Annotations', '{:s}.xml')
     imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
     cache_dir = os.path.join(self.cache_path, self.name)
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self.year) < 2010 else False
     print 'VOC07 metric? ' + ('Y' if use_07_metric else 'No')
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, cache_dir,
                                  ovthresh=0.5, use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
Example #31
 def _do_python_eval(self, output_dir='output'):
     print '--------------------------------------------------------------'
     print 'Computing results with **unofficial** Python eval code.'
     print 'Results should be very close to the official MATLAB eval code.'
     print 'Recompute with `./tools/reval.py --matlab ...` for your paper.'
     print '--------------------------------------------------------------'
     # !!! Edit: Modify path to annotation and images.
     annopath = os.path.join(self._devkit_path, 'data', 'Annotations',
                             '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'data', 'ImageSets',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # !!! Edit use_07_metric
     use_07_metric = self.config['use_07_metric']
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
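
Example #31 exposes use_07_metric through a config flag. The switch controls how the final AP is computed inside the usual voc_ap helper: the 2007 protocol averages the maximum precision at the 11 recall levels 0.0, 0.1, ..., 1.0, while the 2010+ protocol integrates the full precision/recall curve. A sketch of the 11-point variant, for illustration only:

import numpy as np

def voc07_11point_ap(rec, prec):
    # rec, prec: recall/precision arrays as returned by voc_eval
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        p = np.max(prec[rec >= t]) if np.any(rec >= t) else 0.0
        ap += p / 11.0
    return ap
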
Example #32
0
 def _do_python_eval(self, output_dir='output'):
     # evaluates per-class AP at IoU 0.5 and pickles the recall/precision
     # curve and AP for each class into <output_dir>/<cls>_pr.pkl
     annopath = os.path.join(self._data_path, 'Annotations', '{:s}.txt')
     imagesetfile = os.path.join(self._data_path, 'ImageSets',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     use_07_metric = True
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_try1_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
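
Example #32 points voc_eval at plain-text annotations ('{:s}.txt') rather than the stock VOC XML, which suggests the project also swapped in a custom annotation parser. A minimal sketch of such a parser, assuming a hypothetical one-object-per-line format 'label xmin ymin xmax ymax' (the real project's format may differ):

def parse_txt_annotation(filename):
    # returns the same list-of-dicts structure the XML parse_rec produces
    objects = []
    with open(filename) as f:
        for line in f:
            parts = line.split()
            if len(parts) < 5:
                continue
            objects.append({'name': parts[0],
                            'bbox': [float(v) for v in parts[1:5]],
                            'difficult': 0})
    return objects
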
Example #33
0
def run_voc_eval(annopath, imagesetfile, year, image_set, classes, output_dir):
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = 'voc_{}_{}_{}.txt'.format(year, image_set, cls)
        filepath = os.path.join(output_dir, filename)
        rec, prec, ap = voc_eval(filepath,
                                 annopath,
                                 imagesetfile,
                                 cls,
                                 output_dir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
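
Example #33 takes the annotation and image-set paths explicitly instead of deriving them from a dataset object, so it can be called directly. A hypothetical invocation, with placeholder paths and a truncated class list:

annopath = '/data/VOCdevkit/VOC2007/Annotations/{:s}.xml'
imagesetfile = '/data/VOCdevkit/VOC2007/ImageSets/Main/test.txt'
classes = ('__background__', 'aeroplane', 'bicycle', 'bird')
run_voc_eval(annopath, imagesetfile, '2007', 'test', classes, 'results')
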
Example #34
0
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(
         self._data_path,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._data_path,
         'ImageSets',
         self._image_set + '.txt')
     cachedir = os.path.join(self._data_path, 'annotations_cache')
     aps = []
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=False)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
Example #35
0
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'ImageSets',
         'Main',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     cfg.mAP = np.mean(aps)
Example #36
0
 def _do_python_eval(self, output_dir = 'output'):
     annopath = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'Annotations',
         '{:s}.xml')
     imagesetfile = os.path.join(
         self._devkit_path,
         'VOC' + self._year,
         'ImageSets',
         'Main',
         self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
             use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     cfg.mAP = np.mean(aps)
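
Examples #35 and #36 additionally store the mean AP on the shared config object (cfg.mAP) so code outside the evaluator, e.g. a validation loop, can read the score back afterwards. Purely illustrative, with imdb and output_dir as placeholder names:

imdb._do_python_eval(output_dir)                 # hypothetical call on the dataset object
print('validation mAP: {:.4f}'.format(cfg.mAP))  # value written by the evaluator above
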
Example #37
0
 def _do_python_eval(self, output_dir="output"):
     annopath = os.path.join(self._devkit_path, "VOC" + self._year, "Annotations", "{:s}.xml")
     imagesetfile = os.path.join(
         self._devkit_path, "VOC" + self._year, "ImageSets", "Main", self._image_set + ".txt"
     )
     cachedir = os.path.join(self._devkit_path, "annotations_cache")
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print "VOC07 metric? " + ("Yes" if use_07_metric else "No")
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == "__background__":
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(
             filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric
         )
         aps += [ap]
         print ("AP for {} = {:.4f}".format(cls, ap))
         with open(os.path.join(output_dir, cls + "_pr.pkl"), "w") as f:
             cPickle.dump({"rec": rec, "prec": prec, "ap": ap}, f)
     print ("Mean AP = {:.4f}".format(np.mean(aps)))
     print ("~~~~~~~~")
     print ("Results:")
     for ap in aps:
         print ("{:.3f}".format(ap))
     print ("{:.3f}".format(np.mean(aps)))
     print ("~~~~~~~~")
     print ("")
     print ("--------------------------------------------------------------")
     print ("Results computed with the **unofficial** Python eval code.")
     print ("Results should be very close to the official MATLAB eval code.")
     print ("Recompute with `./tools/reval.py --matlab ...` for your paper.")
     print ("-- Thanks, The Management")
     print ("--------------------------------------------------------------")
Example #38
0
        (im_shape, im_scale, gt_boxes, gt_classes,
            num_gt_boxes, difficult) = valid_set.get_metadata_buffers()

        num_gt_boxes = int(num_gt_boxes.get())
        im_scale = float(im_scale.get())

        # retrieve region proposals generated by the model
        (proposals, num_proposals) = proposalLayer.get_proposals()

        # convert outputs to bounding boxes
        boxes = faster_rcnn.get_bboxes(outputs, proposals, num_proposals, num_classes,
                                       im_shape.get(), im_scale, max_per_image, thresh, nms_thresh)

        all_boxes[mb_idx] = boxes

        # retrieve gt boxes
        # we add an extra column to track detections during the AP calculation
        detected = np.array([False] * num_gt_boxes)
        gt_boxes = np.hstack([gt_boxes.get()[:num_gt_boxes] / im_scale,
                              gt_classes.get()[:num_gt_boxes],
                              difficult.get()[:num_gt_boxes], detected[:, np.newaxis]])

        all_gt_boxes[mb_idx] = gt_boxes

neon_logger.display('Evaluating detections')
avg_precision = voc_eval(all_boxes, all_gt_boxes, valid_set.CLASSES, use_07_metric=True)

if args.output is not None:
    neon_logger.display('Saving inference results to {}'.format(args.output))
    save_obj([all_boxes, avg_precision], args.output)
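
In Example #38 each row of the gt_boxes array assembled above packs the rescaled box, its class id, the difficult flag, and a detected marker into seven columns. A small illustrative helper for slicing that layout (not part of the neon example itself):

import numpy as np

def gt_boxes_for_class(gt_boxes, class_id):
    # columns: [x1, y1, x2, y2, class_id, difficult, detected]
    mask = gt_boxes[:, 4] == class_id
    boxes = gt_boxes[mask, :4]
    difficult = gt_boxes[mask, 5].astype(bool)
    return boxes, difficult
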