Code Example #1
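 # Assumes the enclosing module imports os, pickle, and numpy as np, and that
 # voc_eval is available from the dataset evaluation utilities.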
 def _do_python_eval(self, output_dir='output'):
   annopath = os.path.join(
     self._devkit_path,
     'VOC' + self._year,
     'Annotations',
     '{:s}.xml')
   imagesetfile = os.path.join(
     self._devkit_path,
     'VOC' + self._year,
     'ImageSets',
     'Main',
     self._image_set + '.txt')
   cachedir = os.path.join(self._devkit_path, 'annotations_cache')
   aps = []
   # The PASCAL VOC metric changed in 2010
   use_07_metric = int(self._year) < 2010
   print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
   if not os.path.isdir(output_dir):
     os.mkdir(output_dir)
   for i, cls in enumerate(self._classes):
     # Compute the AP for each class; the background class is excluded.
     if cls == '__background__':
       continue
     filename = self._get_voc_results_file_template().format(cls)
     rec, prec, ap = voc_eval(
       filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
       use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
     aps.append(ap)
     print('AP for {} = {:.4f}'.format(cls, ap))
     with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
       pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
   print('Mean AP = {:.4f}'.format(np.mean(aps)))
   print('~~~~~~~~')
   print('Results:')
   for ap in aps:
     print('{:.3f}'.format(ap))
   print('{:.3f}'.format(np.mean(aps)))
   print('~~~~~~~~')
   print('')
   print('--------------------------------------------------------------')
   print('Results computed with the **unofficial** Python eval code.')
   print('Results should be very close to the official MATLAB eval code.')
   print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
   print('-- Thanks, The Management')
   print('--------------------------------------------------------------')
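The comment above notes that the PASCAL VOC metric changed in 2010: VOC07 used the 11-point interpolated AP, while VOC2010+ uses the exact area under the monotonicized precision-recall curve. The use_07_metric flag forwards that choice to voc_eval. Below is a minimal sketch of the switch, modeled on the common voc_ap helper from the reference Python evaluation code (illustrative, not the repo's exact file):

import numpy as np

def voc_ap(rec, prec, use_07_metric=False):
    # Compute AP from recall/precision arrays.
    if use_07_metric:
        # VOC07: average the max precision at 11 recall levels 0.0, 0.1, ..., 1.0.
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
            ap += p / 11.0
    else:
        # VOC2010+: exact area under the PR curve after making precision
        # monotonically non-increasing from right to left.
        mrec = np.concatenate(([0.0], rec, [1.0]))
        mpre = np.concatenate(([0.0], prec, [0.0]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        # Sum rectangle areas at the points where recall changes.
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx])
    return ap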
Code Example #2
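# Assumes the enclosing module imports os and numpy as np, defines a module-level
# logger, and provides the voc_info, voc_eval, _get_voc_results_file_template,
# and save_object helpers.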
def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(year) < 2010
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 anno_path,
                                 image_set_path,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps.append(ap)
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
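This variant persists the same per-class {'rec', 'prec', 'ap'} dict as the first example, but through the repo's save_object helper (a pickle-based writer) rather than a direct pickle.dump. Reading a curve back for inspection is straightforward; a minimal sketch, assuming an ordinary pickle protocol and a hypothetical class name 'car':

import pickle

import numpy as np

def load_pr_curve(path):
    # Load one per-class '<cls>_pr.pkl' file written by the evaluation above.
    with open(path, 'rb') as f:
        # Files pickled under Python 2 may need pickle.load(f, encoding='latin1')
        # on Python 3.
        data = pickle.load(f)
    rec = np.asarray(data['rec'])    # recall at each detection threshold
    prec = np.asarray(data['prec'])  # precision at each detection threshold
    return rec, prec, data['ap']

rec, prec, ap = load_pr_curve('output/car_pr.pkl')  # hypothetical path
print('AP = {:.4f} over {} PR points'.format(ap, rec.size))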
Code Example #3
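 # Assumes the enclosing module imports os, cPickle (Python 2), and numpy as np,
 # and that voc_eval is available.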
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(self._data_path, 'annotations', '{:s}.txt')
     imagesetfile = os.path.join(self._data_path, 'datasets', 'main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._data_path, 'annotations_cache')
     aps = []
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_zlrm_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.1)
         print('rec', rec)
         print('prec', prec)
         aps.append(ap)
         print('AP for {} = {:.4f}'.format(cls, ap))
         # NOTE: cPickle is Python 2 only; on Python 3 use the built-in pickle module.
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb+') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
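In all three variants, filename is a per-class detection file generated earlier from the results-file template. voc_eval parses the standard PASCAL VOC detection format: one detection per line, <image_id> <confidence> <x1> <y1> <x2> <y2>. A minimal writer sketch (the function name, tuple layout, and file name below are illustrative):

def write_voc_results_file(filename, detections):
    # detections: iterable of (image_id, score, x1, y1, x2, y2) tuples.
    with open(filename, 'wt') as f:
        for image_id, score, x1, y1, x2, y2 in detections:
            # One detection per line: <image_id> <confidence> <x1> <y1> <x2> <y2>
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                image_id, score, x1, y1, x2, y2))

# Hypothetical usage with one detection for class 'car':
write_voc_results_file('comp4_det_test_car.txt',
                       [('000001', 0.912, 48.0, 32.5, 210.0, 180.0)])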