def do_python_eval(devkit_path, year, image_set, classes, output_dir='results', model='yolov1', eval_f=None):
    """Run PASCAL VOC detection evaluation for every class and report AP.

    For each class this reads the detection results file, computes
    recall/precision/AP via ``voc_eval``, pickles the PR data to
    ``output_dir/<model>_<cls>_pr.pkl``, and prints per-class and mean AP.
    If ``eval_f`` is given, a CSV row ``model,ap1,...,apN,mAP`` is appended.

    Args:
        devkit_path: Root of the VOCdevkit directory tree.
        year: Dataset year as a string (e.g. '2007'); selects the AP metric.
        image_set: Image-set name (e.g. 'test') used to locate the list file
            and the per-class results files.
        classes: Iterable of class names; '__background__' is skipped.
        output_dir: Directory for the pickled PR curves (created if missing).
        model: Model tag used in pickle filenames and the CSV row.
        eval_f: Optional open, writable text file for the CSV summary row.
            When None, no summary row is written.
    """
    annopath = os.path.join(devkit_path, 'VOC' + year, 'Annotations', '{}.xml')
    imagesetfile = os.path.join(devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC AP metric changed in 2010 (11-point sampling before).
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for cls in classes:
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls, cachedir,
                                 ovthresh=0.5, use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, model + '_' + cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    # Bug fix: eval_f defaults to None but was previously written to
    # unconditionally, raising AttributeError for the default call.
    if eval_f is not None:
        eval_f.write('{},'.format(model))
        for ap in aps:
            eval_f.write('{:.3f},'.format(ap))
        eval_f.write('{:.3f}\n'.format(np.mean(aps)))
def do_python_eval(devkit_path, year, image_set, classes, output_dir='results'):
    """Evaluate VOC detections per class, pickle PR curves, and print APs.

    Args:
        devkit_path: Root of the VOCdevkit directory tree.
        year: Dataset year string; years before 2010 use the VOC07 metric.
        image_set: Image-set name used to find the list file and results.
        classes: Class names to evaluate; '__background__' is skipped.
        output_dir: Directory receiving ``<cls>_pr.pkl`` files.
    """
    anno_template = os.path.join(devkit_path, 'VOC' + year, 'Annotations', '{}.xml')
    image_list = os.path.join(devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')
    cache_dir = os.path.join(devkit_path, 'annotations_cache')
    # The PASCAL VOC AP metric changed in 2010.
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    aps = []
    for cls in classes:
        print(cls)
        if cls == '__background__':
            continue
        det_file = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(det_file, anno_template, image_list, cls,
                                 cache_dir, ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as fh:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, fh)
    mean_ap = np.mean(aps)
    print('Mean AP = {:.4f}'.format(mean_ap))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(mean_ap))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
def do_python_eval(devkit_path, year, image_set, classes, output_dir='results'):
    """Run the standard VOC per-class evaluation and print AP results.

    Recall/precision/AP per class are saved to ``output_dir/<cls>_pr.pkl``;
    per-class APs and the mean AP are printed to stdout.
    """
    ann_tmpl = os.path.join(devkit_path, 'VOC' + year, 'Annotations', '{}.xml')
    set_file = os.path.join(devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')
    cache = os.path.join(devkit_path, 'annotations_cache')
    # The PASCAL VOC AP metric changed in 2010.
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    aps = []
    # Evaluate every real class; the background pseudo-class has no detections.
    for cls in (c for c in classes if c != '__background__'):
        results_file = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(results_file, ann_tmpl, set_file, cls, cache,
                                 ovthresh=0.5, use_07_metric=use_07_metric)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as out:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, out)
    mean_ap = np.mean(aps)
    print('Mean AP = {:.4f}'.format(mean_ap))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(mean_ap))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
from voc_eval_py3 import voc_eval

# One-off evaluation of the single 'hurt' class for the medical-rib project.
# Image list alternative: /Users/liqinghua/projects/medical-rib/data/voc2007/ImageSets/test.txt
DET_TEMPLATE = '/Users/liqinghua/projects/medical-rib/models/darknet/results/{}.txt'
ANNO_TEMPLATE = '/Users/liqinghua/voc2007.xoy/Annotations/{}.xml'
IMAGE_SET = '/Users/liqinghua/voc2007.xoy/ImageSets/2007_test.txt'

rec, prec, ap = voc_eval(DET_TEMPLATE, ANNO_TEMPLATE, IMAGE_SET, 'hurt', '.')
print('rec', rec)
print('prec', prec)
print('ap', ap)
from voc_eval_py3 import voc_eval

# Evaluate the 'car' and 'person' classes against the VOC2007 test split,
# printing the (rec, prec, ap) tuple for each, separated by a banner line.
_RESULTS = '/home/magic/Desktop/CNN models/darknet/results/{}.txt'
_ANNOTATIONS = '/home/magic/Desktop/CNN models/darknet/VOCdevkit/VOC2007/Annotations/{}.xml'
_IMAGE_SET = '/home/magic/Desktop/CNN models/darknet/VOCdevkit/VOC2007/ImageSets/Main/test.txt'

for idx, cls in enumerate(('car', 'person')):
    if idx:
        print(
            '\n###############################################################################'
        )
    print(cls + ':')
    print(voc_eval(_RESULTS, _ANNOTATIONS, _IMAGE_SET, cls, '.'))
from voc_eval_py3 import voc_eval

# Single-class ('p') evaluation against the VOC2018 segmentation val split.
det_file = '/userhome/darknet/results/comp4_det_test_p.txt'
anno_template = '/userhome/darknet/data/VOC2018/Annotations/{}.xml'
image_list = '/userhome/darknet/data/VOC2018/ImageSets/Segmentation/val_num.txt'

rec, prec, ap = voc_eval(det_file, anno_template, image_list, 'p', '.')
print('rec', rec)
print('prec', prec)
print('ap', ap)
def do_python_eval(devkit_path, year, image_set, classes, output_dir='results'):
    """Per-class VOC-style evaluation for the MIO-TCD localization layout.

    Unlike the stock VOCdevkit tree, annotations and the image-set list
    live directly under ``devkit_path`` (no ``VOC<year>`` subdirectory).
    PR curves are pickled to ``output_dir/<cls>_pr.pkl``.
    """
    print(
        f"devkit_path:{devkit_path}, year:{year}, image_set:{image_set}, classes:{classes}, output_dir:{output_dir}"
    )
    annopath = os.path.join(devkit_path, 'Annotations', '{}.xml')
    imagesetfile = os.path.join(devkit_path, image_set + '.txt')
    print(f'=========={annopath}-------------{imagesetfile}')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    # Year-derived flag is computed for the banner below; see NOTE in the loop.
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    aps = []
    for cls in classes:
        if cls == '__background__':
            continue
        det_file = get_voc_results_file_template(image_set).format(cls)
        print(f'==========(unknown)-------------')
        # NOTE(review): the call below hard-codes use_07_metric=True (and
        # ovthresh=0.45), overriding the year-derived flag printed above —
        # confirm this override is intentional.
        rec, prec, ap = voc_eval(det_file, annopath, imagesetfile, cls,
                                 cachedir, ovthresh=0.45, use_07_metric=True)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as fh:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, fh)
    mean_ap = np.mean(aps)
    print('Mean AP = {:.4f}'.format(mean_ap))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(mean_ap))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')