# Shared imports for the variants below. The voc_eval import path follows
# the py-faster-rcnn layout; adjust it to wherever voc_eval lives in your repo.
import os
import sys
import cPickle

import numpy as np

from voc_eval import voc_eval


def _do_python_eval(self, output_dir='output'):
    """Evaluate detections with the Python port of the VOC eval code,
    reading ILSVRC-DET annotations and image lists from the devkit."""
    annopath = os.path.join(self._val_det_bbox, '{:s}' + self._anno_ext)
    print('Anno path {}'.format(annopath))
    imagesetfile = os.path.join(self._devkit_path, 'data/det_lists',
                                self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_imagenet_results_file_template().format(cls)
        print('File name {}'.format(filename))
        rec, prec, ap = voc_eval(self._wnid_to_ind, self._class_to_ind,
                                 filename, annopath, imagesetfile, cls,
                                 cachedir, ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        # Binary mode is required for pickle files.
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    # aps[0] corresponds to self._classes[1] because '__background__' is skipped.
    for i, ap in enumerate(aps, 1):
        print('{}: {:.3f}'.format(self._classes[i], ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
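# The per-class pickle written above can be reloaded later to inspect or plot
# the precision-recall curve. A minimal sketch, using the shared imports above;
# 'person_pr.pkl' is only an example of the '<class>_pr.pkl' naming pattern.
def _load_pr_curve(pkl_path='output/person_pr.pkl'):
    with open(pkl_path, 'rb') as f:
        pr = cPickle.load(f)
    print('AP = {:.4f} over {} PR points'.format(pr['ap'], len(pr['rec'])))
    return pr['rec'], pr['prec']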
def _do_python_eval(self, output_dir='output'):
    """Same evaluation, with the annotation extension hard-coded to .xml."""
    annopath = os.path.join(self._val_det_bbox, '{:s}.xml')
    print('Anno path {}'.format(annopath))
    imagesetfile = os.path.join(self._devkit_path, 'data/det_lists',
                                self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_imagenet_results_file_template().format(cls)
        print('File name {}'.format(filename))
        rec, prec, ap = voc_eval(self._wnid_to_ind, self._class_to_ind,
                                 filename, annopath, imagesetfile, cls,
                                 cachedir, ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
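# _get_imagenet_results_file_template() is defined elsewhere in the dataset
# class. A hypothetical sketch of what it plausibly returns, assuming the
# usual VOC-style layout where each per-class results file contains one
# "image_id score xmin ymin xmax ymax" line per detection; the exact
# directory and filename pattern here are assumptions, not the repo's API.
def _get_imagenet_results_file_template(self):
    # e.g. <devkit>/results/det_val_{:s}.txt, later filled with the class name
    filename = 'det_' + self._image_set + '_{:s}.txt'
    return os.path.join(self._devkit_path, 'results', filename)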
def _do_python_eval(self, output_dir='output'):
    """Multi-threshold variant: voc_eval returns a vector of APs (one per
    overlap threshold) and the per-class table is printed to stdout and
    written to results_imagenet.txt."""
    annopath = os.path.join(self._devkit_path, 'Annotations', 'DET',
                            self._image_set_dir, '{:s}.xml')
    imagesetfile = os.path.join(self._devkit_path, 'ImageSets', 'DET',
                                self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache',
                            self._anno_set_dir)
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(self._year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_voc_results_file_template().format(cls)
        # Pass in the class id corresponding to the synset.
        n_cls = self._wnid[i]
        rec, prec, ap, ovthresh = voc_eval(filename, annopath, imagesetfile,
                                           n_cls, cachedir, ovthresh=0.5,
                                           use_07_metric=use_07_metric)
        aps += [ap]
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print(aps)
    # aps has shape (num_classes - 1, len(ovthresh)): one AP per class
    # per overlap threshold.
    aps = np.array(aps)
    for kdx in range(len(ovthresh)):
        print('Mean AP = {:.4f} @ {:.2f}'.format(np.mean(aps[:, kdx]),
                                                 ovthresh[kdx]))
    print('~~~~~~~~')
    print('Results:')
    with open('./results_imagenet.txt', 'w') as results_fd:

        def emit(text):
            # Write the table both to stdout and to the results file.
            sys.stdout.write(text)
            results_fd.write(text)

        emit('{0:>15} (#):'.format('class AP'))
        for thsh in ovthresh:
            emit('\t{:>5}{:.3f}'.format('@', thsh))
        emit('\n')
        for count_, ap in enumerate(aps, 1):
            emit('{:>15} ({}):'.format(self._classes[count_], count_))
            for kdx in range(len(ovthresh)):
                emit('\t{0:>10.5f}'.format(ap[kdx]))
            emit('\n')
        emit('{:>15}:'.format('mAP'))
        for kdx in range(len(ovthresh)):
            emit('\t{:10.5f}'.format(np.mean(aps[:, kdx])))
        emit('\n')
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
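# use_07_metric toggles between the two standard AP definitions. For
# reference, this is the voc_ap helper essentially as it appears in the
# py-faster-rcnn voc_eval module: the 11-point interpolation used through
# VOC2007, versus the exact area under the interpolated PR curve used from
# VOC2010 onward. Uses the shared numpy import above.
def voc_ap(rec, prec, use_07_metric=False):
    if use_07_metric:
        # 11-point metric: average the max precision at recall >= t
        # for t in {0.0, 0.1, ..., 1.0}.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # Exact AP: pad the curve, make precision monotonically decreasing,
        # then sum the rectangles under the curve where recall changes.
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i])
    return ap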