Example #1
 def do_python_eval(self):
     """
     python evaluation wrapper
     :return: info_str
     """
     info_str = ''
     annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
     imageset_file = os.path.join(self.data_path, 'ImageSets',
                                  self.image_set + '.txt')
     annocache = os.path.join(self.cache_path,
                              self.name + '_annotations.pkl')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = self.year == 'SDS' or int(self.year) < 2010
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     info_str += '\n'
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imageset_file,
                                  cls,
                                  annocache,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
     print('Mean AP@0.5 = {:.4f}'.format(np.mean(aps)))
     info_str += 'Mean AP@0.5 = {:.4f}\n\n'.format(np.mean(aps))
     # @0.7
     aps = []
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imageset_file,
                                  cls,
                                  annocache,
                                  ovthresh=0.7,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
     print('Mean AP@0.7 = {:.4f}'.format(np.mean(aps)))
     info_str += 'Mean AP@0.7 = {:.4f}'.format(np.mean(aps))
     return info_str
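The use_07_metric flag above switches between the two PASCAL VOC AP definitions: the 2007 challenge used an 11-point interpolated AP, while 2010 and later use the exact area under the precision/recall curve. The voc_eval function these wrappers call is not shown here; as a reference point, below is a minimal sketch of the voc_ap helper as it appears in py-faster-rcnn-style codebases, which voc_eval is assumed to delegate to.

import numpy as np

def voc_ap(rec, prec, use_07_metric=False):
    """Compute AP from recall/precision arrays (numpy, sorted by score)."""
    if use_07_metric:
        # VOC07 11-point metric: average the max precision at
        # recall levels 0.0, 0.1, ..., 1.0.
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
            ap += p / 11.0
    else:
        # VOC2010+ metric: exact area under the PR curve after making
        # the precision envelope monotonically non-increasing.
        mrec = np.concatenate(([0.0], rec, [1.0]))
        mpre = np.concatenate(([0.0], prec, [0.0]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
    return ap

The envelope pass makes precision non-increasing in recall before integrating, which is what turns the 2010+ metric into an exact area rather than an 11-point sample.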
Example #2
 def do_python_eval(self):
     """
     python evaluation wrapper
     :return: info_str
     """
     info_str = ''
     annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
     imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
     annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = self.year == 'SDS' or int(self.year) < 2010
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
     info_str += '\n'
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
                                  ovthresh=0.5, use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
     print('Mean AP@0.5 = {:.4f}'.format(np.mean(aps)))
     info_str += 'Mean AP@0.5 = {:.4f}\n\n'.format(np.mean(aps))
     # @0.7
     aps = []
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
                                  ovthresh=0.7, use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
     print('Mean AP@0.7 = {:.4f}'.format(np.mean(aps)))
     info_str += 'Mean AP@0.7 = {:.4f}'.format(np.mean(aps))
     return info_str
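Example #2 differs from #1 only in path layout (ImageSets/Main) and line wrapping. Both pass annopath as a format string ('{0!s}.xml') that voc_eval fills with image ids, and annocache as a pickle path where parsed ground truth is cached. Assuming the standard PASCAL VOC annotation layout, a minimal sketch of parsing one such XML file:

import xml.etree.ElementTree as ET

def parse_voc_annotation(filename):
    """Parse one PASCAL VOC annotation XML into a list of object dicts."""
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        objects.append({
            'name': obj.find('name').text,
            'difficult': int(obj.find('difficult').text),
            'bbox': [int(float(obj.find('bndbox').find(tag).text))
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return objects

Caching these parses in annocache (one pickle for the whole image set) is what lets repeated evaluations at different thresholds, as in Examples #1 and #5, skip re-reading every XML file.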
Example #3
    def do_python_eval(self):
        """
        python evaluation wrapper
        :return: None
        """
        annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
        imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main',
                                     self.image_set + '.txt')
        annocache = os.path.join(self.cache_path,
                                 self.name + '_annotations.pkl')
        result_file = os.path.join(self.cache_path,
                                   self.name + '_eval_result.txt')
        result_cont = ''
        recs = []
        precs = []
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = int(self.year) < 2010
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))

        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            filename = self.get_result_file_template().format(cls)
            rec, prec, ap = voc_eval(filename,
                                     annopath,
                                     imageset_file,
                                     cls,
                                     annocache,
                                     ovthresh=0.5,
                                     use_07_metric=use_07_metric)
            #self.draw_roc_curve(cls, rec, prec, ap)
            #recs += rec
            #precs += prec
            aps += [ap]
            output = 'AP for {} = {:.4f}'.format(cls, ap)
            print(output)
            result_cont += output + '\n'
        #self.draw_roc_curve("all", recs/float(self.num_classes-1), precs/float(self.num_classes-1), np.mean(aps))
        output = 'Mean AP = {:.4f}'.format(np.mean(aps))
        print(output)
        result_cont += output + '\n'
        with open(result_file, 'wt') as f:
            f.write(result_cont)
        os.system('mailx -s "gpu029 voc eval result" [email protected] < ' +
                  result_file)
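Example #3 writes the results to a file and mails it by shelling out with os.system plus string concatenation, which breaks if the result path ever contains spaces or shell metacharacters (the recipient address was redacted in the source as [email protected]). A hedged alternative sketch using subprocess, with a hypothetical recipient parameter:

import subprocess

def mail_result(result_file, recipient, subject='voc eval result'):
    """Send result_file by piping it to mailx, without a shell string."""
    with open(result_file, 'rb') as fin:
        # check=False: a mail failure should not abort the evaluation run
        subprocess.run(['mailx', '-s', subject, recipient],
                       stdin=fin, check=False)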
Example #4
 def do_python_eval(self):
     """
     python evaluation wrapper
     :return: None
     """
     annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
     imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main',
                                  self.image_set + '.txt')
     annocache = os.path.join(self.cache_path,
                              self.name + '_annotations.pkl')
     aps = []
     ars = []
     nobs = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = int(self.year) < 2010
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     for cls_ind, cls in enumerate(self.classes):
         if cls == '__background__':
             continue
         filename = self.get_result_file_template().format(cls)
         rec, prec, ap, ar, npos = voc_eval(filename,
                                            annopath,
                                            imageset_file,
                                            cls,
                                            annocache,
                                            ovthresh=0.5,
                                            use_07_metric=use_07_metric)
         aps += [ap]
         ars += [ar]
         nobs += [npos]
         print('AP for {} = {:.4f}'.format(cls, ap))
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     draw_ap(aps,
             ars,
             nobs,
             self.classes[1:],
             range_name='all',
             tag='map = {:.4f}'.format(np.mean(aps)))
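Example #4 uses a voc_eval variant that also returns average recall (ar) and the number of ground-truth objects (npos) per class, and hands everything to draw_ap. That function is not included in the source; below is a hypothetical sketch consistent with its call signature, plotting per-class AP as bars annotated with recall and object counts.

import matplotlib
matplotlib.use('Agg')  # render to file without a display
import matplotlib.pyplot as plt
import numpy as np

def draw_ap(aps, ars, nobs, classes, range_name='all', tag=''):
    """Hypothetical sketch: bar chart of per-class AP with recall/count labels."""
    x = np.arange(len(classes))
    fig, ax = plt.subplots(figsize=(max(6, 0.4 * len(classes)), 4))
    ax.bar(x, aps)
    ax.set_xticks(x)
    ax.set_xticklabels(classes, rotation=90, fontsize=7)
    ax.set_ylabel('AP@0.5')
    ax.set_title('{} ({})'.format(tag, range_name))
    for xi, (ar, n) in enumerate(zip(ars, nobs)):
        # label each bar with "average recall / number of ground-truth boxes"
        ax.text(xi, aps[xi], '{:.2f}/{}'.format(ar, n),
                ha='center', va='bottom', fontsize=6)
    fig.tight_layout()
    fig.savefig('ap_{}.png'.format(range_name))
    plt.close(fig)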
Example #5
    def do_python_eval(self):
        """
        python evaluation wrapper
        :return: info_str
        """
        result_out = True
        result_thresh = 0.85
        if result_out:
            clas = dict()
            f = io.open(os.path.join(self.data_path, 'food_info.txt'), 'r', encoding='UTF-8')
            lines = f.readlines()

            for line in lines:
                data = line.split(' ')
                cls_idx = int(data[1])
                data.pop(1)
                clas[cls_idx] = data  # e.g. clas[24] = [腐竹油菜 3.0 92.8 3.42 4.06 1.99 3.1]

            f.close()
            result_file = os.path.join(self.result_path, 'results', 'Food' + self.year, 'result_all.txt')

            f = io.open(result_file, 'w', encoding='UTF-8')
            f2 = io.open(os.path.join(self.result_path, 'results', 'Food' + self.year,
                                      'result_{}.txt'.format(result_thresh)), 'w', encoding='UTF-8')
        info_str = ''
        annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
        imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
        annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
        
        # The PASCAL VOC metric changed in 2010
        use_07_metric = self.year == 'SDS' or int(self.year) < 2010
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        info_str += '\n'
        
        #kinds = ['all', 'small', 'medium', 'large']
        kinds = ['all']
        Maps = []
        for ovthresh in np.arange(0.5, 1.0, 0.05):
            for k in kinds:
                info_str += 'MAP@{} - ({})\n'.format(ovthresh, k)
                aps = []
                for cls_ind, cls in enumerate(self.classes):
                    if cls == '__background__':
                        continue
                    filename = self.get_result_file_template().format(cls)
                    rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
                                             ovthresh=ovthresh, use_07_metric=use_07_metric)
                    aps += [ap]
                    if result_out:
                        f.write(clas[int(cls)][0] + '  {} = {:.4f}\n'.format(cls, ap))
                        if ap < result_thresh:
                            f2.write(clas[int(cls)][0] + '  {} = {:.4f}\n'.format(cls, ap))
                            #print(clas[int(cls)][0] + '  {} = {:.4f}'.format(cls, ap))
                    info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
                Maps += [np.mean(aps)]
                print('Mean AP@{:.2f} = {:.4f}'.format(ovthresh, np.mean(aps)))
                info_str += 'Mean AP@{:.2f} = {:.4f}\n\n'.format(ovthresh, np.mean(aps))
        if result_out:
            f.close()
            f2.close()
        print('All mAP = {:.4f}'.format(np.mean(Maps)))
        info_str += 'All mAP = {:.4f}'.format(np.mean(Maps))
        return info_str
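Example #5 sweeps ovthresh from 0.5 to 0.95 in 0.05 steps and averages the per-threshold mAPs, i.e. a COCO-style mAP@[0.5:0.95], and additionally cross-references each class against a food_info.txt table. Assuming the line format implied by the inline comment (name, class index, then numeric values), the parsing can be isolated as:

import io

def load_food_info(path):
    """Load the per-class table Example #5 reads from food_info.txt.

    Assumed line format (inferred from the inline comment):
        <name> <class index> <value> <value> ...
    e.g. '腐竹油菜 24 3.0 92.8 3.42 4.06 1.99 3.1'
    """
    info = {}
    with io.open(path, 'r', encoding='UTF-8') as f:
        for line in f:
            parts = line.strip().split(' ')
            idx = int(parts[1])   # class index sits in the second column
            parts.pop(1)          # keep the name plus the remaining values
            info[idx] = parts
    return info

Only the name (parts[0]) is used when writing the result files; classes whose AP falls below result_thresh are additionally written to the second file for inspection.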