def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 anno_path,
                                 image_set_path,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
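For context, the `use_07_metric` flag in these snippets selects between the VOC2007 11-point interpolated AP and the post-2010 all-points AP. Below is a minimal sketch of the two computations, mirroring the standard devkit `voc_ap` logic; the function name is illustrative and `rec`/`prec` are the arrays returned by `voc_eval`.

import numpy as np

def voc_ap_sketch(rec, prec, use_07_metric=False):
    """Illustrative AP computation over a precision-recall curve."""
    if use_07_metric:
        # VOC07: precision sampled at 11 equally spaced recall levels.
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
            ap += p / 11.0
    else:
        # Post-2010: area under the PR curve with the precision envelope
        # made monotonically decreasing.
        mrec = np.concatenate(([0.0], rec, [1.0]))
        mpre = np.concatenate(([0.0], prec, [0.0]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
    return ap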
Example #2
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(self._devkit_path, 'VOC' + self._year,
                             'Annotations', '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'VOC' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric,
                                  use_diff=self.config['use_diff'])
         aps += [ap]
         print(('AP for {} = {:.4f}'.format(cls, ap)))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print(('Mean AP = {:.4f}'.format(np.mean(aps))))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print(('{:.3f}'.format(ap)))
     print(('{:.3f}'.format(np.mean(aps))))
     print('~~~~~~~~')
     print('')
     print('--------------------------------------------------------------')
     print('Results computed with the **unofficial** Python eval code.')
     print('Results should be very close to the official MATLAB eval code.')
     print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
     print('-- Thanks, The Management')
     print('--------------------------------------------------------------')
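The per-class `<cls>_pr.pkl` files dumped above hold `{'rec', 'prec', 'ap'}` and can be reloaded later, for example to plot a precision-recall curve. A small sketch, assuming the files were written with `pickle` as in this example; matplotlib is an assumed extra dependency.

import os
import pickle

import matplotlib.pyplot as plt

def plot_pr_curve(output_dir, cls):
    # Reload the dict written by _do_python_eval for one class.
    with open(os.path.join(output_dir, cls + '_pr.pkl'), 'rb') as f:
        data = pickle.load(f)
    plt.plot(data['rec'], data['prec'])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('{}: AP = {:.4f}'.format(cls, data['ap']))
    plt.savefig(os.path.join(output_dir, cls + '_pr.png'))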
Example #3
def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(
            json_dataset, salt).format(cls)
        rec, prec, ap = voc_eval(
            filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
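Every variant reads per-class detection files produced by `_get_voc_results_file_template()`. In the standard VOC devkit layout each line of such a file is one detection: image identifier, confidence score, then the box corners. A hypothetical writer, shown only to illustrate the expected format:

def write_voc_results_file(path, detections):
    # detections: iterable of (image_id, score, xmin, ymin, xmax, ymax).
    with open(path, 'w') as f:
        for image_id, score, xmin, ymin, xmax, ymax in detections:
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                image_id, score, xmin, ymin, xmax, ymax))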
Example #4
    def _do_python_eval(self, output_dir = 'output'):
        annopath = os.path.join(self._data_path, 'xml', '{:s}.xml')

        cachedir = os.path.join(self._data_path, 'annotations_cache', self._image_set)
        aps = []
        # The PASCAL VOC metric changed in 2010
        # use_07_metric = True if int(self._year) < 2010 else False
        use_07_metric = False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        # for i, cls in enumerate(self._classes):
        for i, cls in enumerate(['sofa']): # Only evaluate sofa
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(
                filename, annopath, self._image_set_file, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric)
            aps += [ap]
            print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print('{:.3f}'.format(ap))
        print('{:.3f}'.format(np.mean(aps)))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #5
 def _do_python_eval(self, output_dir='output'):
     annopath = os.path.join(self._devkit_path, 'RGZ' + self._year,
                             'Annotations', '{:s}.xml')
     imagesetfile = os.path.join(self._devkit_path, 'RGZ' + self._year,
                                 'ImageSets', 'Main',
                                 self._image_set + '.txt')
     cachedir = os.path.join(self._devkit_path, 'annotations_cache')
     aps = []
     # The PASCAL VOC metric changed in 2010
     use_07_metric = True if int(self._year) < 2010 else False
     print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
     for i, cls in enumerate(self._classes):
         if cls == '__background__':
             continue
         filename = self._get_voc_results_file_template().format(cls)
         rec, prec, ap = voc_eval(filename,
                                  annopath,
                                  imagesetfile,
                                  cls,
                                  cachedir,
                                  ovthresh=0.5,
                                  use_07_metric=use_07_metric)
         aps += [ap]
         print('AP for {} = {:.4f}'.format(cls, ap))
         with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
             cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
     print('Mean AP = {:.4f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('Results:')
     for ap in aps:
         print('{:.3f}'.format(ap))
     print('{:.3f}'.format(np.mean(aps)))
     print('~~~~~~~~')
     print('')
Example #6
    def _do_python_eval(self, output_dir='output', path=None):
        annopath = os.path.join(self._devkit_path, 'segmentation_detection',
                                'xml_annotations', '{:s}.xml')
        imagesetfile = os.path.join(self._devkit_path, 'train_val_test',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        ovthresh_list = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        for ovthresh in ovthresh_list:
            res_file = open(
                os.path.join('/DeepWatershedDetection' + path,
                             'res-' + str(ovthresh) + '.txt'), "w+")
            aps = []
            sum_aps, present = 0, 0
            for i, cls in enumerate(self._classes):
                if cls in [
                        'clef15', 'noteheadDoubleWholeSmall', 'flag64thUp',
                        'articTenutoBelow', 'rest64th', 'flag8thDownSmall',
                        'restHBar', 'caesura', 'restLonga', 'restMaxima',
                        'rest32nd', 'dynamicRinforzando2',
                        'noteheadWholeSmall', 'dynamicPPP', 'flag128thUp',
                        'flag64thDown', 'articStaccatissimoAbove',
                        'articStaccatissimoBelow', 'restDoubleWhole',
                        'noteheadDoubleWhole', 'articMarcatoBelow',
                        'fingering5', 'fingering4', 'fingering1', 'fingering0',
                        'fingering3', 'fingering2', 'timeSig9', 'timeSig5',
                        'timeSig0', 'flag128thDown', 'timeSig16', 'timeSig12',
                        'dynamicPPPPP', 'rest128th', 'dynamicFortePiano',
                        'flag32ndUp', 'noteheadHalfSmall', 'flag8thUpSmall',
                        'articMarcatoAbove'
                ]:
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                rec, prec, ap = voc_eval(filename,
                                         annopath,
                                         imagesetfile,
                                         cls,
                                         cachedir,
                                         ovthresh=ovthresh,
                                         use_07_metric=use_07_metric)
                aps += [ap]
                print(('AP for {} = {:.4f}'.format(cls, ap)))
                with open(os.path.join(output_dir, cls + '_pr.pkl'),
                          'wb') as f:
                    pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
                if math.isnan(ap):
                    res_file.write(cls + " " + str(0) + "\n")
                else:
                    res_file.write(cls + " " + '{:.3f}'.format(ap) + "\n")
                    sum_aps += ap
                present += 1
            res_file.write('\n\n\n')
            res_file.write("Mean Average Precision: " +
                           str(sum_aps / float(present)))
            res_file.close()

            print(('Mean AP = {:.4f}'.format(np.mean(aps))))
            print('~~~~~~~~')
            print('Results:')
            print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
Example #7
    def _do_python_eval(self, output_dir='output', path=None):
        annopath = os.path.join(self._devkit_path, 'segmentation_detection',
                                'xml_annotations', '{:s}.xml')
        imagesetfile = os.path.join(self._devkit_path, 'train_val_test',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        ovthresh_list = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        for ovthresh in ovthresh_list:
            aps = []
            for i, cls in enumerate(self._classes):
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                rec, prec, ap = voc_eval(filename,
                                         annopath,
                                         imagesetfile,
                                         cls,
                                         cachedir,
                                         ovthresh=ovthresh,
                                         use_07_metric=use_07_metric)
                aps += [ap]
                print(('AP for {} = {:.4f}'.format(cls, ap)))
                with open(os.path.join(output_dir, cls + '_pr.pkl'),
                          'wb') as f:
                    pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
            print(('Mean AP = {:.4f}'.format(np.mean(aps))))
            print('~~~~~~~~')
            print('Results:')
            # open the file where we want to save the results
            if path is not None:
                res_file = open(
                    os.path.join('/DeepWatershedDetection' + path,
                                 'res-' + str(ovthresh) + '.txt'), "w+")
                len_ap = len(aps)
                sum_aps = 0
                present = 0
                for i in range(len_ap):
                    print(('{:.3f}'.format(aps[i])))
                    if i not in [
                            68, 34, 35, 36, 90, 102, 39, 42, 75, 45, 48, 99,
                            20, 117, 118, 89, 25, 26, 74
                    ]:
                        if math.isnan(aps[i]):
                            res_file.write(str(0) + "\n")
                        else:
                            res_file.write(('{:.3f}'.format(aps[i])) + "\n")
                            sum_aps += aps[i]
                        present += 1
                res_file.write('\n\n\n')
                res_file.write("Mean Average Precision: " +
                               str(sum_aps / float(present)))
                res_file.close()

            print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
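Examples #6 and #7 above loop over ten IoU thresholds from 0.5 to 0.95. Collecting the per-threshold mean APs and averaging them gives a COCO-style AP@[.50:.95] summary. A sketch, assuming `mean_ap_per_thresh` holds one `np.mean(aps)` value per entry of `ovthresh_list`:

import numpy as np

def coco_style_ap(mean_ap_per_thresh):
    # One mean AP per ovthresh in [0.5, 0.55, ..., 0.95]; averaging across
    # the ten thresholds yields a COCO-style AP@[.50:.95] summary number.
    # e.g. append np.mean(aps) inside the ovthresh loop, then:
    #   print('AP@[.50:.95] = {:.4f}'.format(coco_style_ap(mean_ap_per_thresh)))
    return float(np.mean(mean_ap_per_thresh))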
Example #8
def _do_python_eval(json_dataset, salt, output_dir='output'):
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    #cachedir = os.path.join(devkit_path, 'annotations_cache')
    cachedir = os.path.join('data/VOC' + year, 'annotations_cache')
    aps_50 = []
    cls_ls = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(year) < 2010 else False
    logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    # load gt
    with open(image_set_path, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    # load annots
    recs = {}
    with open('./data/fsod/annotations/fsod_test.json', 'r') as f:
        now_dict = json.load(f)
    anno = pd.DataFrame.from_dict(now_dict['annotations'])
    category = pd.DataFrame.from_dict(now_dict['categories'])
    # we need image_id and category
    for i, imagename in enumerate(imagenames):
        image_now_ls = []
        image_id, category_id, ep = imagename.strip().split('_')
        category_name = category.loc[category['id'] == int(category_id),
                                     'name'].tolist()[0]
        now_df = anno.loc[(anno['image_id'] == int(image_id)) &
                          (anno['category_id'] == int(category_id)), :]
        obj_dict = now_df.to_dict('index')
        for key, value in obj_dict.items():
            value['name'] = category_name + '_' + ep
            # Convert the COCO-style [x, y, w, h] box to [x1, y1, x2, y2].
            value['bbox'] = [
                value['bbox'][0], value['bbox'][1],
                value['bbox'][0] + value['bbox'][2],
                value['bbox'][1] + value['bbox'][3]
            ]
            value['truncated'] = 0
            value['difficult'] = 0
            image_now_ls.append(value)
        recs[imagename] = image_now_ls
        if i % 100 == 0:
            logger.info('Reading annotation for {:d}/{:d}'.format(
                i + 1, len(imagenames)))

    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 anno_path,
                                 recs,
                                 imagenames,
                                 cls,
                                 cachedir,
                                 ovthresh=0.5,
                                 use_07_metric=use_07_metric)
        aps_50 += [ap]
        cls_ls += [cls]
        #logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_50_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP50 = {:.4f}'.format(np.mean(aps_50)))
    logger.info('~~~~~~~~')
    logger.info('AP50 Results:')
    for ap_id, ap in enumerate(aps_50):
        logger.info('AP50 for {} = {:.4f}'.format(cls_ls[ap_id], ap))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')

    aps_75 = []
    cls_ls = []
    for _, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        filename = _get_voc_results_file_template(json_dataset,
                                                  salt).format(cls)
        rec, prec, ap = voc_eval(filename,
                                 anno_path,
                                 recs,
                                 imagenames,
                                 cls,
                                 cachedir,
                                 ovthresh=0.75,
                                 use_07_metric=use_07_metric)
        aps_75 += [ap]
        cls_ls += [cls]
        #logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, cls + '_75_pr.pkl')
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP75 = {:.4f}'.format(np.mean(aps_75)))
    logger.info('~~~~~~~~')
    logger.info('AP75 Results:')
    for ap_id, ap in enumerate(aps_75):
        logger.info('AP75 for {} = {:.4f}'.format(cls_ls[ap_id], ap))
    logger.info('~~~~~~~~')
    logger.info('Mean AP50 = {:.4f}'.format(np.mean(aps_50)))
    logger.info('Mean AP75 = {:.4f}'.format(np.mean(aps_75)))
    logger.info('~~~~~~~~')
    logger.info('')
    logger.info('----------------------------------------------------------')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('----------------------------------------------------------')
Example #9
    def _do_python_eval(self, output_dir='output', path=None):
        annopath = os.path.join(self._devkit_path, 'MUSICMA++_' + self._year,
                                'xml_annotations', '{:s}.xml')
        imagesetfile = os.path.join(self._devkit_path, 'train_val_test',
                                    self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        # pdb.set_trace()
        ovthresh_list = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        for ovthresh in ovthresh_list:
            res_file = open(
                os.path.join('/DeepWatershedDetection' + path,
                             'res-' + str(ovthresh) + '.txt'), "w+")
            aps = []
            sum_aps, present = 0, 0
            # pdb.set_trace()
            for i, cls in enumerate(self._classes):
                if cls in [
                        'tuple_bracket_line', 'other-clef',
                        'dotted_horizontal_spanner', 'letter_other',
                        'trill_wobble', 'numeral_0', 'letter_E', 'letter_S',
                        'curved-line_(tie-or-slur)', 'letter_g',
                        'arpeggio_wobble', 'transposition_text'
                ]:  # or cls[:3] == 'let':
                    continue
                # pdb.set_trace()
                filename = self._get_voc_results_file_template().format(cls)
                rec, prec, ap = voc_eval(filename,
                                         annopath,
                                         imagesetfile,
                                         cls,
                                         cachedir,
                                         ovthresh=ovthresh,
                                         use_07_metric=use_07_metric)
                aps += [ap]
                print(('AP for {} = {:.4f}'.format(cls, ap)))
                with open(os.path.join(output_dir, cls + '_pr.pkl'),
                          'wb') as f:
                    pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
                if math.isnan(ap):
                    # ap is a float (NaN here); record 0 for classes with
                    # undefined AP instead of concatenating a float to str.
                    res_file.write(cls + " " + str(0) + "\n")
                else:
                    res_file.write(cls + " " + '{:.3f}'.format(ap) + "\n")
                    sum_aps += ap
                present += 1
            res_file.write('\n\n\n')
            res_file.write("Mean Average Precision: " +
                           str(sum_aps / float(present)))
            res_file.close()

            print(('Mean AP = {:.4f}'.format(np.mean(aps))))
            print('~~~~~~~~')
            print('Results:')
            print(('{:.3f}'.format(np.mean(aps))))

        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')