Example #1
import os

# Presumed imports: COCO, COCOeval and Params come from the CityPersons
# evaluation code (coco.py / eval_MR_multisetup.py, see Example #3),
# not from pycocotools.
from coco import COCO
from eval_MR_multisetup import COCOeval, Params


def cityperson_eval(src_pth,
                    annFile,
                    CUT_WH=None,
                    ignore_uncertain=False,
                    use_iod_for_ignore=False,
                    catIds=[],
                    use_citypersons_standard=True,
                    tiny_scale=1.0,
                    iou_ths=None,
                    setup_labels=None):
    if os.path.isdir(src_pth):
        resFile = os.path.join(src_pth, 'bbox.json')
    else:
        resFile = src_pth
    if CUT_WH is None:
        CUT_WH = (1, 1)
    Params.CITYPERSON_STANDARD = use_citypersons_standard
    if use_citypersons_standard:
        kwargs = {}
    else:
        kwargs = {'filter_type': 'size'}
        Params.TINY_SCALE = tiny_scale
    Params.IOU_THS = iou_ths

    kwargs.update({
        'use_iod_for_ignore': use_iod_for_ignore,
        'ignore_uncertain': ignore_uncertain
    })
    kwargs['given_catIds'] = len(catIds) > 0

    annType = 'bbox'  # specify type here
    print('Running demo for *%s* results.' % annType)

    # running evaluation
    print('CUT_WH:', CUT_WH)
    print('use_citypersons_standard:', use_citypersons_standard)
    print('tiny_scale:', tiny_scale)
    print(kwargs)
    res_file = open("results.txt", "w")
    Params.CUT_WH = CUT_WH
    setupLbl = Params().SetupLbl
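    # One evaluation pass per setup label (e.g. Reasonable, Small, Heavy
    # occlusion, All; the exact labels depend on the Params.SetupLbl of the
    # eval_MR_multisetup version in use).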
    for id_setup in range(len(setupLbl)):
        if (setup_labels is None) or (setupLbl[id_setup] in setup_labels):
            cocoGt = COCO(annFile)
            cocoDt = cocoGt.loadRes(resFile)
            imgIds = sorted(cocoGt.getImgIds())
            cocoEval = COCOeval(cocoGt, cocoDt, annType, **kwargs)
            cocoEval.params.imgIds = imgIds
            cocoEval.evaluate(id_setup)
            cocoEval.accumulate()
            cocoEval.summarize(id_setup, res_file)

    res_file.close()
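A minimal usage sketch for cityperson_eval, with hypothetical paths for the detection directory (expected to contain bbox.json) and the CityPersons-style annotation file:

cityperson_eval('work_dirs/my_model/eval',           # directory holding bbox.json
                'datasets/citypersons/val_gt.json',  # ground-truth annotations
                ignore_uncertain=True,
                use_iod_for_ignore=True)
# Per-setup summaries are written to results.txt in the current working directory.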
Example #2
def validate(annFile, dt_path):
    mean_MR = []
    for id_setup in range(0, 4):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(dt_path)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        mean_MR.append(cocoEval.summarize_nofile(id_setup))
    return mean_MR
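A usage sketch for validate with hypothetical paths; it returns one miss-rate value per setup (four values here, one per id_setup):

mean_MR = validate('datasets/citypersons/val_gt.json',
                   'work_dirs/my_model/val_dt.json')
print(mean_MR)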
Example #3
def evaluation(annFile, resFile, outFile="results.txt"):
    from coco import COCO  # IMPORT THEIR COCO, not pycocotools
    from eval_MR_multisetup import COCOeval

    # running evaluation
    res_file = open(outFile, "w")
    for id_setup in range(0, 4):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        cocoEval.summarize(id_setup, res_file)

    res_file.close()
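A usage sketch with hypothetical paths; coco.py and eval_MR_multisetup.py from the CityPersons evaluation toolkit must be importable (for example, placed next to the calling script):

evaluation('datasets/citypersons/val_gt.json',
           'work_dirs/my_model/val_dt.json',
           outFile='work_dirs/my_model/results.txt')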
Example #4
def mMR(resFile, annFile):
    '''
    :param resFile: detection results as a JSON file: a list of dicts, each of the form
        {'image_id': ..., 'bbox': [x, y, w, h], 'score': ..., 'category_id': ...},
        where image_id has no '.jpg' suffix, category_id is 1 for person, and the
        entries must be sorted by score from high to low.
    :param annFile: annotation JSON file in the same format as the MS COCO dataset,
        e.g. instances_val_{}2014.json.
    :return: None
    '''
    annType = 'bbox'  # evaluate bounding-box detections
    res_file = open("results.txt", "w")
    for id_setup in range(3, 4):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        cocoEval.summarize(id_setup, res_file)
    res_file.close()
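The resFile format described in the docstring can be written out like this; the image name, box values and annotation path are purely illustrative:

import json

# One entry per detection; the list must be sorted by score from high to low.
detections = [
    {'image_id': 'frankfurt_000000_000294_leftImg8bit',  # no '.jpg' suffix
     'bbox': [1142.0, 430.0, 48.0, 110.0],  # [x, y, w, h]
     'score': 0.97,
     'category_id': 1},  # 1 = person
    {'image_id': 'frankfurt_000000_000294_leftImg8bit',
     'bbox': [820.0, 455.0, 35.0, 82.0],
     'score': 0.41,
     'category_id': 1},
]
with open('val_dt.json', 'w') as fp:
    json.dump(detections, fp)

mMR('val_dt.json', 'datasets/citypersons/val_gt.json')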
Example #5
def eval_json_reasonable(annFile, resFile):
    dt_path = os.path.split(resFile)[0]
    respath = os.path.join(dt_path, 'results.txt')
    res_file = open(respath, "w")
    annType = 'bbox'  # evaluate bounding-box detections
    mr_reasonable = None
    for id_setup in range(6):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        mean_mr = cocoEval.summarize(id_setup, res_file)
        if id_setup == 0:
            mr_reasonable = mean_mr
    print('')
    res_file.close()
    return mr_reasonable
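A usage sketch with hypothetical paths; the per-setup summaries go to a results.txt next to the detection file, and the miss rate of the first (Reasonable) setup is returned:

mr = eval_json_reasonable('datasets/citypersons/val_gt.json',
                          'work_dirs/my_model/val_dt.json')
print('Reasonable MR:', mr)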
Example #6
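# Snippet from a larger evaluation script: main_path, annFile, annType,
# base_save_dir and img_base are assumed to be defined earlier, and the
# collections and cv2 modules to be imported.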
if os.path.isfile(main_path):
    print('Given file {} with detections'.format(main_path))
    res_file = None
    resFile_txt = os.path.join(main_path)
    resFile = os.path.join(main_path.replace('.txt', '.json'))
    for id_setup in range(6):
        cocoGt = COCO(annFile)
        img_lut = {img['id']: img for img in cocoGt.imgs.values()}
        ann_lut = {ann['id']: ann for ann in cocoGt.anns.values()}
        ann_lut_by_img = collections.defaultdict(list)
        for ann in cocoGt.anns.values():
            img_id = ann['image_id']
            ann_lut_by_img[img_id].append(ann)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        setup_name = cocoEval.params.SetupLbl[id_setup]
        setup_savedir = os.path.join(base_save_dir, setup_name)
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        misses = cocoEval.accumulate(plot=True, return_misses=True)
        mean_mr = cocoEval.summarize(id_setup, res_file)
        missed_heights = []
        missed_visibilities = []
        for img_id, ms in misses.items():
            if len(ms):
                image_name = img_lut[img_id]['im_name']
                city = image_name.split('_')[0]
                image_path = os.path.join(img_base, city, image_name)
                image = cv2.imread(image_path)
                bbs_missed = [ann_lut[m]['bbox'] for m in ms]
Example #7
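# Snippet from a larger evaluation script: exp_name, annFile, annType and the
# IoU-threshold list `ious` are assumed to be defined earlier in the file.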
main_path = '../../output/valresults/precarious/h/off{}_finetuned'.format(
    exp_name)

for f in sorted(os.listdir(main_path)):
    print('file: {}'.format(f))
    # initialize COCO detections api
    dt_path = os.path.join(main_path, f)
    resFile = os.path.join(dt_path, 'val_dt.json')
    print('Load results from {}'.format(resFile))
    respath = os.path.join(dt_path, 'results.txt')
    # if os.path.exists(respath):
    #     continue
    ## running evaluation
    if not os.path.exists(resFile):
        print("Skipping {} ... Doesn't exist yet.".format(resFile))
        continue
    res_file = open(respath, "w")
    for iou_thr in ious:
        for id_setup in range(1):
            cocoGt = COCO(annFile)
            cocoDt = cocoGt.loadRes(resFile)
            imgIds = sorted(cocoGt.getImgIds())
            cocoEval = COCOeval(cocoGt, cocoDt, annType)
            cocoEval.params.imgIds = imgIds
            cocoEval.evaluate(id_setup)
            cocoEval.accumulate()
            cocoEval.summarize(id_setup, res_file, iou_thr=iou_thr)
    print('')

    res_file.close()
Example #8
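    # Snippet from inside a loop over detection folders (as in Example #7):
    # f, main_path, annFile, annType, best_mr_reasonable and best_mr_name are
    # assumed to be defined earlier in the file.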
    print('file: {}'.format(f))
    if 'val' in f:
        continue
    # initialize COCO detections api
    dt_path = os.path.join(main_path, f)
    resFile = os.path.join(dt_path, 'val_dt.json')
    respath = os.path.join(dt_path, 'results.txt')
    # if os.path.exists(respath):
    #     continue
    ## running evaluation
    res_file = open(respath, "w")
    for id_setup in range(6):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(resFile)
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        mean_mr = cocoEval.summarize(id_setup, res_file)
        if id_setup == 0:
            mr_reasonable = mean_mr
            if mr_reasonable < best_mr_reasonable:
                print('New best validation MR with model {} : {} -> {}'.format(
                    f, best_mr_reasonable, mr_reasonable))
                best_mr_reasonable = mr_reasonable
                best_mr_name = f
    print('')

    res_file.close()