Example #1
0
def calcClassMeasures(gt, prob, validArea, thresh):
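    # Thin wrapper: score one class's probability map against its binary GT within validArea via evalExp.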
    #    print prob.max()
    #    plt.subplot(2,1,1)
    #    plt.imshow(gt)
    #    plt.subplot(2,1,2)
    #    plt.imshow(prob)
    return evalExp(gt, prob, thresh, validMap=None, validArea=validArea)
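
# Minimal usage sketch (my addition, not part of the original snippet; assumes evalExp from
# the KITTI road devkit is importable alongside this function):
import numpy as np

gt = np.zeros((375, 1242), dtype=bool)      # binary ground-truth road mask
gt[200:, :] = True
prob = np.random.rand(375, 1242)            # confidence map in [0, 1]
validArea = np.ones_like(gt)                # evaluate every pixel
thresh = np.array(range(0, 256)) / 255.0    # 256 confidence thresholds
FN, FP, posNum, negNum = calcClassMeasures(gt, prob, validArea, thresh)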
Example #2
0
def calcClassMeasures(gt, prob, validArea, thresh):
    #    print prob.max()
    #    plt.subplot(2,1,1)
    #    plt.imshow(gt)
    #    plt.subplot(2,1,2)
    #    plt.imshow(prob)
    return evalExp(gt, prob, thresh, validMap=None, validArea=validArea)
Example #3
0
def eval_road(li_pred, li_gt):
    print "Starting evaluation ..."
    thresh = np.array(range(0, 256)) / 255.0    # recall thresh
    # init result
    totalFP = np.zeros(thresh.shape)
    totalFN = np.zeros(thresh.shape)
    totalPosNum = 0
    totalNegNum = 0

    for i in range(len(li_pred)):
        pred = li_pred[i]
        # gt = li_gt[i] > 0
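        # road pixels are labeled 1; labels >= 250 mark "don't care" regions excluded via validArea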
        gt = li_gt[i] == 1
        validArea = li_gt[i] < 250
        FN, FP, posNum, negNum = evalExp(gt, pred, thresh, validMap=None, validArea=validArea)

        assert FN.max() <= posNum, 'BUG @ positive samples'
        assert FP.max() <= negNum, 'BUG @ negative samples'

        # collect results for whole category
        totalFP += FP
        totalFN += FN
        totalPosNum += posNum
        totalNegNum += negNum

    # if category_ok:
    print "Computing evaluation scores..."
    # Compute eval scores!
    eval_dict = pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh=thresh)

    # li_property: list of metric keys to report (e.g. 'MaxF', 'AvgPrec'); assumed to be defined at module level
    for property in li_property:
        print '%s: %4.2f ' % (property, eval_dict[property] * 100,)

    print "Finished evaluating!"
    return eval_dict['MaxF']
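
# Hypothetical driver (my sketch, not from the source; assumes evalExp, pxEval_maximizeFMeasure,
# and the module-level li_property list are available): feed eval_road lists of probability maps
# and uint8 label maps where road pixels are 1 and values >= 250 are ignored.
import numpy as np

li_pred = [np.random.rand(375, 1242) for _ in range(3)]
li_gt = [np.random.choice([0, 1, 255], size=(375, 1242)).astype(np.uint8) for _ in range(3)]
max_f = eval_road(li_pred, li_gt)
print('MaxF: %4.2f' % (max_f * 100))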
Example #4
0
def main(result_dir, train_dir, debug=False):
    '''
    main method of evaluateRoad
    :param result_dir: directory with the result probability maps, e.g., /home/elvis/kitti_road/my_results
    :param train_dir: training directory (has to contain gt_image_2), e.g., /home/elvis/kitti_road/training
    :param debug: debug flag (OPTIONAL)
    '''

    print "Starting evaluation ..."
    print "Available categories are: %s" % dataStructure.cats

    thresh = np.array(range(0, 256)) / 255.0
    trainData_subdir_gt = 'gt_image_2/'
    gt_dir = os.path.join(train_dir, trainData_subdir_gt)
    print os.getcwd()
    assert os.path.isdir(
        result_dir), 'Cannot find result_dir: %s ' % result_dir

    # In the submission_dir we expect the probmaps!
    submission_dir = result_dir
    assert os.path.isdir(submission_dir), 'Cannot find %s, ' % submission_dir

    # init result
    prob_eval_scores = []  # the eval results in a dict
    eval_cats = []  # saves all categories that were evaluated
    outputline = []
    for cat in dataStructure.cats:
        print "Execute evaluation for category %s ..." % cat
        fn_search = '%s*%s' % (cat, dataStructure.gt_end)
        prob_fileList = glob(os.path.join(result_dir, fn_search))
        assert len(prob_fileList) > 0, 'Error reading result probability maps'
        gt_fileList = glob(os.path.join(gt_dir, fn_search))
        assert len(gt_fileList) > 0, 'Error reading ground truth'
        # Init data for category
        category_ok = True  # Flag for each cat
        totalFP = np.zeros(thresh.shape)
        totalFN = np.zeros(thresh.shape)
        totalPosNum = 0
        totalNegNum = 0

        # firstFile  = gt_fileList[0]
        firstFile = prob_fileList[0]
        file_key = firstFile.split('/')[-1].split('.')[0]
        tags = file_key.split('_')
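        # expected file-name pattern: <dataset>_<class>_<frame>, e.g. 'um_road_000000'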
        ts_tag = tags[2]
        dataset_tag = tags[0]
        class_tag = tags[1]

        submission_tag = dataset_tag + '_' + class_tag + '_'
        print "Searching for submitted files with prefix: %s" % submission_tag

        for prob_cur in prob_fileList:
            file_key = prob_cur.split('/')[-1].split('.')[0]
            file_key_1, file_key_2, file_key_3 = file_key.split('_')
            _file_key = file_key_1 + '_' + file_key_2 + '_' + file_key_3
            if debug:
                print "Processing file: %s " % file_key

            # get tags
            tags = file_key.split('_')
            ts_tag = tags[2]
            dataset_tag = tags[0]
            class_tag = tags[1]

            cur_prob = cv2.imread(prob_cur, 0)

            fn_curGt = os.path.join(gt_dir, _file_key + dataStructure.prob_end)
            if not os.path.isfile(fn_curGt):
                print "Cannot find file: %s for category %s." % (fn_curGt, cat)
                print "--> Will now abort evaluation for this particular category."
                category_ok = False
                break
            cur_gt, validArea = getGroundTruth(fn_curGt)
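            # normalize the 8-bit probability map to [0, 1]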
            cur_prob = np.clip(
                (cur_prob.astype('f4')) / (np.iinfo(cur_prob.dtype).max), 0.,
                1.)

            FN, FP, posNum, negNum = evalExp(cur_gt,
                                             cur_prob,
                                             thresh,
                                             validMap=None,
                                             validArea=validArea)

            assert FN.max() <= posNum, 'BUG @ positive samples'
            assert FP.max() <= negNum, 'BUG @ negative samples'

            # collect results for whole category
            totalFP += FP
            totalFN += FN
            totalPosNum += posNum
            totalNegNum += negNum

        if category_ok:
            print "Computing evaluation scores..."
            # Compute eval scores!
            prob_eval_scores.append(
                pxEval_maximizeFMeasure(totalPosNum,
                                        totalNegNum,
                                        totalFN,
                                        totalFP,
                                        thresh=thresh))
            eval_cats.append(cat)

            factor = 100
            for property in dataStructure.eval_propertyList:
                print '%s: %4.2f ' % (
                    property,
                    prob_eval_scores[-1][property] * factor,
                )

            print "Finished evaluating category: %s " % (eval_cats[-1], )

    if len(eval_cats) > 0:
        print "Successfully finished evaluation for %d categories: %s " % (
            len(eval_cats), eval_cats)
        return True
    else:
        print "No categories have been evaluated!"
        return False
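
# Example invocation (my addition; the paths are the placeholders from the docstring):
if __name__ == '__main__':
    main('/home/elvis/kitti_road/my_results', '/home/elvis/kitti_road/training', debug=True)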
Example #5
0
def main(result_dir, train_dir, debug=False):
    '''
    main method of evaluateRoad
    :param result_dir: directory with the result probability maps, e.g., /home/elvis/kitti_road/my_results
    :param train_dir: training directory (has to contain the origin/ ground-truth subfolder), e.g., /home/elvis/kitti_road/training
    :param debug: debug flag (OPTIONAL)
    '''

    print("Starting evaluation ...")
    thresh = np.array(range(0, 256)) / 255.0
    trainData_subdir_gt = 'origin/'
    gt_dir = os.path.join(train_dir, trainData_subdir_gt)

    assert os.path.isdir(
        result_dir), 'Cannot find result_dir: %s ' % result_dir

    # In the submission_dir we expect the probmaps!
    submission_dir = result_dir
    assert os.path.isdir(submission_dir), 'Cannot find %s, ' % submission_dir

    # init result
    prob_eval_scores = []  # the eval results in a dict
    gt_fileList = glob(os.path.join(gt_dir, '*'))
    assert len(gt_fileList) > 0, 'Error reading ground truth'
    # Init data for category
    category_ok = True  # Flag for each cat
    totalFP = np.zeros(thresh.shape)
    totalFN = np.zeros(thresh.shape)
    totalPosNum = 0
    totalNegNum = 0

    for fn_curGt in gt_fileList:
        fn_curGt = fn_curGt.replace('\\', '/')
        file_key = fn_curGt.split('/')[-1].split('.')[0]
        print("Processing file: %s " % file_key)

        # Read GT
        cur_gt, validArea = getGroundTruth(fn_curGt)
        # Read probmap and normalize
        fn_curProb = os.path.join(submission_dir,
                                  file_key + dataStructure.prob_end)
        if not os.path.isfile(fn_curProb):
            print("Cannot find file: %s" % fn_curProb)
            print(
                "--> Will now abort evaluation for this particular category.")
            category_ok = False
            break

        cur_prob = cv2.imread(fn_curProb, 0)
        cur_prob = np.clip(
            (cur_prob.astype('f4')) / (np.iinfo(cur_prob.dtype).max), 0., 1.)
        FN, FP, posNum, negNum = evalExp(cur_gt,
                                         cur_prob,
                                         thresh,
                                         validMap=None,
                                         validArea=validArea)
        assert FN.max() <= posNum, 'BUG @ positive samples'
        assert FP.max() <= negNum, 'BUG @ negative samples'

        # collect results for whole category
        totalFP += FP
        totalFN += FN
        totalPosNum += posNum
        totalNegNum += negNum

    if category_ok:
        print("Computing evaluation scores...")
        # Compute eval scores!
        eval_dict = pxEval_maximizeFMeasure(totalPosNum,
                                            totalNegNum,
                                            totalFN,
                                            totalFP,
                                            thresh=thresh)
        print(eval_dict)
        prob_eval_scores.append(eval_dict)

        factor = 100
        for property in dataStructure.eval_propertyList:
            print('%s: %4.2f ' % (
                property,
                prob_eval_scores[-1][property] * factor,
            ))
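
# Example invocation (my addition; docstring placeholder paths). Note this variant only prints
# the aggregate scores and does not return them.
if __name__ == '__main__':
    main('/home/elvis/kitti_road/my_results', '/home/elvis/kitti_road/training')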
Example #6
0
def main(result_dir, train_dir, debug=False):
    '''
    main method of evaluateRoad
    :param result_dir: directory with the result probability maps, e.g., /home/elvis/kitti_road/my_results
    :param train_dir: training directory (has to contain the image/ ground-truth subfolder), e.g., /home/elvis/kitti_road/training
    :param debug: debug flag (OPTIONAL)
    '''
    
    print "Starting evaluation ..." 
    print "Available categories are: %s" %dataStructure.cats
    
    thresh = np.array(range(0,256))/255.0
    trainData_subdir_gt = 'image/'
    gt_dir = os.path.join(train_dir,trainData_subdir_gt)
    
    assert os.path.isdir(result_dir), 'Cannot find result_dir: %s ' %result_dir
    
    # In the submission_dir we expect the probmaps! 
    submission_dir = result_dir
    assert os.path.isdir(submission_dir), 'Cannot find %s, ' %submission_dir
    
    # init result
    prob_eval_scores = [] # the eval results in a dict
    eval_cats = [] # saves all categories that were evaluated
    outputline = []
    for cat in dataStructure.cats:
        print "Execute evaluation for category %s ..." %cat
        fn_search  = '%s*%s' %(cat, dataStructure.gt_end)
        gt_fileList = glob(os.path.join(gt_dir, fn_search))
        assert len(gt_fileList)>0, 'Error reading ground truth'
        # Init data for category
        category_ok = True # Flag for each cat
        totalFP = np.zeros( thresh.shape )
        totalFN = np.zeros( thresh.shape )
        totalPosNum = 0
        totalNegNum = 0
        
        firstFile  = gt_fileList[0]
        file_key = firstFile.split('/')[-1].split('.')[0]
        tags = file_key.split('_')
        print tags
        ts_tag = tags[2]
        dataset_tag = tags[0]
        class_tag = tags[1]
        
        submission_tag = dataset_tag + '_' + class_tag + '_'
        print "Searching for submitted files with prefix: %s" %submission_tag
        
        for fn_curGt in gt_fileList:
            
            file_key = fn_curGt.split('/')[-1].split('.')[0]
            if debug:
                print "Processing file: %s " %file_key
            
            # get tags
            tags = file_key.split('_')
            ts_tag = tags[2]
            dataset_tag = tags[0]
            class_tag = tags[1]
            

            # Read GT
            cur_gt, validArea = getGroundTruth(fn_curGt)
                        
            # Read probmap and normalize
            fn_curProb = os.path.join(submission_dir, file_key + dataStructure.prob_end)
            
            if not os.path.isfile(fn_curProb):
                print "Cannot find file: %s for category %s." %(file_key, cat)
                print "--> Will now abort evaluation for this particular category."
                category_ok = False
                break
            
            cur_prob = cv2.imread(fn_curProb,0)
            cur_prob = np.clip( (cur_prob.astype('f4'))/(np.iinfo(cur_prob.dtype).max),0.,1.)
            
            FN, FP, posNum, negNum = evalExp(cur_gt, cur_prob, thresh, validMap = None, validArea=validArea)
            
            assert FN.max()<=posNum, 'BUG @ positive samples'
            assert FP.max()<=negNum, 'BUG @ negative samples'
            
            # collect results for whole category
            totalFP += FP
            totalFN += FN
            totalPosNum += posNum
            totalNegNum += negNum
        
        if category_ok:
            print "Computing evaluation scores..."
            # Compute eval scores!
            prob_eval_scores.append(pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh = thresh))
            eval_cats.append(cat)
            
            factor = 100
            for property in dataStructure.eval_propertyList:
                print '%s: %4.2f ' %(property, prob_eval_scores[-1][property]*factor,)


            print "Finished evaluating category: %s " %(eval_cats[-1],)
    
    if len(eval_cats)>0:     
        print "Successfully finished evaluation for %d categories: %s " %(len(eval_cats),eval_cats)
        return True
    else:
        print "No categories have been evaluated!"
        return False
Example #7
0
def main(result_dir, train_dir, file_path="./data1", debug=False):
    '''
    main method of evaluateRoad
    :param result_dir: directory with the result probability maps, e.g., /home/user/lane/eval/eval_images/tmp
    :param train_dir: training directory (has to contain groundtruth), e.g., /home/user/lane/eval/eval_images
    :param file_path: path pointing to the (not yet existing) measurement file e.g. /home/user/lane/eval/eval_results/dataxxx
    :param debug: debug flag (OPTIONAL)
    '''

    if (debug):
        print("Starting evaluation ...")
        print("Available categories are: %s" % (dataStructure.cats))

    thresh = np.array(range(0, 256)) / 255.0
    trainData_subdir_gt = 'groundtruth/'
    gt_dir = os.path.join(train_dir, trainData_subdir_gt)

    assert os.path.isdir(
        result_dir), 'Cannot find result_dir: %s ' % result_dir

    # In the submission_dir we expect the probmaps!
    submission_dir = result_dir
    assert os.path.isdir(submission_dir), 'Cannot find %s, ' % submission_dir

    # init result
    prob_eval_scores = []  # the eval results in a dict
    eval_cats = []  # saves all categories that were evaluated
    outputline = []

    path = Path(file_path)
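    # write a header row of metric names; per-frame and aggregate score rows are appended below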
    with path.open(mode="w") as f:
        f.write('#')
        for property in dataStructure.eval_propertyList:
            f.write('%s ' % (property))
        f.write("\n")

    totalFP = np.zeros(thresh.shape)
    totalFN = np.zeros(thresh.shape)
    totalPosNum = 0
    totalNegNum = 0

    for cat in dataStructure.cats:
        print()
        if (debug):
            print("Execute evaluation for category %s ..." % (cat))
        fn_search = '%s*%s' % (cat, dataStructure.gt_end)
        gt_fileList = sorted(glob(os.path.join(gt_dir, fn_search)))
        assert len(gt_fileList) > 0, 'Error reading ground truth'
        # Init data for category
        category_ok = True  # Flag for each cat
        #totalFP = np.zeros(thresh.shape)
        #totalFN = np.zeros(thresh.shape)
        #totalPosNum = 0
        #totalNegNum = 0

        firstFile = gt_fileList[0]
        file_key = firstFile.split('/')[-1].split('.')[0]
        tags = file_key.split('_')
        ts_tag = tags[2]
        dataset_tag = tags[0]
        class_tag = tags[1]

        submission_tag = dataset_tag + '_' + class_tag + '_'
        if (debug):
            print("Searching for submitted files with prefix: %s" %
                  (submission_tag))

        for fn_curGt in gt_fileList:

            file_key = fn_curGt.split('/')[-1].split('.')[0]
            if debug:
                print("Processing file: %s " % (file_key))

            # get tags
            tags = file_key.split('_')
            ts_tag = tags[2]
            dataset_tag = tags[0]
            class_tag = tags[1]

            # Read GT
            cur_gt, validArea = getGroundTruth(fn_curGt)

            # Read probmap and normalize
            fn_curProb = os.path.join(submission_dir,
                                      file_key + dataStructure.prob_end)

            if not os.path.isfile(fn_curProb):
                print("Cannot find file: %s for category %s." %
                      (file_key, cat))
                print(
                    "--> Will now abort evaluation for this particular category."
                )
                category_ok = False
                break

            cur_prob = cv2.imread(fn_curProb, 0)
            cur_prob = np.clip(
                (cur_prob.astype('f4')) / (np.iinfo(cur_prob.dtype).max), 0.,
                1.)

            FN, FP, posNum, negNum = evalExp(cur_gt,
                                             cur_prob,
                                             thresh,
                                             validMap=None,
                                             validArea=validArea)

            assert FN.max() <= posNum, 'BUG @ positive samples'
            assert FP.max() <= negNum, 'BUG @ negative samples'

            prob_eval_scores.append(
                pxEval_maximizeFMeasure(posNum, negNum, FN, FP, thresh=thresh))
            factor = 100
            with path.open(mode="a") as f:
                for property in dataStructure.eval_propertyList:
                    f.write('%4.2f ' %
                            (prob_eval_scores[-1][property] * factor))
                f.write("\n")

            prob_eval_scores = []

            # collect results for whole category
            totalFP += FP
            totalFN += FN
            totalPosNum += posNum
            totalNegNum += negNum

    if category_ok:
        if (debug):
            print("Computing evaluation scores...")
        # Compute eval scores!
        prob_eval_scores.append(
            pxEval_maximizeFMeasure(totalPosNum,
                                    totalNegNum,
                                    totalFN,
                                    totalFP,
                                    thresh=thresh))
        eval_cats.append(cat)

        factor = 100
        with path.open(mode="a") as f:
            for property in dataStructure.eval_propertyList:
                f.write('%4.2f ' % (prob_eval_scores[-1][property] * factor))
            f.write('\n')

        if (debug):
            print("Finished evaluating category: %s " % (eval_cats[-1], ))

    if len(eval_cats) > 0:
        if (debug):
            print("Successfully finished evaluation for %d categories: %s " %
                  (len(eval_cats), eval_cats))
        return True
    else:
        print("No categories have been evaluated!")
        return False
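
# Example invocation (my addition; paths follow the docstring placeholders). Per-frame and
# aggregate metric rows are written to the measurement file given by file_path.
if __name__ == '__main__':
    main('/home/user/lane/eval/eval_images/tmp', '/home/user/lane/eval/eval_images',
         file_path='./data1', debug=True)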