Example #1
0
def validate_data(filePath, evaluationParams, isGT):
    """
    Validate a zip archive of ground-truth or detection-result files.

    Confirms that every file inside the archive matches the expected naming
    pattern and that each of its lines is well formed. Any problem is
    reported by the underlying ``rrc_evaluation_funcs`` helpers raising an
    exception; on success the function returns nothing.

    Args:
        filePath: path to the zip archive to check.
        evaluationParams: dict of evaluation settings; must provide
            'GT_SAMPLE_NAME_2_ID' / 'DET_SAMPLE_NAME_2_ID' naming patterns
            and 'CONFIDENCES' for detection files.
        isGT: True to validate a ground-truth archive (transcriptions
            required), False to validate a detection archive (confidence
            values required per 'CONFIDENCES').
    """
    if isGT:
        # Ground truth: every line must carry a transcription.
        archive = rrc_evaluation_funcs.load_zip_file(
            filePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
        for sample in archive:
            rrc_evaluation_funcs.validate_lines_in_file(
                sample,
                archive[sample],
                evaluationParams,
                withTranscription=True)
    else:
        # Detections: confidence values are required only when the
        # evaluation protocol asks for them.
        archive = rrc_evaluation_funcs.load_zip_file(
            filePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)
        for sample in archive:
            rrc_evaluation_funcs.validate_lines_in_file(
                sample,
                archive[sample],
                evaluationParams,
                withConfidence=evaluationParams['CONFIDENCES'])
Example #2
0
def count_pr_for_detectors():
    """
    Compute a precision/recall curve for every detector in `using_detections`.

    Validates and loads the ground truth once, then for each detector key:
    validates and loads its detection archive, runs `precision_recall`
    against the ground truth with that detector's threshold, and dumps the
    resulting curve to ``./precision_recall/data/<key>.json``.

    Returns:
        dict: detector key -> {'precision': list, 'recall': list,
        'thresholds': list, 'operating_point': ...}.
    """
    evalParams = polygon_evaluation_params()
    validate_data(gtFilePath, evalParams, isGT=True)
    gt = load_zip_file(gtFilePath, evalParams['GT_SAMPLE_NAME_2_ID'])
    subm_dict = {}
    curves = {}
    for key in using_detections:
        validate_data(allDetFilePaths[key], evalParams, isGT=False)
        subm_dict[key] = load_zip_file(
            allDetFilePaths[key], evalParams['DET_SAMPLE_NAME_2_ID'], True)
        precision, recall, confidences, operating_point = precision_recall(
            gt, subm_dict[key], threshold=thresholds[key])
        # BUGFIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24, making `astype(np.float)` raise AttributeError. The builtin
        # `float` is the documented replacement and performs the identical
        # float64 cast.
        curves[key] = {'precision': list(precision.astype(float)),
                       'recall': list(recall.astype(float)),
                       'thresholds': list(confidences),
                       'operating_point': operating_point}
        with open('./precision_recall/data/' + key + '.json', "w") as write_file:
            json.dump({key: curves[key]}, write_file)
    print()
    return curves
Example #3
0
    print("average recall: ", av_recall)
    print("average hmean: ", av_hmean)
    print("average ap: ", av_ap)
    return best_result_per_fold, best_params_per_fold


if __name__ == '__main__':
    evalParams = polygon_evaluation_params()

    # Sanity-check every detection archive before loading anything.
    for det_key in using_detections:
        validate_data(allDetFilePaths[det_key], evalParams, isGT=False)

    # Load detections, re-keyed by the detector name (prefix before '_').
    subm_dict = {
        det_key.split('_')[0]: load_zip_file(allDetFilePaths[det_key],
                                             evalParams['DET_SAMPLE_NAME_2_ID'],
                                             True)
        for det_key in using_detections
    }

    # Validate and load the ground truth.
    validate_data(gtFilePath, evalParams, isGT=True)
    gt = load_zip_file(gtFilePath, evalParams['GT_SAMPLE_NAME_2_ID'])

    detector_keys = list(subm_dict.keys())
    if not detector_keys:
        exit()

    # Image keys are numeric strings; sort them numerically.
    img_keys = sorted(subm_dict[detector_keys[0]].keys(), key=int)
    best_results, best_params = cross_validate_TextALFA(
        img_keys, gt, detector_keys, subm_dict)

    print()
    print()
    for fold in range(folds_count):
        print(fold, best_results[fold])
        print(best_params[fold])