Example #1
0
def precision_recall(gt, subm, threshold=None):
    """Evaluate a submission against ground truth and return its P/R curve.

    Runs the polygon-based evaluation (via ``rrc_evaluation_funcs``) on the
    given ground-truth and submission data.

    Args:
        gt: ground-truth data, as expected by ``main_evaluation_notzip``.
        subm: submission (detection) data for the same samples.
        threshold: optional confidence value; when given, the curve point
            whose confidence is closest to it is returned as the
            operating point.

    Returns:
        Tuple ``(precision, recall, confidences, operating_point)`` where
        ``operating_point`` is ``(precision, recall)`` floats, or ``None``
        when no threshold was supplied.
    """
    eval_params = polygon_evaluation_params()
    res_dict, precision, recall, confidences = rrc_evaluation_funcs.main_evaluation_notzip(
        {'g': gt, 's': subm}, eval_params, validate_data, evaluate_method,
        show_result=False)

    if threshold is None:
        operating_point = None
    else:
        # Pick the curve point whose confidence is nearest the requested threshold.
        nearest_idx = np.argmin(np.abs(confidences - threshold))
        operating_point = (float(precision[nearest_idx]), float(recall[nearest_idx]))

    return precision, recall, confidences, operating_point
Example #2
0
def count_pr_for_detectors():
    """Compute precision/recall curves for every configured detector.

    Validates and loads the ground truth and each detector's submission
    (module-level globals ``gtFilePath``, ``using_detections``,
    ``allDetFilePaths``, ``thresholds`` — defined elsewhere in this file),
    computes the P/R curve per detector via ``precision_recall``, and dumps
    each curve to ``./precision_recall/data/<key>.json``.

    Returns:
        dict mapping detector key -> {'precision', 'recall', 'thresholds',
        'operating_point'} with JSON-serializable values.
    """
    evalParams = polygon_evaluation_params()
    validate_data(gtFilePath, evalParams, isGT=True)
    gt = load_zip_file(gtFilePath, evalParams['GT_SAMPLE_NAME_2_ID'])
    subm_dict = {}
    curves = {}
    for key in using_detections:
        validate_data(allDetFilePaths[key], evalParams, isGT=False)
        subm_dict[key] = load_zip_file(allDetFilePaths[key], evalParams['DET_SAMPLE_NAME_2_ID'], True)
        precision, recall, confidences, operating_point = precision_recall(gt, subm_dict[key], threshold=thresholds[key])
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # astype(float) + tolist() yields plain Python floats (JSON-friendly).
        curves[key] = {'precision': precision.astype(float).tolist(),
                       'recall': recall.astype(float).tolist(),
                       'thresholds': list(confidences),
                       'operating_point': operating_point}
        with open('./precision_recall/data/' + key + '.json', "w") as write_file:
            json.dump({key: curves[key]}, write_file)
    print()
    return curves
Example #3
0
def validate_TextALFA(test_index, detector_keys, subm_dict, cur_params):
    """Fuse per-detector polygon detections with TextALFA for each test image.

    For every image key in ``test_index``, parses each detector's detection
    file into polygons + confidences, fuses them via
    ``TextALFA.TextALFA_result`` using the hyperparameters in ``cur_params``,
    and drops fused detections whose confidence falls below
    ``cur_params['threshold']``.

    Returns:
        dict mapping image key -> (polygons, confidences) after fusion
        and thresholding.
    """
    text_alfa = TextALFA()
    evalParams = polygon_evaluation_params()
    joined_subm_dict = {}
    for img_key in test_index:
        per_detector_polygons = {}
        per_detector_confs = {}
        for detector_key in detector_keys:
            det_file = subm_dict[detector_key][img_key]
            points_list, _, confidences_list, _ = get_tl_line_values_from_file_contents(
                det_file, evalParams, False, evalParams['CONFIDENCES'])
            # Convert raw point lists into polygon objects for this detector.
            per_detector_polygons[detector_key] = [polygon_from_points(pts) for pts in points_list]
            per_detector_confs[detector_key] = confidences_list

        fused_polygons, fused_confs = text_alfa.TextALFA_result(
            detector_keys, per_detector_polygons, per_detector_confs,
            tau=cur_params['tau'], gamma=0.5,
            bounding_box_fusion_method=cur_params['bounding_box_fusion_method'],
            scores_fusion_method=cur_params['scores_fusion_method'],
            add_empty_detections=True,
            empty_epsilon=cur_params['empty_epsilon'],
            max_1_box_per_detector=True,
            use_precision_instead_of_scores=cur_params['use_precision_instead_of_scores'])

        # Keep only fused detections at or above the confidence threshold.
        kept_polygons, kept_confs = [], []
        conf_threshold = cur_params['threshold']
        for idx, polygon in enumerate(fused_polygons):
            if fused_confs[idx] < conf_threshold:
                continue
            kept_polygons.append(polygon)
            kept_confs.append(fused_confs[idx])

        joined_subm_dict[img_key] = kept_polygons, kept_confs
    return joined_subm_dict
Example #4
0
        av_hmean += best_result_per_fold[i]['hmean']
        # print("AP/folds_count: ", ap_per_fold[i])
        print()
    av_ap /= folds_count
    av_recall /= folds_count
    av_precision /= folds_count
    av_hmean /= folds_count
    print("average precision: ", av_precision)
    print("average recall: ", av_recall)
    print("average hmean: ", av_hmean)
    print("average ap: ", av_ap)
    return best_result_per_fold, best_params_per_fold


if __name__ == '__main__':
    evalParams = polygon_evaluation_params()

    for key in using_detections:
        validate_data(allDetFilePaths[key], evalParams, isGT=False)
    subm_dict = {}
    for key in using_detections:
        new_key = key.split('_')[0]
        subm_dict[new_key] = load_zip_file(allDetFilePaths[key],
                                           evalParams['DET_SAMPLE_NAME_2_ID'],
                                           True)
    validate_data(gtFilePath, evalParams, isGT=True)
    gt = load_zip_file(gtFilePath, evalParams['GT_SAMPLE_NAME_2_ID'])
    detector_keys = list(subm_dict.keys())
    if len(detector_keys) == 0:
        exit()
    img_keys = list(