# Imports this snippet needs (module names assumed from how they are used below):
import os
import shutil

import rrc_evaluation_funcs
import TL_iou


def compute_hmean(submit_file_path, epoch):
    print('EAST <==> Evaluation <==> Compute Hmean <==> Begin')

    basename = os.path.basename(submit_file_path)
    assert basename == 'submit.zip', 'There is no submit.zip'

    dirname = '/home/binchengxiong/ssd_fcn_multitask_text_detection_pytorch1.0'
    gt_file_path = os.path.join(dirname, 'gt_train.zip')
    print('gt_file_path:', gt_file_path)
    assert os.path.isfile(gt_file_path), 'There is no gt_train.zip'
    tt = 'train_log_step_hmean_' + str(epoch) + '.txt'
    log_file_path = os.path.join(dirname, tt)
    # log_file_path = os.path.join(dirname, 'log_epoch_hmean_modify_loss.txt')
    # log_file_path = os.path.join(dirname, 'log_epoch_hmean_cross_entropy_loss.txt')

    if not os.path.isfile(log_file_path):
        os.mknod(log_file_path)
    result_dir_path = os.path.join(dirname, 'result' + str(epoch))
    # Remove stale results from a previous run, then recreate the directory.
    shutil.rmtree(result_dir_path, ignore_errors=True)
    if not os.path.exists(result_dir_path):
        os.makedirs(result_dir_path)
    print('gt_file_path:', gt_file_path)
    print('submit_file_path:', submit_file_path)

    resDict = rrc_evaluation_funcs.main_evaluation(
        {
            'g': gt_file_path,
            's': submit_file_path,
            'o': result_dir_path
        }, TL_iou.default_evaluation_params, TL_iou.validate_data,
        TL_iou.evaluate_method)

    print(resDict)
    # main_evaluation leaves 'method' as the string '{}' when the evaluation fails
    if resDict['method'] == '{}':
        with open(log_file_path, 'a') as f:
            f.write(
                'step:{} ssd_fcn_multitask <==> Evaluation <==> Precision:{:.2f} Recall:{:.2f} Hmean:{:.2f} <==> Done\n'
                .format(epoch, -1, -1, -1))
        return -1
    else:
        recall = resDict['method']['recall']

        precision = resDict['method']['precision']

        hmean = resDict['method']['hmean']

        print(
            'ssd_fcn_multitask <==> Evaluation <==> Precision:{:.2f} Recall:{:.2f} Hmean:{:.2f} <==> Done'
            .format(precision, recall, hmean))

        with open(log_file_path, 'a') as f:
            f.write(
                'step:{} ssd_fcn_multitask <==> Evaluation <==> Precision:{:.4f} Recall:{:.4f} Hmean:{:.4f} <==> Done\n'
                .format(epoch, precision, recall, hmean))

    return hmean
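
A minimal usage sketch (not from the original project) of how compute_hmean above might be driven from a training loop, assuming PyTorch is imported as torch; output_dir, write_detections_to_submit_zip, val_images, model and best_hmean are hypothetical placeholders.

# Hypothetical caller: package detections into submit.zip, then score them.
submit_path = os.path.join(output_dir, 'submit.zip')            # output_dir: assumed
write_detections_to_submit_zip(model, val_images, submit_path)  # hypothetical helper
hmean = compute_hmean(submit_path, epoch)
if hmean > best_hmean:
    best_hmean = hmean
    torch.save(model.state_dict(), 'best_hmean_epoch_{}.pth'.format(epoch))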
Example #2
File: hmean.py  Project: TuKJet/EAST
def compute_hmean(submit_file_path):
    print('EAST <==> Evaluation <==> Compute Hmean <==> Begin')

    basename = os.path.basename(submit_file_path)
    assert basename == 'submit.zip', 'There is no submit.zip'

    dirname = os.path.dirname(submit_file_path)
    gt_file_path = os.path.join(dirname, 'gt.zip')
    print(gt_file_path)
    assert os.path.isfile(gt_file_path), 'There is no gt.zip'

    log_file_path = os.path.join(dirname, 'log_epoch_hmean.txt')
    if not os.path.isfile(log_file_path):
        os.mknod(log_file_path)

    result_dir_path = os.path.join(dirname, 'result')
    # Remove stale results from a previous run, then recreate the directory.
    shutil.rmtree(result_dir_path, ignore_errors=True)
    os.mkdir(result_dir_path)

    resDict = rrc_evaluation_funcs.main_evaluation(
        {
            'g': gt_file_path,
            's': submit_file_path,
            'o': result_dir_path
        }, TL_iou.default_evaluation_params, TL_iou.validate_data,
        TL_iou.evaluate_method)

    print(resDict)
    recall = resDict['method']['recall']

    precision = resDict['method']['precision']

    hmean = resDict['method']['hmean']

    print(
        'EAST <==> Evaluation <==> Precision:{:.2f} Recall:{:.2f} Hmean:{:.2f} <==> Done'
        .format(precision, recall, hmean))

    with open(log_file_path, 'a') as f:
        f.write(
            'EAST <==> Evaluation <==> Precision:{:.2f} Recall:{:.2f} Hmean:{:.2f} <==> Done\n'
            .format(precision, recall, hmean))

    return hmean
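
Both compute_hmean variants assume a ready-made submit.zip. A small sketch (an assumption, not part of either repo) of packaging per-image detection files into that archive with the standard zipfile module:

import os
import zipfile

def make_submit_zip(txt_dir, submit_file_path):
    # Zip every per-image result file (e.g. res_img_1.txt, one polygon per line,
    # following the ICDAR-style naming) into the submit.zip expected above.
    with zipfile.ZipFile(submit_file_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for name in sorted(os.listdir(txt_dir)):
            if name.endswith('.txt'):
                zf.write(os.path.join(txt_dir, name), arcname=name)
    return submit_file_path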
Example #3
File: TL_iou.py  Project: chenbys/MEAST
                                            'AP':sampleAP,
                                            'iouMat':[] if len(detPols)>100 else iouMat.tolist(),
                                            'gtPolPoints':gtPolPoints,
                                            'detPolPoints':detPolPoints,
                                            'gtDontCare':gtDontCarePolsNum,
                                            'detDontCare':detDontCarePolsNum,
                                            'evaluationParams': evaluationParams,
                                            'evaluationLog': evaluationLog
                                        }

    # Compute MAP and MAR
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else \
        2 * methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean, 'AP': AP}

    resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics}

    return resDict



if __name__ == '__main__':
    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params, validate_data, evaluate_method)
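
The global metrics in evaluate_method above reduce to the harmonic mean of precision and recall over all "care" ground truths and detections; a standalone check of that arithmetic with made-up counts:

# Made-up example counts, mirroring the formulas in evaluate_method above.
matchedSum = 80         # matched (gt, det) pairs across all images
numGlobalCareGt = 100   # ground-truth polygons that are not "don't care"
numGlobalCareDet = 90   # detections that are not "don't care"

recall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt        # 0.8000
precision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet   # 0.8889
hmean = 0 if recall + precision == 0 else 2 * recall * precision / (recall + precision)
print('hmean: {:.4f}'.format(hmean))  # hmean: 0.8421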