def main():
    args = parse_args()
    # Generate the scene model's results, then score them with the RRC
    # evaluation entry point.
    get_scene_model_result(args.ak, args.sk, args.gt, args.log)
    result = rrc_evaluation_funcs.main_evaluation(None,
                                                  default_evaluation_params,
                                                  validate_data,
                                                  evaluate_method, args.gt)
    recall = result["method"]["recall"]
    precision = result["method"]["precision"]
    hmean = result["method"]["hmean"]
    with open(args.log, "w") as fw:
        print('ocr_detect_recall:%s' % recall)
        print('ocr_precision:%s' % precision)
        print('F1:%s' % hmean)
        fw.write("recall:" + str(recall) + "\n")
        fw.write("precision:" + str(precision) + "\n")
        fw.write("hmean:" + str(hmean) + "\n")
Example #2

    # Micro-averaged scores over the whole dataset, guarding against empty sets.
    methodRecall = 0 if numGlobalCareGt == 0 else float(
        matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(
        matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (
        methodRecall + methodPrecision)

    methodMetrics = {
        'precision': methodPrecision,
        'recall': methodRecall,
        'hmean': methodHmean,
        'AP': AP
    }

    resDict = {
        'calculated': True,
        'Message': '',
        'method': methodMetrics,
        'per_sample': perSampleMetrics
    }

    return resDict


if __name__ == '__main__':

    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params,
                                         validate_data, evaluate_method)
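As a quick numeric check of the recall/precision/hmean computation above, with made-up counts:

# Worked example: 80 matched boxes, 100 cared-for GT boxes, 90 cared-for detections.
matchedSum, numGlobalCareGt, numGlobalCareDet = 80, 100, 90
recall = matchedSum / numGlobalCareGt                  # 0.8
precision = matchedSum / numGlobalCareDet              # ~0.8889
hmean = 2 * recall * precision / (recall + precision)  # ~0.8421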
Example #3

def evaluate():

    id_ = 0
    submFile = request.files.get('submissionFile')

    if submFile is None:
        resDict = {"calculated": False, "Message": "No file selected"}
        if request.query['json'] == "1":
            return json.dumps(resDict)
        else:
            vars = {
                'url': url,
                'title': 'Method Upload ' + title,
                'resDict': resDict
            }
            return template('upload', vars)
    else:

        name, ext = os.path.splitext(submFile.filename)
        # 'in' on a string matches substrings, so compare the extension exactly.
        if ext != '.' + gt_ext:
            resDict = {
                "calculated": False,
                "Message":
                    "File not valid. A " + gt_ext.upper() + " file is required."
            }
            if request.query['json'] == "1":
                return json.dumps(resDict)
            else:
                vars = {
                    'url': url,
                    'title': 'Method Upload ' + title,
                    'resDict': resDict
                }
                return template('upload', vars)

        base_dir = os.path.dirname(os.path.abspath(__file__))
        p = {
            'g': base_dir + "/gt/gt." + gt_ext,
            's': base_dir + "/output/subm." + gt_ext,
            'o': base_dir + "/output",
            'p': evaluation_params
        }

        for k, _ in submit_params.items():
            p['p'][k] = request.forms.get(k)

        if os.path.isfile(p['s']):
            os.remove(p['s'])

        submFile.save(p['s'])

        module = importlib.import_module("config." + evaluation_script)
        resDict = rrc_evaluation_funcs.main_evaluation(
            p, module.default_evaluation_params, module.validate_data,
            module.evaluate_method)

        if resDict['calculated']:
            dbPath = os.path.dirname(
                os.path.abspath(__file__)) + "/output/submits"
            conn = sqlite3.connect(dbPath)
            cursor = conn.cursor()

            submTitle = request.forms.get('title')
            if submTitle == "":
                submTitle = "unnamed"

            cursor.execute(
                'INSERT INTO submission(title,sumbit_date,results) VALUES(?,?,?)',
                (submTitle, datetime.now().strftime("%Y-%m-%d %H:%M"),
                 json.dumps(resDict['method'])))
            conn.commit()
            id_ = cursor.lastrowid

            os.rename(
                p['s'], p['s'].replace("subm." + gt_ext,
                                       "subm_" + str(id_) + "." + gt_ext))
            os.rename(p['o'] + "/results.zip",
                      p['o'] + "/results_" + str(id_) + ".zip")

            conn.close()

        if request.query['json'] == "1":
            return json.dumps({
                "calculated": resDict['calculated'],
                "Message": resDict['Message'],
                'id': id
            })
        else:
            vars = {
                'url': url,
                'title': 'Method Upload ' + title,
                'resDict': resDict,
                'id': id_
            }
            return template('upload', vars)
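The handler above relies on Bottle's request/template globals; here is a minimal sketch of how it might be wired up. The '/evaluate' path, host, and port are assumptions, not taken from the source project:

# Hypothetical Bottle wiring for the evaluate() handler above.
from bottle import route, run, request, template

route('/evaluate', method='POST')(evaluate)

if __name__ == '__main__':
    run(host='0.0.0.0', port=8080)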
Example #4
    methodRecall = 0 if numGlobalCareGt == 0 else float(
        matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(
        matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (
        methodRecall + methodPrecision)

    methodMetrics = {
        'precision': methodPrecision,
        'recall': methodRecall,
        'hmean': methodHmean,
        'AP': AP
    }

    resDict = {
        'calculated': True,
        'Message': '',
        'method': methodMetrics,
        'per_sample': perSampleMetrics
    }

    return resDict


if __name__ == '__main__':
    # This variant of main_evaluation also returns the evaluated model's name,
    # which is used to tag the appended log line.
    res_dict, model_name = rrc_evaluation_funcs.main_evaluation(
        None, default_evaluation_params, validate_data, evaluate_method)
    with open('./log.txt', 'a') as f:
        f.write(model_name + ':' + json.dumps(res_dict['method']) + '\n')
Example #5
def eval(para):
    # Thin wrapper around the RRC entry point; note that the function name
    # shadows Python's built-in eval().
    return rrc_evaluation_funcs.main_evaluation(para,
                                                default_evaluation_params,
                                                validate_data, evaluate_method)
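Assuming the same parameter keys as Example #9 below ('g' for the ground-truth archive, 's' for the submission), a call might look like:

# Hypothetical archive paths; the returned resDict carries the aggregate
# scores under 'method', as the other examples show.
result = eval({'g': 'gt.zip', 's': 'submit.zip'})
print(result['method'])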
Example #6
def evaluate():

    id_ = 0
    submFile = request.files.get('submissionFile')

    if submFile is None:
        resDict = {"calculated": False, "Message": "No file selected"}
        if request.query['json'] == "1":
            return json.dumps(resDict)
        else:
            vars = {
                'url': url,
                'title': 'Method Upload ' + title,
                'resDict': resDict
            }
            return template('upload', vars)
    else:

        name, ext = os.path.splitext(submFile.filename)
        # 'in' on a string matches substrings, so compare the extension exactly.
        if ext != '.' + gt_ext:
            resDict = {
                "calculated": False,
                "Message":
                    "File not valid. A " + gt_ext.upper() + " file is required."
            }
            if request.query['json'] == "1":
                return json.dumps(resDict)
            else:
                vars = {
                    'url': url,
                    'title': 'Method Upload ' + title,
                    'resDict': resDict
                }
                return template('upload', vars)

        base_dir = os.path.dirname(os.path.abspath(__file__))
        p = {
            'g': base_dir + "/gt/gt." + gt_ext,
            's': base_dir + "/output/subm." + gt_ext,
            'o': base_dir + "/output"
        }
        global PARAMS
        setattr(PARAMS, 'GT_PATH', p['g'])
        setattr(PARAMS, 'SUBMIT_PATH', p['s'])
        setattr(PARAMS, 'OUTPUT_PATH', p['o'])

        if os.path.isfile(PARAMS.SUBMIT_PATH):
            os.remove(PARAMS.SUBMIT_PATH)

        submFile.save(PARAMS.SUBMIT_PATH)

        # Map the submitted form options onto the evaluation flags.
        setattr(PARAMS, 'TRANSCRIPTION', request.forms.get('transcription') == 'on')
        setattr(PARAMS, 'CONFIDENCES', request.forms.get('confidence') == 'on')
        setattr(PARAMS, 'E2E', request.forms.get('mode') == 'endtoend')

        resDict = rrc_evaluation_funcs.main_evaluation(validate_data,
                                                       cleval_evaluation)

        if resDict['calculated']:
            dbPath = os.path.dirname(
                os.path.abspath(__file__)) + "/output/submits"
            conn = sqlite3.connect(dbPath)
            cursor = conn.cursor()

            submTitle = request.forms.get('title')
            if submTitle == "":
                submTitle = "unnamed"

            cursor.execute(
                'INSERT INTO submission(title,sumbit_date,results,is_end2end) VALUES(?,?,?,?)',
                (submTitle, datetime.now().strftime("%Y-%m-%d %H:%M"),
                 json.dumps(resDict['method']), PARAMS.E2E))
            conn.commit()
            id_ = cursor.lastrowid

            os.rename(
                p['s'], p['s'].replace("subm." + gt_ext,
                                       "subm_" + str(id_) + "." + gt_ext))
            os.rename(p['o'] + "/results.zip",
                      p['o'] + "/results_" + str(id_) + ".zip")

            conn.close()

        if request.query['json'] == "1":
            return json.dumps({
                "calculated": resDict['calculated'],
                "Message": resDict['Message'],
                'id': id_
            })
        else:
            vars = {
                'url': url,
                'title': 'Method Upload ' + title,
                'resDict': resDict,
                'id': id_
            }
            return template('upload', vars)
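PARAMS above is a pre-existing mutable namespace; a minimal stand-in that satisfies the setattr calls and attribute reads would look like the following (an assumption, not the project's actual definition):

# Hypothetical stand-in for the global PARAMS object mutated above.
from types import SimpleNamespace

PARAMS = SimpleNamespace(
    GT_PATH='', SUBMIT_PATH='', OUTPUT_PATH='',
    TRANSCRIPTION=False, CONFIDENCES=False, E2E=False,
)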
Example #7
def upload_file():
    if request.method == 'POST':
        # Check that the POST request carries both files and both parameters.
        # Flask (>=1.1) JSON-encodes a returned dict, so the original
        # json.loads(json.dumps(...)) round-trips are unnecessary.
        if 'gt_file' not in request.files:
            return {'status': 'gt_file is not found.'}
        if 'pred_file' not in request.files:
            return {'status': 'pred_file is not found.'}

        if 'eval_method' not in request.form:
            return {'status': 'eval_method is not found.'}
        if 'box_type' not in request.form:
            return {'status': 'box_type is not found.'}

        gt_file = request.files['gt_file']
        pred_file = request.files['pred_file']

        if gt_file.filename == '':
            return {'status': 'gt_file is null.'}
        if pred_file.filename == '':
            return {'status': 'pred_file is null.'}

        if not (gt_file and allowed_file(gt_file.filename)
                and pred_file and allowed_file(pred_file.filename)):
            return {'status': 'input is not allowed.'}

        if request.form['eval_method'] not in ('detection', 'end-to-end'):
            return {'status': 'eval_method is not valid.'}

        if request.form['box_type'] not in ('LTRB', 'QUAD', 'POLY'):
            return {'status': 'box_type is not valid.'}



        gt_filename = secure_filename(gt_file.filename)
        gt_file.save(os.path.join(app.config['UPLOAD_FOLDER'], gt_filename))
        pred_filename = secure_filename(pred_file.filename)
        pred_file.save(os.path.join(app.config['UPLOAD_FOLDER'], pred_filename))

        # Set the evaluation paths for the uploaded files.
        setattr(PARAMS, 'GT_PATH', os.path.join(app.config['UPLOAD_FOLDER'], gt_filename))
        setattr(PARAMS, 'SUBMIT_PATH', os.path.join(app.config['UPLOAD_FOLDER'], pred_filename))

        if request.form['eval_method'] == 'detection':
            setattr(PARAMS, 'E2E', False)
        else:
            setattr(PARAMS, 'E2E', True)

        setattr(PARAMS, 'BOX_TYPE', request.form['box_type'])

        resDict = rrc_evaluation_funcs.main_evaluation(validate_data, cleval_evaluation)

        return {'status': 'ok', 'evaluate': resDict}
    return '''
    <!doctype html>
    <title>CLEval API</title>
    <h1>CLEval Tool</h1>
    <form method=post enctype=multipart/form-data>
      <b>Upload GT file (zip):</b> <input type=file name=gt_file> <br><br>
      <b>Upload Predict file (zip):</b> <input type=file name=pred_file> <br><br>
      <b>Evaluation Method:</b>
      <input type="radio" id="detection" name="eval_method" value="detection">
      <label for="detection">detection</label>
      <input type="radio" id="end-to-end" name="eval_method" value="end-to-end" checked="checked">
      <label for="end-to-end">END-TO-END</label><br><br>

      <b>Box Type:</b>
      <input type="radio" id="LTRB" name="box_type" value="LTRB" checked="checked">
      <label for="LTRB">LTRB</label>
      <input type="radio" id="QUAD" name="box_type" value="QUAD">
      <label for="QUAD">QUAD</label>
      <input type="radio" id="POLY" name="box_type" value="POLY">
      <label for="POLY">POLY</label><br><br>

      <input type=submit value=Upload>
    </form>
    '''


# Launch the development server with:
# FLASK_ENV=development flask run --port 8000 --host 0.0.0.0
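A hypothetical client for this endpoint; port 8000 follows the launch comment above, while the root path and file names are placeholders:

# Sketch of a client call using the requests library; adjust URL and paths.
import requests

with open('gt.zip', 'rb') as gt, open('pred.zip', 'rb') as pred:
    resp = requests.post(
        'http://localhost:8000/',
        files={'gt_file': gt, 'pred_file': pred},
        data={'eval_method': 'end-to-end', 'box_type': 'LTRB'},
    )
print(resp.json()['status'])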
Example #8
                if det_file is None:
                    det_file = ""
            else:
                det_file = ""

            future = executor.submit(eval_single_result, gt_file, det_file)
            futures[future] = file_idx

        with tqdm(total=bar_len) as pbar:
            pbar.set_description("Integrating results...")
            for future in concurrent.futures.as_completed(futures):
                file_idx = futures[future]
                result = future.result()
                per_sample_metrics[file_idx] = result
                overall_result.accumulate_stats(result['Rawdata'])
                pbar.update(1)

        executor.shutdown()

    resDict = {
        'calculated': True,
        'Message': '',
        'method': overall_result.to_dict(),
        'per_sample': per_sample_metrics
    }
    return resDict


if __name__ == '__main__':
    rrc_evaluation_funcs.main_evaluation(validate_data, cleval_evaluation)
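The setup at the top of this example is cut off; below is a self-contained sketch of the submit/as_completed pattern it uses. The scorer and file lists are stand-ins, not the project's code:

# Minimal illustration of mapping futures back to file indices and
# collecting results as they complete.
import concurrent.futures
from tqdm import tqdm

def eval_single_result(gt_file, det_file):
    # Stand-in per-file scorer.
    return {'gt': gt_file, 'det': det_file}

gt_files = ['gt_0.txt', 'gt_1.txt']
det_files = ['det_0.txt', 'det_1.txt']

per_sample_metrics = {}
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = {
        executor.submit(eval_single_result, g, d): idx
        for idx, (g, d) in enumerate(zip(gt_files, det_files))
    }
    with tqdm(total=len(futures)) as pbar:
        pbar.set_description("Integrating results...")
        for future in concurrent.futures.as_completed(futures):
            per_sample_metrics[futures[future]] = future.result()
            pbar.update(1)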
Example #9
def cal_recall_precison_f1(gt_path, result_path, show_result=False):
    # 'g' is the ground-truth archive, 's' the detection-result archive.
    p = {'g': gt_path, 's': result_path}
    result = rrc_evaluation_funcs.main_evaluation(p, default_evaluation_params,
                                                  validate_data,
                                                  evaluate_method, show_result)
    return result['method']
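A minimal usage sketch with placeholder paths; per the other examples, the returned 'method' dict carries 'precision', 'recall' and 'hmean':

metrics = cal_recall_precison_f1('gt.zip', 'result.zip')
print('P=%.4f R=%.4f F1=%.4f'
      % (metrics['precision'], metrics['recall'], metrics['hmean']))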
Example #10
                                        'precision': precision,
                                        'recall': recall,
                                        'hmean': hmean,
                                        'pairs': pairs,
                                        'recallMat': [] if len(detRects) > 100 else recallMat.tolist(),
                                        'precisionMat': [] if len(detRects) > 100 else precisionMat.tolist(),
                                        'gtPolPoints': gtPolPoints,
                                        'detPolPoints': detPolPoints,
                                        'gtDontCare': gtDontCareRectsNum,
                                        'detDontCare': detDontCareRectsNum,
                                        'evaluationParams': evaluationParams,
                                        'evaluationLog': evaluationLog
                                    }

    methodRecall = 0 if numGt == 0 else methodRecallSum / numGt
    methodPrecision = 0 if numDet == 0 else methodPrecisionSum / numDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else (
        2 * methodRecall * methodPrecision / (methodRecall + methodPrecision))

    methodMetrics = {
        'precision': methodPrecision,
        'recall': methodRecall,
        'hmean': methodHmean
    }

    resDict = {
        'calculated': True,
        'Message': '',
        'method': methodMetrics,
        'per_sample': perSampleMetrics
    }

    return resDict



if __name__ == '__main__':
    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params,
                                         validate_data, evaluate_method)