def LOOT_Eval(RFfolder):
    reslist = glob.glob(os.path.join(RFfolder, '*.out'))
    FN = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}

    for fi, fname in enumerate(reslist):
        with open(fname, 'r') as fin:
            Results = pickle.load(fin)

        # filter raw results before evaluation
        fResults = ECGRF.ECGrf.resfilter(Results)

        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults[0:1])
        pErr, pFN = ECGstats.eval(debug=False)

        # pool per-file statistics
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])

    # write pooled statistics to the log file
    EvalLogfilename = os.path.join(curfolderpath, 'res.log')
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in [{}]'.format(RFfolder))
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)
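
# Example call (a minimal sketch; the result-folder path below is hypothetical,
# not defined by this module):
#
#   LOOT_Eval(os.path.join(projhomepath, 'TestResult', 'LOOT'))
#
# Every '*.out' pickle in the folder is filtered and scored, and the pooled
# FN/Err statistics are appended to 'res.log' under curfolderpath.
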
def EvalQTdbResults(resultfilelist, OutputFolder):
    if resultfilelist is None or len(resultfilelist) == 0:
        print 'Empty result file list!'
        return None

    FN = {'pos': [], 'label': [], 'recname': []}
    FP = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}

    # ========================================
    # select best round to compare with refs
    # ========================================
    bRselector = BestRoundSelector()
    # InvalidRecordList = conf['InvalidRecords']

    # for each record test result
    for fi, fname in enumerate(resultfilelist):
        print 'json load:', fname
        with open(fname, 'rU') as fin:
            Results = json.load(fin)
        Results = Results[0]

        # skip invalid records (disabled)
        currecordname = Results[0]
        # if currecordname in InvalidRecordList:
        #     continue

        # ==================================
        # filter result of QT
        # ==================================
        reslist = Results[1]
        resfilter = ResultFilter(reslist)
        reslist = resfilter.group_local_result(cp_del_thres=1)
        reslist = resfilter.syntax_filter(reslist)
        fResults = (Results[0], reslist)
        fResults = [fResults]

        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults)
        pErr, pFN = ECGstats.eval(debug=False)
        # collect false positives from this round
        pFP = ECGstats.pFP

        # per-file error statistics
        print '[json filename]: {}'.format(fname)
        print '[{}] files left.'.format(len(resultfilelist) - fi)
        evallabellist, evalstats = ECGstatistics.dispstat0(pFN=pFN, pErr=pErr)

        # select best round: only rounds without false negatives qualify
        numofFN = len(pFN['pos'])
        if numofFN == 0:
            ExtraInfo = ('Best Round ResultFileName[{}]\n'
                         'TestSet: {}\n'
                         '#False Negative: {}\n').format(
                             fname, [x[0] for x in Results], numofFN)
            bRselector.input(evallabellist, evalstats, ExtraInfo=ExtraInfo)

        # pool per-file statistics
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])
        for kk in FP:
            FP[kk].extend(pFP[kk])

    # ====================================
    # write to log file
    # ====================================
    output_log_filename = os.path.join(OutputFolder, 'RecordResults.log')
    EvalLogfilename = output_log_filename

    # display error stat for each label & save results to logfile
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in FilePath [{}]'.format(
            os.path.split(resultfilelist[0])[0]),
        OutputFolder=OutputFolder)

    with open(os.path.join(curfolderpath, 'Err.txt'), 'w') as fout:
        pickle.dump(Err, fout)

    # find best round
    bRselector.dispBestRound()
    bRselector.dumpBestRound(EvalLogfilename)
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)

    # write CSV files: false positives
    outputfilename = os.path.join(OutputFolder, 'FalsePositive.csv')
    ECGstats.FP2CSV(FP, Err, outputfilename)
    # false negatives
    outputfilename = os.path.join(OutputFolder, 'FalseNegative.csv')
    ECGstats.FN2CSV(FN, Err, outputfilename)
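
# Example call (a minimal sketch; both paths below are hypothetical):
#
#   qt_results = glob.glob(os.path.join(projhomepath, 'QTresults', '*.json'))
#   EvalQTdbResults(qt_results, os.path.join(projhomepath, 'QTeval'))
#
# OutputFolder receives 'RecordResults.log', 'FalsePositive.csv' and
# 'FalseNegative.csv'; the best-round summary is appended to the log.
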
def TestN_Eval(RFfolder,
               output_log_filename=os.path.join(curfolderpath, 'res.log')):
    # test result file list
    picklereslist = glob.glob(os.path.join(RFfolder, '*.out'))
    # struct init
    FN = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}

    # ========================================
    # select best round to compare with refs
    # ========================================
    bRselector = BestRoundSelector()

    for fi, fname in enumerate(picklereslist):
        with open(fname, 'rU') as fin:
            Results = pickle.load(fin)

        # filter raw results before evaluation
        fResults = ECGRF.ECGrf.resfilter(Results)

        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults)
        pErr, pFN = ECGstats.eval(debug=False)

        # per-file error statistics
        print '[pickle filename]: {}'.format(fname)
        print '[{}] files left.'.format(len(picklereslist) - fi)
        evallabellist, evalstats = ECGstatistics.dispstat0(pFN=pFN, pErr=pErr)

        # select best round: only rounds without false negatives qualify
        numofFN = len(pFN['pos'])
        if numofFN == 0:
            ExtraInfo = ('Best Round ResultFileName[{}]\n'
                         'TestSet: {}\n'
                         '#False Negative: {}\n').format(
                             fname, [x[0] for x in Results], numofFN)
            bRselector.input(evallabellist, evalstats, ExtraInfo=ExtraInfo)

        # pool per-file statistics
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])

    # write to log file
    EvalLogfilename = output_log_filename

    # display error stat for each label & save results to logfile
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in FilePath [{}]'.format(RFfolder))

    with open(os.path.join(projhomepath, 'tmp', 'Err.txt'), 'w') as fout:
        pickle.dump(Err, fout)

    # find best round
    bRselector.dispBestRound()
    bRselector.dumpBestRound(EvalLogfilename)
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)
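
# Example call (a minimal sketch; the round folder below is hypothetical):
#
#   TestN_Eval(os.path.join(projhomepath, 'TestResult', 'round1'),
#              output_log_filename=os.path.join(curfolderpath, 'res.log'))
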