def debug_show_eval_result(picklefilename,
                           target_recname=None,
                           singleRecordFile=False):
    with open(picklefilename, 'r') as fin:
        Results = pickle.load(fin)
    # convert a single-record result to a list
    if singleRecordFile:
        Results = [Results, ]
    for recind in xrange(0, len(Results)):
        # only plot the target record: skip records whose name does not match
        if target_recname is not None:
            print 'Current FileName: {}'.format(Results[recind][0])
            if Results[recind][0] != target_recname:
                continue
        fResults = ECGRF.ECGrf.resfilter(Results)
        # show filtered results & raw results
        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults[recind:recind + 1])
        ECGstats.eval(debug=False)
        ECGstats.dispstat0()
        ECGstats.plotevalresofrec(Results[recind][0], Results)

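# Usage sketch for debug_show_eval_result (the pickle path and record name
# below are hypothetical placeholders, not files shipped with this repo):
#
#   debug_show_eval_result('results/round0.out',
#                          target_recname='sel103',
#                          singleRecordFile=False)
#
# The pickle is expected to hold a list of (record_name, prediction_result)
# pairs, or a single pair when singleRecordFile is True.
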
def plot(self):
    Results = [(self.recname, self.testresult), ]
    fResults = ECGRF.ECGrf.resfilter(Results)
    # show filtered results & raw results
    # Evaluate prediction result statistics
    recind = 0
    ECGstats = ECGstatistics(fResults[recind:recind + 1])
    ECGstats.eval(debug=False)
    ECGstats.dispstat0()
    ECGstats.plotevalresofrec(Results[recind][0], Results)

def LOOT_Eval(RFfolder):
    reslist = glob.glob(os.path.join(RFfolder, '*.out'))
    FN = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}
    for fi, fname in enumerate(reslist):
        with open(fname, 'r') as fin:
            Results = pickle.load(fin)
        fResults = ECGRF.ECGrf.resfilter(Results)
        # show filtered results & raw results
        #for recname, recRes in Results:
        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults[0:1])
        pErr, pFN = ECGstats.eval(debug=False)
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])
    # write to log file
    # (ECGstats from the last loop iteration is reused for record-level analysis)
    EvalLogfilename = os.path.join(curfolderpath, 'res.log')
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in [{}]'.format(RFfolder))
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)

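# Usage sketch for LOOT_Eval: point it at a folder of leave-one-out test
# result pickles (the folder path is a hypothetical example):
#
#   LOOT_Eval('/path/to/LOOT_results')
#
# Aggregated FN/Err statistics are written to 'res.log' under curfolderpath.
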
def debug_show_eval_result(picklefilename,
                           target_recname=None):
    with open(picklefilename, 'r') as fin:
        Results = pickle.load(fin)
    # only plot target rec
    if target_recname is not None:
        print Results[0][0]
        if Results[0][0] != target_recname:
            return
    fResults = ECGRF.ECGrf.resfilter(Results)
    # show filtered results & raw results
    # Evaluate prediction result statistics
    ECGstats = ECGstatistics(fResults[0:1])
    ECGstats.eval(debug=False)
    ECGstats.dispstat0()
    ECGstats.plotevalresofrec(Results[0][0], Results)

def RFtest(self, testrecname):
    ecgrf = ECGRF()
    sel1213 = conf['sel1213']
    ecgrf.training(sel1213)
    Results = ecgrf.testing([testrecname, ])
    # Evaluate result
    filtered_Res = ECGRF.resfilter(Results)
    stats = ECGstats(filtered_Res[0:1])
    Err, FN = stats.eval(debug=False)
    # write to log file
    EvalLogfilename = os.path.join(projhomepath, 'res.log')
    stats.dispstat0(pFN=FN, pErr=Err)
    # plot prediction result
    stats.plotevalresofrec(Results[0][0], Results)

def EvalQTdbResults(resultfilelist, OutputFolder):
    if resultfilelist is None or len(resultfilelist) == 0:
        print "Empty result file list!"
        return None
    FN = {'pos': [], 'label': [], 'recname': []}
    FP = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}
    #========================================
    # select best round to compare with refs
    #========================================
    bRselector = BestRoundSelector()
    #InvalidRecordList = conf['InvalidRecords']
    # for each record test result
    for fi, fname in enumerate(resultfilelist):
        print 'json load :', fname
        with open(fname, 'rU') as fin:
            Results = json.load(fin)
        Results = Results[0]
        # skip invalid records
        currecordname = Results[0]
        #if currecordname in InvalidRecordList:
        #    continue
        # ==================================
        # filter result of QT
        # ==================================
        reslist = Results[1]
        resfilter = ResultFilter(reslist)
        reslist = resfilter.group_local_result(cp_del_thres=1)
        reslist = resfilter.syntax_filter(reslist)
        fResults = (Results[0], reslist)
        fResults = [fResults, ]
        # show filtered results & raw results
        #for recname, recRes in Results:
        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults)
        pErr, pFN = ECGstats.eval(debug=False)
        # get False Positive
        pFP = ECGstats.pFP
        # one test Error stat
        print '[result filename]: {}'.format(fname)
        print '[{}] files left.'.format(len(resultfilelist) - fi)
        evallabellist, evalstats = ECGstatistics.dispstat0(pFN=pFN, pErr=pErr)
        # select best Round
        numofFN = len(pFN['pos'])
        if numofFN == 0:
            ExtraInfo = 'Best Round ResultFileName[{}]\nTestSet: {}\n#False Negative: {}\n'.format(
                fname, [x[0] for x in Results], numofFN)
            bRselector.input(evallabellist, evalstats, ExtraInfo=ExtraInfo)
        # ==============================================
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])
        for kk in FP:
            FP[kk].extend(pFP[kk])
    #====================================
    # write to log file
    #EvalLogfilename = os.path.join(curfolderpath, 'res.log')
    output_log_filename = os.path.join(OutputFolder, 'RecordResults.log')
    EvalLogfilename = output_log_filename
    # display error stat for each label & save results to logfile
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in FilePath [{}]'.format(
            os.path.split(resultfilelist[0])[0]),
        OutputFolder=OutputFolder)
    with open(os.path.join(curfolderpath, 'Err.txt'), 'w') as fout:
        pickle.dump(Err, fout)
    # find best round
    bRselector.dispBestRound()
    bRselector.dumpBestRound(EvalLogfilename)
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)
    # write csv files
    # False Positive
    outputfilename = os.path.join(OutputFolder, 'FalsePositive.csv')
    ECGstats.FP2CSV(FP, Err, outputfilename)
    # False Negative
    outputfilename = os.path.join(OutputFolder, 'FalseNegative.csv')
    ECGstats.FN2CSV(FN, Err, outputfilename)

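# Usage sketch for EvalQTdbResults (json-result variant). The glob pattern and
# output folder below are hypothetical examples:
#
#   result_files = glob.glob(os.path.join('/path/to/results', '*.json'))
#   EvalQTdbResults(result_files, OutputFolder='/path/to/eval_output')
#
# Writes RecordResults.log, FalsePositive.csv and FalseNegative.csv into
# OutputFolder.
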
def TestN_Eval(RFfolder,
               output_log_filename=os.path.join(curfolderpath, 'res.log')):
    # test result file list
    picklereslist = glob.glob(os.path.join(RFfolder, '*.out'))
    # struct Init
    FN = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}
    #========================================
    # select best round to compare with refs
    #========================================
    bRselector = BestRoundSelector()
    for fi, fname in enumerate(picklereslist):
        with open(fname, 'rU') as fin:
            Results = pickle.load(fin)
        # filter result
        fResults = ECGRF.ECGrf.resfilter(Results)
        # show filtered results & raw results
        #for recname, recRes in Results:
        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults)
        pErr, pFN = ECGstats.eval(debug=False)
        # one test Error stat
        print '[pickle filename]: {}'.format(fname)
        print '[{}] files left.'.format(len(picklereslist) - fi)
        evallabellist, evalstats = ECGstatistics.dispstat0(pFN=pFN, pErr=pErr)
        # select best Round
        numofFN = len(pFN['pos'])
        if numofFN == 0:
            ExtraInfo = 'Best Round ResultFileName[{}]\nTestSet: {}\n#False Negative: {}\n'.format(
                fname, [x[0] for x in Results], numofFN)
            bRselector.input(evallabellist, evalstats, ExtraInfo=ExtraInfo)
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])
    # write to log file
    #EvalLogfilename = os.path.join(curfolderpath, 'res.log')
    EvalLogfilename = output_log_filename
    # display error stat for each label & save results to logfile
    ECGstatistics.dispstat0(
        pFN=FN,
        pErr=Err,
        LogFileName=EvalLogfilename,
        LogText='Statistics of Results in FilePath [{}]'.format(RFfolder))
    with open(os.path.join(projhomepath, 'tmp', 'Err.txt'), 'w') as fout:
        pickle.dump(Err, fout)
    # find best round
    bRselector.dispBestRound()
    bRselector.dumpBestRound(EvalLogfilename)
    ECGstats.stat_record_analysis(pErr=Err, pFN=FN, LogFileName=EvalLogfilename)

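# Usage sketch for TestN_Eval (the folder path is a hypothetical example):
#
#   TestN_Eval('/path/to/RF_results',
#              output_log_filename=os.path.join(curfolderpath, 'res.log'))
#
# Each '*.out' pickle in the folder is evaluated, the best round is selected,
# and the aggregated statistics are written to the given log file.
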
def EvalQTdbResults(resultfilelist, OutputFolder):
    if resultfilelist is None or len(resultfilelist) == 0:
        print "Empty result file list!"
        return None
    FN = {'pos': [], 'label': [], 'recname': []}
    FP = {'pos': [], 'label': [], 'recname': []}
    Err = {'err': [], 'pos': [], 'label': [], 'recname': []}
    #========================================
    # select best round to compare with refs
    #========================================
    bRselector = BestRoundSelector()
    #InvalidRecordList = conf['InvalidRecords']
    # for each record test result
    for fi, fname in enumerate(resultfilelist):
        print 'pickle load :', fname
        with open(fname, 'rU') as fin:
            Results = pickle.load(fin)
        # skip invalid records
        currecordname = Results[0]
        #if currecordname in InvalidRecordList:
        #    continue
        # ==================================
        # filter result of QT
        # ==================================
        reslist = Results[1]
        resfilter = ResultFilter(reslist)
        reslist = resfilter.group_local_result(cp_del_thres=1)
        #reslist = resfilter.syntax_filter(reslist)
        fResults = (Results[0], reslist)
        fResults = [fResults, ]
        # show filtered results & raw results
        #for recname, recRes in Results:
        # Evaluate prediction result statistics
        ECGstats = ECGstatistics(fResults)
        pErr, pFN = ECGstats.eval(debug=False)
        # get False Positive
        pFP = ECGstats.pFP
        # one test Error stat
        print '[pickle filename]: {}'.format(fname)
        print '[{}] files left.'.format(len(resultfilelist) - fi)
        evallabellist, evalstats = ECGstatistics.dispstat0(pFN=pFN, pErr=pErr)
        # select best Round
        numofFN = len(pFN['pos'])
        if numofFN == 0:
            ExtraInfo = 'Best Round ResultFileName[{}]\nTestSet: {}\n#False Negative: {}\n'.format(
                fname, [x[0] for x in Results], numofFN)
            bRselector.input(evallabellist, evalstats, ExtraInfo=ExtraInfo)
        # ==============================================
        for kk in Err:
            Err[kk].extend(pErr[kk])
        for kk in FN:
            FN[kk].extend(pFN[kk])
        for kk in FP:
            FP[kk].extend(pFP[kk])
    return (FN, FP, Err)

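# Usage sketch for the pickle-based EvalQTdbResults variant, which returns the
# aggregated dictionaries instead of writing reports (the path below is a
# hypothetical example; OutputFolder is not used by this variant):
#
#   result_files = glob.glob(os.path.join('/path/to/results', '*.out'))
#   FN, FP, Err = EvalQTdbResults(result_files, OutputFolder=None)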