def testDraw():
    """ ./check-files.py -t
    Make sure we can actually do a Draw on the final product!
    """
    from ROOT import TChain, TFile

    # don't have this pkg in the shifter image
    # import psutil
    # process = psutil.Process(os.getpid())

    # METHOD 1: load everything into one massive TChain.
    # This segfaults on Draw() if the chain is too big (not enough memory available).
    # The default batch options can't handle this for most of the datasets (a login node can).
    # ds = 1
    # dsMap = bkg.dsMap()
    # ch = TChain("skimTree")
    # for sub in range(dsMap[ds]+1):
    #     latList = dsi.getSplitList("%s/latSkimDS%d_%d*" % (dsi.latDir, ds, sub), sub)
    #     tmpList = [f for idx, f in sorted(latList.items())]
    #     for f in tmpList: ch.Add(f)
    # nEnt = ch.GetEntries()
    # print("nEnt", nEnt)
    # n = ch.Draw("trapENFCal:riseNoise:fitSlo", "trapENFCal < 250", "goff")
    # t1, t2, t3 = ch.GetV1(), ch.GetV2(), ch.GetV3()
    # t1 = np.asarray([t1[i] for i in range(n)])
    # t2 = np.asarray([t2[i] for i in range(n)])
    # print("nEnt %-8d nDraw %-8d mem: %d MB" % (nEnt, n, process.memory_info().rss/1e6))

    # METHOD 2: go file-by-file, to save memory
    fileList = []
    dsMap = bkg.dsMap()
    # for ds in dsMap:
    for ds in [1]:
        for sub in range(dsMap[ds] + 1):
            latList = dsi.getSplitList("%s/latSkimDS%d_%d*" % (dsi.latDir, ds, sub), sub)
            tmpList = [f for idx, f in sorted(latList.items())]
            fileList.extend(tmpList)

    for f in fileList:
        tf = TFile(f)
        tt = tf.Get("skimTree")
        nEnt = tt.GetEntries()
        n = tt.Draw("trapENFCal:riseNoise:fitSlo", "trapENFCal < 250", "goff")
        t1, t2, t3 = tt.GetV1(), tt.GetV2(), tt.GetV3()
        t1 = np.asarray([t1[i] for i in range(n)])
        t2 = np.asarray([t2[i] for i in range(n)])
        # print("%s: nEnt %-8d nDraw %-8d mem: %d MB" % (f.split("/")[-1], nEnt, n, process.memory_info().rss/1e6))
        print("%s: nEnt %-8d nDraw %-8d" % (f.split("/")[-1], nEnt, n))
        tf.Close()
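
# A minimal sketch (hypothetical helper, not part of the original file): the
# file-by-file Draw pattern in METHOD 2 can be factored out for reuse by other
# checks. Assumes each file holds a "skimTree" and that 'branches' names
# exactly three expressions, since GetV1-GetV3 are all read.
def drawFileArrays(fname, branches="trapENFCal:riseNoise:fitSlo", cut="trapENFCal < 250"):
    """Draw three branches from one file and return them as numpy arrays."""
    from ROOT import TFile
    tf = TFile(fname)
    tt = tf.Get("skimTree")
    n = tt.Draw(branches, cut, "goff")
    # copy the Draw buffers into numpy arrays before the file closes (ROOT owns them)
    vals = [np.asarray([v[i] for i in range(n)]) for v in (tt.GetV1(), tt.GetV2(), tt.GetV3())]
    tf.Close()
    return n, vals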
def makeJobList():
    """ ./check-files.py -m """

    # bkg
    if not checkCal:

        # open the job list to compare
        with open("./jobs/bkgLAT.ls") as f:
            latJobs = f.readlines()
        latJobs = [j.rstrip() for j in latJobs]

        # write failed jobs to a cleanup list
        fOut = open("./jobs/bkgLAT_cleanup.ls", "w")
        dsMap = bkg.dsMap()
        for ds in dsMap:
            for sub in range(dsMap[ds] + 1):
                splList = dsi.getSplitList("%s/splitSkimDS%d_%d*" % (dsi.splitDir, ds, sub), sub)
                latList = dsi.getSplitList("%s/latSkimDS%d_%d*" % (dsi.latDir, ds, sub), sub)
                if len(splList) != len(latList):
                    print("Error: ds %d sub %d. Found %d split files but %d lat files." % (ds, sub, len(splList), len(latList)))
                    spl = unpackFileName(splList)
                    lat = unpackFileName(latList)
                    diffs = list(set(spl).symmetric_difference(set(lat)))
                    for diff in diffs:
                        fname = "latSkimDS%d_%d_%d.root" % (diff[0], diff[1], diff[2])
                        job = [s for s in latJobs if fname in s][0]
                        fOut.write(job + "\n")
        fOut.close()

    # cal
    else:

        # open the job lists to compare
        with open("./jobs/calLAT.ls") as f:
            latJobs = f.readlines()
        latJobs = [j.rstrip() for j in latJobs]
        with open("./jobs/calLAT_ds5c.ls") as f:
            latJobs2 = f.readlines()
        latJobs.extend([j.rstrip() for j in latJobs2])

        # write failed jobs to a cleanup list
        fOut = open("./jobs/calLAT_cleanup.ls", "w")
        dsMap = cal.GetKeys()
        for key in dsMap:
            ds = int(key[2])
            if skipDS6Cal and ds == 6:
                continue
            for sub in range(cal.GetIdxs(key)):
                cRuns = cal.GetCalList(key, sub)
                for run in cRuns:
                    splList = dsi.getSplitList("%s/splitSkimDS%d_run%d*" % (dsi.calSplitDir, ds, run), run)
                    latList = dsi.getSplitList("%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
                    if len(splList) != len(latList):
                        print("Error: ds %d run %d. Found %d split files but %d lat files." % (ds, run, len(splList), len(latList)))
                        spl = unpackFileName(splList)
                        lat = unpackFileName(latList)
                        diffs = list(set(spl).symmetric_difference(set(lat)))
                        for diff in diffs:
                            fname = "latSkimDS%d_run%d_%d.root" % (diff[0], diff[1], diff[2])
                            job = [s for s in latJobs if fname in s][0]
                            fOut.write(job + "\n")
        fOut.close()
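
# For reference, a sketch of the interface makeJobList assumes for
# unpackFileName (the real helper is defined elsewhere in this file): it
# should map skim file paths to (ds, sub, idx) integer tuples so the
# symmetric difference above pinpoints the missing LAT outputs.
def unpackFileNameSketch(fileList):
    import re
    paths = fileList.values() if isinstance(fileList, dict) else fileList
    out = []
    for f in paths:
        # e.g. ".../latSkimDS1_33_2.root" -> (1, 33, 2); also matches "_run" style names
        m = re.search(r"DS(\d+)_(?:run)?(\d+)_(\d+)\.root", f)
        if m:
            out.append(tuple(int(g) for g in m.groups()))
    return out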
def checkLAT():
    """ ./check-files.py [-c] -l """
    # make sure nLat matches nSplit files,
    # then repeat the checkWave checks
    print("Checking lats. Cal?", checkCal)

    fileList = []

    # bkg
    if not checkCal:
        dsMap = bkg.dsMap()
        for ds in dsMap:
            for sub in range(dsMap[ds] + 1):
                sList = dsi.getSplitList("%s/splitSkimDS%d_%d*" % (dsi.splitDir, ds, sub), sub)
                latList = dsi.getSplitList("%s/latSkimDS%d_%d*" % (dsi.latDir, ds, sub), sub)
                if len(sList) != len(latList):
                    print("Error: ds %d sub %d. Found %d split files but %d lat files." % (ds, sub, len(sList), len(latList)))
                tmpList = [f for idx, f in sorted(latList.items())]
                fileList.extend(tmpList)

    # cal
    else:
        dsMap = cal.GetKeys()
        for key in dsMap:
            ds = int(key[2])
            if skipDS6Cal and ds == 6:
                continue
            for sub in range(cal.GetIdxs(key)):
                cRuns = cal.GetCalList(key, sub)
                for run in cRuns:
                    sList = dsi.getSplitList("%s/splitSkimDS%d_run%d*" % (dsi.calSplitDir, ds, run), run)
                    latList = dsi.getSplitList("%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
                    if len(sList) != len(latList):
                        print("Error: ds %d sub %d run %d. Found %d split files but %d lat files." % (ds, sub, run, len(sList), len(latList)))
                    tmpList = [f for idx, f in sorted(latList.items())]
                    fileList.extend(tmpList)

    if testMode:
        fileList = []
        ds = 1
        # dsMap = bkg.dsMap()
        # for sub in range(dsMap[ds]+1):
        #     latList = dsi.getSplitList("%s/latSkimDS%d_%d*" % (dsi.latDir, ds, sub), sub)
        #     tmpList = [f for idx, f in sorted(latList.items())]
        #     fileList.extend(tmpList)
        dsMap = cal.GetKeys()
        key = dsMap[1]
        for sub in range(cal.GetIdxs(key)):
            cRuns = cal.GetCalList(key, sub)
            for run in cRuns:
                latList = dsi.getSplitList("%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
                fileList.extend([f for idx, f in sorted(latList.items())])

    # loop over files, repeating the same checks as in checkWave
    for idx, fname in enumerate(fileList[:fLimit]):

        f = TFile(fname)
        t = f.Get("skimTree")
        n = t.GetEntries()
        if n == 0:
            print("No entries in file", fname)
            continue

        brSingle, brVector = [], []
        for br in t.GetListOfBranches():
            if "vector" in br.GetClassName():
                brVector.append(br.GetName())
            else:
                brSingle.append(br.GetName())

        # spot-check the first and last 20 entries (or all of them, if the tree is short)
        if n < 20:
            eList = np.arange(0, n, 1)
        else:
            eList = np.arange(0, 20, 1)
            eList = np.append(eList, np.arange(n - 20, n, 1))

        for i in eList:
            t.GetEntry(i)

            # make sure individual entries are accessible (no segfaults)
            for br in brSingle:
                val = getattr(t, br)
                # print(val)
            nCh = 0
            for br in brVector:
                try:
                    nCh = getattr(t, br).size()
                except AttributeError:
                    print("Error:", br)
                    continue  # don't index a branch we couldn't read
                for j in range(nCh):
                    val = getattr(t, br)[j]
                    # print(br, nCh, val)

            # make sure we can process waveforms
            for j in range(nCh):
                wf = t.MGTWaveforms.at(j)
                ch = t.channel.at(j)

                # be absolutely sure you're matching the right waveform to this hit
                if wf.GetID() != ch:
                    print("ERROR -- Vector matching failed. entry %d, file: %s" % (i, fname))

                # run the LAT routine to convert into numpy arrays
                # (note: 'ds' is whatever value the list-building loops above left behind)
                truncLo, truncHi = 0, 2
                if ds == 6 or ds == 2:
                    truncLo = 4
                signal = wl.processWaveform(wf, truncLo, truncHi)

        if verbose:
            print("%d/%d %s nEnt %d" % (idx, len(fileList[:fLimit]), fname.split("/")[-1], t.GetEntries()))

        f.Close()
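
# Sketch of the entry-sampling rule used above, pulled out as a standalone
# helper (hypothetical name): take the first and last nEdge entries of a
# tree, or every entry if the tree is shorter than nEdge.
def edgeEntries(n, nEdge=20):
    if n < nEdge:
        return np.arange(0, n, 1)
    return np.append(np.arange(0, nEdge, 1), np.arange(n - nEdge, n, 1))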
def checkSplit():
    """ ./check-files.py [-c] -p """
    # make sure every file has a TCut
    # do a summed file size check - make sure sum(size(splits)) = size(waves)
    print("Checking splits. Cal?", checkCal)

    # bkg
    if not checkCal:
        dsMap = bkg.dsMap()
        for ds in dsMap:
            for sub in range(dsMap[ds] + 1)[:fLimit]:

                # dict {'DSX_X_X':filePath} to list of file paths
                tmpList = dsi.getSplitList("%s/splitSkimDS%d_%d*" % (dsi.splitDir, ds, sub), sub)
                sList = [f for idx, f in sorted(tmpList.items())]

                # check file existence
                wFile = "%s/waveSkimDS%d_%d.root" % (dsi.waveDir, ds, sub)
                if len(sList) == 0 or not os.path.isfile(wFile):
                    print("Files not found! len(split): %d, wave file:" % len(sList), wFile)
                    continue

                # check file sizes - result: wave files are slightly larger;
                # presumably ROOT adds some per-file overhead for larger files
                wSize = os.stat(wFile).st_size / 1e6
                sSize = sum([os.stat(f).st_size for f in sList]) / 1e6

                # critical check - numbers of entries match, and the TCut is the same
                # (note: only the last split's cut is compared against the wave file's)
                f1 = TFile(wFile)
                t1 = f1.Get("skimTree")
                wCut = f1.Get("theCut").GetTitle()
                nEnt = t1.GetEntries()
                f1.Close()
                nEntS = 0
                for f in sList:
                    f2 = TFile(f)
                    t2 = f2.Get("skimTree")
                    sCut = f2.Get("theCut").GetTitle()
                    nEntS += t2.GetEntries()
                    f2.Close()
                if nEnt != nEntS:
                    print("Error: nEntries don't match! ds %d sub %d size(wave) %.2f size(split) %.2f nEnt(wave) %d nEnt(split) %d" % (ds, sub, wSize, sSize, nEnt, nEntS))
                if wCut != sCut:
                    print("Error: TCut doesn't match!\nwaveFile:%s\nsplitFile:%s" % (wCut, sCut))

                if verbose:
                    print("%d/%d DS-%d-%d nEnt %d" % (sub, dsMap[ds] + 1, ds, sub, nEnt))

    # cal
    else:
        dsMap = cal.GetKeys()
        for key in dsMap:
            ds = int(key[2])
            if skipDS6Cal and ds == 6:
                continue
            for sub in range(cal.GetIdxs(key))[:fLimit]:
                cRuns = cal.GetCalList(key, sub)
                for run in cRuns:

                    tmpList = dsi.getSplitList("%s/splitSkimDS%d_run%d*" % (dsi.calSplitDir, ds, run), run)
                    sList = [f for idx, f in sorted(tmpList.items())]

                    # check file existence
                    wFile = "%s/waveSkimDS%d_run%d.root" % (dsi.calWaveDir, ds, run)
                    if len(sList) == 0 or not os.path.isfile(wFile):
                        print("Files not found! len(split): %d, wave file:" % len(sList), wFile)
                        continue

                    # check file sizes - result: wave files are slightly larger;
                    # presumably ROOT adds some per-file overhead for larger files
                    wSize = os.stat(wFile).st_size / 1e6
                    sSize = sum([os.stat(f).st_size for f in sList]) / 1e6

                    # critical check - numbers of entries match, and the TCut is the same
                    f1 = TFile(wFile)
                    t1 = f1.Get("skimTree")
                    wCut = f1.Get("theCut").GetTitle()
                    nEnt = t1.GetEntries()
                    f1.Close()
                    nEntS = 0
                    for f in sList:
                        f2 = TFile(f)
                        t2 = f2.Get("skimTree")
                        sCut = f2.Get("theCut").GetTitle()
                        nEntS += t2.GetEntries()
                        f2.Close()
                    if nEnt != nEntS:
                        print("Error: nEntries don't match! ds %d sub %d run %d size(wave) %.2f size(split) %.2f nEnt(wave) %d nEnt(split) %d" % (ds, sub, run, wSize, sSize, nEnt, nEntS))
                    if wCut != sCut:
                        print("Error: TCut doesn't match!\nwaveFile:%s\nsplitFile:%s" % (wCut, sCut))

                    if verbose:
                        print("%d/%d DS-%d-c%d run %d nEnt %d" % (sub, cal.GetIdxs(key), ds, sub, run, nEnt))
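
# The wave-vs-split comparison above reduces to two invariants. A minimal
# sketch (hypothetical helper) that both branches could share:
def splitInvariants(wFile, sList):
    """Return (entriesMatch, cutsMatch) for a wave file and its splits."""
    f1 = TFile(wFile)
    wCut = f1.Get("theCut").GetTitle()
    nEnt = f1.Get("skimTree").GetEntries()
    f1.Close()
    nEntS, sCuts = 0, set()
    for f in sList:
        f2 = TFile(f)
        nEntS += f2.Get("skimTree").GetEntries()
        sCuts.add(f2.Get("theCut").GetTitle())
        f2.Close()
    return nEnt == nEntS, sCuts == {wCut}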
def scanRunsRise(ds, key, mod, cIdx):
    from ROOT import TFile, TTree

    rLim, eLim = 4, 250
    print("Limiting to", rLim, "runs and a", eLim, "keV hit upper limit.")

    # load file and channel list
    fileList = []
    calRuns = cal.GetCalList(key, cIdx, runLimit=rLim)  # shouldn't need many runs for riseNoise
    for run in calRuns:
        latList = dsi.getSplitList("%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
        tmpList = [f for idx, f in sorted(latList.items())]
        fileList.extend(tmpList)
    chList = det.getGoodChanList(ds)

    print("Scanning DS:%d calIdx %d mod %d key %s nFiles:%d" % (ds, cIdx, mod, key, len(fileList)), time.strftime('%X %x %Z'))
    outFile = "%s/rise_%s_c%d.npz" % (dsi.effDir, key, cIdx)
    print("Saving output in:", outFile)

    # this is what we'll output for every calIdx
    hitE, chan, rise = [], [], []

    # loop over LAT cal files
    scanStart = time.time()
    prevRun = 0
    evtCtr, totCtr, totRunTime = 0, 0, 0
    for iF, f in enumerate(fileList):
        print("%d/%d %s" % (iF, len(fileList), f))
        tf = TFile(f)
        tt = tf.Get("skimTree")

        # increment the run time
        tt.GetEntry(0)
        run = tt.run
        if run != prevRun:
            calIdx = cal.GetCalIdx(key, run)
            start = tt.startTime_s
            stop = tt.stopTime_s
            runTime = stop - start
            if runTime < 0 or runTime > 9999:
                print("run time error, run", run, "start", start, "stop", stop)
            else:
                totRunTime += runTime

            # find thresholds for this run, to calculate sumET and mHT in the loop
            n = tt.Draw("channel:threshKeV:threshSigma", "", "goff")
            thrC, thrM, thrS = tt.GetV1(), tt.GetV2(), tt.GetV3()
            tmpThresh = {}
            for i in range(n):
                if thrC[i] not in chList: continue
                if thrC[i] in tmpThresh.keys(): continue
                if thrM[i] < 9999:
                    thrK = thrM[i] + 3 * thrS[i]
                    tmpThresh[thrC[i]] = [run, thrM[i], thrS[i], thrK]
            for ch in chList:
                if ch not in tmpThresh.keys():
                    tmpThresh[ch] = [-1, -1, -1, -1]

            prevRun = run
            # continue

        # loop over tree
        for iE in range(tt.GetEntries()):
            tt.GetEntry(iE)
            if tt.EventDC1Bits != 0: continue
            # totCtr += 1

            n = tt.channel.size()
            chTmp = np.asarray([tt.channel.at(i) for i in range(n)])
            idxRaw = [i for i in range(tt.channel.size()) if tt.channel.at(i) in chList]
            hitERaw = np.asarray([tt.trapENFCal.at(i) for i in idxRaw])

            # get indexes of hits above threshold (use thresholds from THIS CAL RUN)
            idxList = [i for i in range(tt.channel.size())
                       if tt.channel.at(i) in chList
                       and tt.trapENFCal.at(i) > tmpThresh[tt.channel.at(i)][3]
                       and 0.7 < tt.trapENFCal.at(i) < eLim]

            # save riseNoise data
            for i in idxList:
                hitE.append(tt.trapENFCal.at(i))
                chan.append(tt.channel.at(i))
                rise.append(tt.riseNoise.at(i))

        tf.Close()

    print("done.")
    hitE, chan, rise = np.asarray(hitE), np.asarray(chan), np.asarray(rise)
    print(len(hitE), 'total entries')

    for ch in chList:
        idx = np.where(chan == ch)
        idx2 = np.where(hitE[idx] < 10)
        print(ch, "nTot", len(hitE[idx]), "nCts under 10 keV:", len(hitE[idx2]), "nCts<10/0.5 keV:", len(hitE[idx2]) / 20)

    np.savez(outFile, hitE, chan, rise)
    print("Done:", time.strftime('%X %x %Z'), ", %.2f sec/file." % ((time.time() - scanStart) / len(fileList)))
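
# Usage sketch: np.savez above stores hitE, chan, rise positionally, so numpy
# exposes them as arr_0..arr_2 on reload. A minimal reader (hypothetical name):
def loadRiseFile(npzFile):
    f = np.load(npzFile)
    hitE, chan, rise = f['arr_0'], f['arr_1'], f['arr_2']
    return hitE, chan, rise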
def runLAT(dsNum, subNum=None, runNum=None, calList=[]):
    """ ./job-panda.py [-q] -lat (-ds dsNum) (-sub dsNum subNum) (-run dsNum subNum) [-cal]
    Runs LAT on splitSkim output. Does not combine output files back together.
    """
    bkg = dsi.BkgInfo()

    # bkg
    if not calList:
        dsMap = bkg.dsMap()

        # -ds
        if subNum is None and runNum is None:
            for subNum in range(dsMap[dsNum] + 1):
                files = dsi.getSplitList("%s/splitSkimDS%d_%d*" % (dsi.splitDir, dsNum, subNum), subNum)
                for idx, inFile in sorted(files.items()):
                    outFile = "%s/latSkimDS%d_%d_%d.root" % (dsi.latDir, dsNum, subNum, idx)
                    job = "./lat.py -b -r %d %d -p %s %s" % (dsNum, subNum, inFile, outFile)
                    # jspl = job.split()  # make SUPER sure stuff is matched
                    # print(jspl[3], jspl[4], jspl[6].split("/")[-1], jspl[7].split("/")[-1])
                    if useJobQueue:
                        sh("%s >& ./logs/lat-ds%d-%d-%d.txt" % (job, dsNum, subNum, idx))
                    else:
                        sh("""%s '%s'""" % (jobStr, job))

        # -sub
        elif runNum is None:
            files = dsi.getSplitList("%s/splitSkimDS%d_%d*" % (dsi.splitDir, dsNum, subNum), subNum)
            for idx, inFile in sorted(files.items()):
                outFile = "%s/latSkimDS%d_%d_%d.root" % (dsi.latDir, dsNum, subNum, idx)
                job = "./lat.py -b -r %d %d -p %s %s" % (dsNum, subNum, inFile, outFile)
                if useJobQueue:
                    sh("%s >& ./logs/lat-ds%d-%d-%d.txt" % (job, dsNum, subNum, idx))
                else:
                    sh("""%s '%s'""" % (jobStr, job))

        # -run
        elif subNum is None:
            files = dsi.getSplitList("%s/splitSkimDS%d_run%d*" % (dsi.splitDir, dsNum, runNum), runNum)
            for idx, inFile in sorted(files.items()):
                outFile = "%s/latSkimDS%d_run%d_%d.root" % (dsi.latDir, dsNum, runNum, idx)
                job = "./lat.py -b -r %d %d -p %s %s" % (dsNum, runNum, inFile, outFile)
                if useJobQueue:
                    sh("%s >& ./logs/lat-ds%d-run%d-%d.txt" % (job, dsNum, runNum, idx))
                else:
                    sh("""%s '%s'""" % (jobStr, job))

    # cal
    else:
        dsRanges = bkg.dsRanges()
        for run in calList:
            for key in dsRanges:
                if dsRanges[key][0] <= run <= dsRanges[key][1]:
                    dsNum = key
            files = dsi.getSplitList("%s/splitSkimDS%d_run%d*" % (dsi.calSplitDir, dsNum, run), run)
            for idx, inFile in sorted(files.items()):
                outFile = "%s/latSkimDS%d_run%d_%d.root" % (dsi.calLatDir, dsNum, run, idx)
                job = "./lat.py -b -f %d %d -p %s %s" % (dsNum, run, inFile, outFile)
                if useJobQueue:
                    sh("%s >& ./logs/lat-ds%d-run%d-%d.txt" % (job, dsNum, run, idx))
                else:
                    sh("""%s '%s'""" % (jobStr, job))
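
# For context, a minimal sketch of the sh() wrapper runLAT relies on (the
# real one is defined elsewhere in job-panda.py). Assumed behavior: run a
# shell command, echo it, and block until it finishes.
def shSketch(cmd):
    import subprocess
    print(cmd)
    return subprocess.call(cmd, shell=True)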
def scanRuns(ds, key, mod, cIdx):
    from ROOT import TFile, TTree

    # load file and channel list
    fileList = []
    calRuns = cal.GetCalList(key, cIdx)
    for run in calRuns:
        latList = dsi.getSplitList("%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
        tmpList = [f for idx, f in sorted(latList.items())]
        fileList.extend(tmpList)
    chList = det.getGoodChanList(ds)

    print("Scanning DS:%d calIdx %d mod %d key %s nFiles:%d" % (ds, cIdx, mod, key, len(fileList)), time.strftime('%X %x %Z'))
    outFile = "%s/eff_%s_c%d.npz" % (dsi.effDir, key, cIdx)
    print("Saving output in:", outFile)

    # declare the output stuff
    evtIdx, evtSumET, evtHitE, evtChans, evtSlo, evtRise = [], [], [], [], [], []
    thrCal = {ch: [] for ch in chList}
    fLo, fHi, fpb = -200, 400, 1
    nbf = int((fHi - fLo) / fpb) + 1
    fSloSpec = {ch: [np.zeros(nbf) for i in range(3)] for ch in chList}  # 0-10, 10-200, 236-240 keV

    # loop over LAT cal files
    scanStart = time.time()
    prevRun = 0
    evtCtr, totCtr, totRunTime = 0, 0, 0
    for iF, f in enumerate(fileList):
        print("%d/%d %s" % (iF, len(fileList), f))
        tf = TFile(f)
        tt = tf.Get("skimTree")

        # histogram these for each file (and then add to the totals)
        fs10 = {ch: [] for ch in chList}
        fs200 = {ch: [] for ch in chList}
        fs238 = {ch: [] for ch in chList}

        # increment the run time and fill the output dict of thresholds
        tt.GetEntry(0)
        run = tt.run
        if run != prevRun:

            start = tt.startTime_s
            stop = tt.stopTime_s
            runTime = stop - start
            if runTime < 0 or runTime > 9999:
                print("run time error, run", run, "start", start, "stop", stop)
            else:
                totRunTime += runTime

            # find thresholds for this run, to calculate sumET and mHT in the loop.
            # save them into the output dict (so we can compare w/ DB later).
            n = tt.Draw("channel:threshKeV:threshSigma", "", "goff")
            chan, thrM, thrS = tt.GetV1(), tt.GetV2(), tt.GetV3()
            tmpThresh = {}
            for i in range(n):
                if chan[i] not in chList: continue
                if chan[i] in tmpThresh.keys(): continue
                if thrM[i] < 9999:
                    thrK = thrM[i] + 3 * thrS[i]
                    tmpThresh[chan[i]] = [run, thrM[i], thrS[i], thrK]
            for ch in chList:
                if ch not in tmpThresh.keys():
                    tmpThresh[ch] = [-1, -1, -1, -1]

            # fill the output dict
            for ch in tmpThresh:
                thrCal[ch].append(tmpThresh[ch])  # [run, thrM, thrS, thrK]

            prevRun = run
            # continue

        # loop over tree
        for iE in range(tt.GetEntries()):
            tt.GetEntry(iE)
            if tt.EventDC1Bits != 0: continue
            totCtr += 1

            n = tt.channel.size()
            chTmp = np.asarray([tt.channel.at(i) for i in range(n)])
            idxRaw = [i for i in range(tt.channel.size()) if tt.channel.at(i) in chList]
            hitERaw = np.asarray([tt.trapENFCal.at(i) for i in idxRaw])

            # get indexes of hits above threshold (use thresholds from THIS CAL RUN)
            idxList = [i for i in range(tt.channel.size())
                       if tt.channel.at(i) in chList
                       and tt.trapENFCal.at(i) > tmpThresh[tt.channel.at(i)][3]
                       and 0.7 < tt.trapENFCal.at(i) < 9999]
            hitE = np.asarray([tt.trapENFCal.at(i) for i in idxList])

            # calculate mHT and sumET
            mHT, sumET = len(hitE), sum(hitE)

            # save fitSlo data for the 0-10, 10-200, and 236-240 keV ranges, for each channel
            for i in idxList:
                en = tt.trapENFCal.at(i)
                ch = tt.channel.at(i)
                fs = tt.fitSlo.at(i)
                if fLo < fs < fHi:
                    if 0 < en < 10: fs10[ch].append(fs)
                    if 10 < en < 200: fs200[ch].append(fs)
                    if 236 < en < 240: fs238[ch].append(fs)

            # save m2s238 events to output, skip everything else
            if mHT != 2: continue
            if not 237.28 < sumET < 239.46: continue
            hitChans = np.asarray([tt.channel.at(i) for i in idxList])
            hitSlo = np.asarray([tt.fitSlo.at(i) for i in idxList])
            hitRise = np.asarray([tt.riseNoise.at(i) for i in idxList])
            evtIdx.append([run, iE])
            evtSumET.append(sumET)
            evtHitE.append(hitE)
            evtChans.append(hitChans)
            evtSlo.append(hitSlo)
            evtRise.append(hitRise)
            evtCtr += 1

        # fill the fitSlo histograms w/ the events from this file
        for ch in chList:
            x, h1 = wl.GetHisto(fs10[ch], fLo, fHi, fpb)
            x, h2 = wl.GetHisto(fs200[ch], fLo, fHi, fpb)
            x, h3 = wl.GetHisto(fs238[ch], fLo, fHi, fpb)
            fSloSpec[ch][0] = np.sum([fSloSpec[ch][0], h1], axis=0)
            fSloSpec[ch][1] = np.sum([fSloSpec[ch][1], h2], axis=0)
            fSloSpec[ch][2] = np.sum([fSloSpec[ch][2], h3], axis=0)
            # n1 = np.sum(fSloSpec[ch][0])
            # n2 = np.sum(fSloSpec[ch][1])
            # n3 = np.sum(fSloSpec[ch][2])
            # print("ch:%d n10 %d n200 %d n238 %d" % (ch, n1, n2, n3))

        tf.Close()

    # get the average threshold for each channel in this file list
    thrFinal = {chan: [] for chan in thrCal}
    for chan in thrCal:
        thrVals = []
        for iT in range(len(thrCal[chan])):
            run, thrM, thrS, thrK = thrCal[chan][iT]
            # print("%d %d %.3f %.3f %.3f" % (chan, run, thrM, thrS, thrK))
            if thrK > -1:
                thrVals.append(thrK)
        thrVals = np.asarray(thrVals)
        thrAvg = np.mean(thrVals)
        thrDev = np.std(thrVals)
        # print("%d %.3f %.3f" % (chan, thrAvg, thrDev))
        thrFinal[chan] = [thrAvg, thrDev]

    # print the final thresholds and stdev to screen, with an error message if necessary
    print("Detector Thresholds:")
    for chan in sorted(thrFinal):
        thKeV = thrFinal[chan][0]
        thE = thrFinal[chan][1]
        errString = ""
        if thE / thKeV > 0.5:
            errString = ">50pct error: thE/thKeV=%.2f" % (thE / thKeV)
        if thKeV > 2:
            errString = ">2keV"
        print("%d %.3f %.3f %s" % (chan, thKeV, thE, errString))

    # save output
    np.savez(outFile, evtIdx, evtSumET, evtHitE, evtChans, thrCal, thrFinal,
             evtCtr, totCtr, totRunTime, fSloSpec, x, evtSlo, evtRise)

    # output stats
    print("Done:", time.strftime('%X %x %Z'), ", %.2f sec/file." % ((time.time() - scanStart) / len(fileList)))
    print("  m2s238 evts:", evtCtr, "total evts:", totCtr, "runTime:", totRunTime)
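
# Usage sketch: np.savez stores these 13 objects positionally (arr_0..arr_12,
# in the order given above). Saved dicts like thrCal come back as 0-d object
# arrays, so .item() and allow_pickle=True are needed with recent numpy.
# Hypothetical reader:
def loadEffFile(npzFile):
    f = np.load(npzFile, allow_pickle=True)
    evtIdx, evtSumET, evtHitE, evtChans = f['arr_0'], f['arr_1'], f['arr_2'], f['arr_3']
    thrCal, thrFinal = f['arr_4'].item(), f['arr_5'].item()
    evtCtr, totCtr, totRunTime = f['arr_6'], f['arr_7'], f['arr_8']
    fSloSpec, x = f['arr_9'].item(), f['arr_10']
    evtSlo, evtRise = f['arr_11'], f['arr_12']
    return (evtIdx, evtSumET, evtHitE, evtChans, thrCal, thrFinal,
            evtCtr, totCtr, totRunTime, fSloSpec, x, evtSlo, evtRise)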