Example #1
def getThreshDB():
    """ ./chan-sel.py -getThreshDB
    Just an example of getting all threshold values (accounting for sub-bIdx's) from the DB.
    """
    calDB = db.TinyDB("%s/calDB-v2.json" % dsi.latSWDir)
    pars = db.Query()
    bkg = dsi.BkgInfo()

    # loop over datasets
    # for ds in [0,1,2,3,4,5,6]:
    for ds in [6]:
        dsNum = ds if isinstance(ds, int) else 5  # non-integer ds labels (DS-5 sub-periods) map to dsNum 5
        goodChans = det.getGoodChanList(dsNum)

        for bkgIdx in bkg.getRanges(ds):

            # ==== loop over sub-ranges (when TF was run) ====
            rFirst, rLast = bkg.getRanges(ds)[bkgIdx][0], bkg.getRanges(ds)[bkgIdx][-1]

            subRanges = bkg.GetSubRanges(ds, bkgIdx)
            if len(subRanges) == 0: subRanges.append((rFirst, rLast))

            for subIdx, (runLo, runHi) in enumerate(subRanges):

                key = "thresh_ds%d_bkg%d_sub%d" % (dsNum, bkgIdx, subIdx)

                thD = dsi.getDBRecord(key, False, calDB, pars)
                print(key)
                for ch in thD:
                    print(ch,":",thD[ch])
                print("")
Example #2
def dumpSettings(ds):
    """Dump detector settings (TRAP thresholds, HV biases) for dataset ds,
    reading records from the calDB. Most queries are left commented out as
    usage examples; only one hard-coded record is printed.
    """
    print("Dumping settings for DS:%d" % ds)

    # detCH and pMons
    # f = np.load("./data/ds%d_detChans.npz" % ds)
    # detCH, pMons = f['arr_0'].item(), f['arr_1']
    #
    # print("Pulser monitor channels:")
    # print(pMons)

    # print("Detector analysis channels:")
    # for key in detCH:
    #     print(key, detCH[key])

    # get HV and TF vals from DB with a regex
    calDB = db.TinyDB("%s/calDB-v2.json" % dsi.latSWDir)
    pars = db.Query()
    #
    # print("DB Threshold values:")
    # thrList = calDB.search(pars.key.matches("trapThr_ds%d" % ds))
    # for idx in range(len(thrList)):
    #     key = thrList[idx]['key']
    #     detTH = thrList[idx]['vals']
    #     print(key)
    #     # for d in detTH:
    #         # print(d, detTH[d])

    # print("DB HV values:")
    # hvList = calDB.search(pars.key.matches("hvBias_ds%d" % ds))
    # for idx in range(len(hvList)):
    #     key = hvList[idx]['key']
    #     detHV = hvList[idx]['vals']
    #     print(key)
    #     # for d in detHV:
    #         # print(d, detHV[d])

    # get a particular key (hard-coded to ds0 here, independent of the ds argument)
    dbKeyTH = "trapThr_ds0_m1_c23"
    dbValTH = dsi.getDBRecord(dbKeyTH, calDB=calDB, pars=pars)

    # debug: print the values
    for val in sorted(dbValTH):
        if len(dbValTH[val]) > 0:
            print(val, dbValTH[val])
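
A hedged sketch of the regex-based search the commented block above performs
(TinyDB's Query.matches takes a regular expression; the path and dataset number
are assumptions):

import tinydb as db

calDB = db.TinyDB("calDB-v2.json")
pars = db.Query()
for rec in calDB.search(pars.key.matches("trapThr_ds1")):
    print(rec["key"], "->", len(rec["vals"]), "entries")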
Example #3
def riseStability_v2():
    """Check the stability of the riseNoise fit parameters (a, b, c99) across
    calIdx's, flag channels that drift outside the 'chk' bounds, and save
    diagnostic plots for the flagged channels.
    """
    dsList = [0, 1, 2, 3, 4, 5]
    # dsList = [3]

    calDB = db.TinyDB('%s/calDB-v2.json' % (dsi.latSWDir))
    pars = db.Query()

    for ds in dsList:

        for calKey in cal.GetKeys(ds):
            chList = det.getGoodChanList(ds)
            mod = -1
            if "m1" in calKey:
                mod = 1
                chList = [ch for ch in chList if ch < 1000]
            if "m2" in calKey:
                mod = 2
                chList = [ch for ch in chList if ch > 1000]
            nCal = cal.GetNCalIdxs(ds, mod)

            # load DB vals : {calIdx: {ch: [a, b, c99, c, fitPass] for ch in goodList}}
            dbVals = {}
            for ci in range(nCal):
                dbVals[ci] = dsi.getDBRecord(
                    "riseNoise_%s_ci%d_pol" % (calKey, ci), False, calDB, pars)

            # average a,b,c for ALL detectors, all calIdx's
            allA, allB, allC = [], [], []
            for ci in range(nCal):
                for ch in dbVals[ci]:
                    if dbVals[ci][ch] is not None:
                        allA.append(dbVals[ci][ch][0])
                        allB.append(dbVals[ci][ch][1])
                        allC.append(dbVals[ci][ch][2])
            avgA, stdA = np.mean(allA), np.std(allA)
            avgB, stdB = np.mean(allB), np.std(allB)
            avgC, stdC = np.mean(allC), np.std(allC)

            # MWE - don't delete me
            # fig = plt.figure()
            # cmap = plt.cm.get_cmap('tab20',len(chList)+1)
            # for i, ch in enumerate(chList):
            #     x = [ci for ci in range(nCal) if dbVals[ci][ch] is not None]
            #     y = [dbVals[ci][ch][0]/avgA for ci in range(nCal) if dbVals[ci][ch] is not None]
            #     plt.plot(x, y, ".", ms=10, c=cmap(i), label=ch)
            # plt.axhline(avgA, c='r', alpha=0.5, label='avgA %.2e' % avgA)
            # plt.xlabel("calIdx", ha='right', x=1)
            # plt.legend(loc='best', ncol=4, fontsize=8)

            print("plotting", calKey)

            fig = plt.figure(figsize=(18, 6))
            p1 = plt.subplot(131)
            p2 = plt.subplot(132)
            p3 = plt.subplot(133)
            cmap = plt.cm.get_cmap('tab20', len(chList) + 1)

            chk = {'a': [-500, 2000], 'b': [0, 200], 'c': [50, 200]}
            checkList = []

            for i, ch in enumerate(chList):
                x = [ci for ci in range(nCal) if dbVals[ci][ch] is not None]
                yA, yB, yC = [], [], []
                for ci in range(nCal):
                    if dbVals[ci][ch] is not None:
                        valA = 100 * dbVals[ci][ch][0] / avgA
                        valB = 100 * dbVals[ci][ch][1] / avgB
                        valC = 100 * dbVals[ci][ch][2] / avgC  # this is c99
                        yA.append(valA)
                        yB.append(valB)
                        yC.append(valC)
                        if not (chk['a'][0] < valA < chk['a'][1]
                                and chk['b'][0] < valB < chk['b'][1]
                                and chk['c'][0] < valC < chk['c'][1]):
                            checkList.append([ci, ch, valC])

                p1.plot(x, yA, ".", ms=10, c=cmap(i), label=ch)
                p2.plot(x, yB, ".", ms=10, c=cmap(i))
                p3.plot(x, yC, ".", ms=10, c=cmap(i))

            p1.axhline(100, c='g', alpha=0.5, label='avgA %.2e' % avgA)
            p1.axhline(chk['a'][0],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['a'][0])
            p1.axhline(chk['a'][1],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['a'][1])
            p1.set_xlabel("calIdx", ha='right', x=1)
            p1.set_ylabel("Pct.Deviation from Avg.", ha='right', y=1)
            p1.legend(loc='best', ncol=4, fontsize=10)

            p2.axhline(100, c='g', alpha=0.5, label='avgB %.2e' % avgB)
            p2.axhline(chk['b'][0],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['b'][0])
            p2.axhline(chk['b'][1],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['b'][1])
            p2.set_xlabel("calIdx", ha='right', x=1)
            p2.legend(loc='best', fontsize=10)

            p3.axhline(100, c='g', alpha=0.5, label='avgC %.2f' % avgC)
            p3.axhline(chk['c'][0],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['c'][0])
            p3.axhline(chk['c'][1],
                       c='r',
                       alpha=0.5,
                       label='bad:%d' % chk['c'][1])
            p3.set_xlabel("calIdx", ha='right', x=1)
            p3.legend(loc='best', fontsize=10)

            plt.tight_layout()
            plt.savefig("../plots/rise-stability-%s.png" % calKey)

            if len(checkList) == 0:
                print("No bad channels found for", calKey)
                continue

            # ===========================
            # For channels that look suspicious, print a diagnostic plot
            # so you can justify rejecting them

            # checkList = [[0,578]] # just print an example of a good plot

            f = np.load("../data/lat2-rise-%s.npz" % calKey)
            riseHist = f['arr_0'].item()

            xLo, xHi, xpb = 0, 250, 1
            yLo, yHi, ypb = -10, 10, 0.05
            nbx = int((xHi - xLo) / xpb)
            nby = int((yHi - yLo) / ypb)
            # histogram empty data just to get consistent bin edges xe, ye
            _, xe, ye = np.histogram2d([], [], bins=[nbx, nby],
                                       range=[[xLo, xHi], [yLo, yHi]])

            fig = plt.figure(figsize=(18, 6))
            p1 = plt.subplot(131)
            p2 = plt.subplot(132)
            p3 = plt.subplot(133)

            print("Removal candidates:")
            for ci, ch, c99 in checkList:
                cpd = det.getChanCPD(ds, ch)
                print("ci", ci, "ch", ch, "cpd", cpd)

                if riseHist[ci][ch] is None:
                    print("No data found! ci, ch", ci, ch)
                    continue

                hRise = riseHist[ci][ch]

                p1.cla()
                p1.set_aspect('auto')
                x, y = np.meshgrid(xe, ye)
                p1.pcolormesh(x, y, hRise.T, norm=LogNorm())
                p1.set_xlabel("Energy (keV)", ha='right', x=1)
                p1.set_ylabel("riseNoise (Shifted)", ha='right', y=1)
                p1.plot(np.nan,
                        np.nan,
                        '.w',
                        label='cIdx %d ch%d C%sP%sD%s' % (ci, ch, *cpd))
                p1.legend(loc=1)

                p2.cla()
                pE = np.sum(hRise, axis=1)
                bE = xe[:-1] + 0.5 * (xe[1] - xe[0])  # bin centers from bin edges
                p2.plot(bE, pE, "b", ls='steps')
                p2.set_xlabel("Energy (keV)", ha='right', x=1)

                p3.cla()
                pR = np.sum(hRise, axis=0)
                bR = ye[:-1] + 0.5 * (ye[1] - ye[0])
                p3.plot(bR, pR, "b", ls='steps')
                p3.axvline(c99 / 100, c='r', label='c99:%.2f' % (c99 / 100.))
                p3.set_xlabel("riseNoise", ha='right', x=1)

                plt.tight_layout()
                plt.savefig("../plots/rise-%s-ci%d-ch%d.png" %
                            (calKey, ci, ch))
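
The outlier test in the loop above, distilled into a standalone helper. This is
a sketch assuming the same data layout as riseStability_v2, i.e.
dbVals = {calIdx: {ch: [a, b, c99, ...] or None}}:

def flagUnstable(dbVals, nCal, avgA, avgB, avgC, chk):
    """Return [calIdx, ch, pctC] for fits outside the chk percent-deviation bounds."""
    bad = []
    for ci in range(nCal):
        for ch, rec in dbVals[ci].items():
            if rec is None:
                continue
            pA = 100 * rec[0] / avgA
            pB = 100 * rec[1] / avgB
            pC = 100 * rec[2] / avgC
            if not (chk['a'][0] < pA < chk['a'][1]
                    and chk['b'][0] < pB < chk['b'][1]
                    and chk['c'][0] < pC < chk['c'][1]):
                bad.append([ci, ch, pC])
    return bad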
Example #4
def fillDetInfo():
    """ ./chan-sel.py -fill
    Summarize the results of getSettings in LAT/data/runSettings-v2.npz.
    Create a file accessible by the DetInfo object in dsi.py
    It contains dictionaries of TRAP threshold, HV settings,
    detector/channel mapping, and pulser monitor lists,
    that span an entire dataset (not broken down into separate calIdx's.)
    # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }
    """
    # 1. maps of analysis channel to cpd, and pulser monitor channels
    detCH, pMons = {}, {}
    for ds in [0, 1, 2, 3, 4, 5, 6]:
        f = np.load("%s/data/ds%d_detChans.npz" % (os.environ['LATDIR'], ds))
        detCH[ds] = f['arr_0'].item()
        pMons[ds] = f['arr_1'].item()

    # 2. maps of HV and TRAP threshold settings are stored in the DB.
    # make them global, and move them to the runSettings file.
    # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }
    detHV, detTH = {}, {}

    # load all possible values, as in settingsMgr
    detDB = db.TinyDB("%s/calDB-v2.json" % dsi.latSWDir)
    detPars = db.Query()
    cal = dsi.CalInfo()
    for ds in [0, 1, 2, 3, 4, 5, 6]:
        # for ds in [3]:
        print("scanning ds", ds)
        detTH[ds] = {}
        detHV[ds] = {}
        for key in cal.GetKeys(ds):
            mod = -1
            if "m1" in key: mod = 1
            if "m2" in key: mod = 2
            for cIdx in range(cal.GetIdxs(key)):

                # load the DB records
                dbKeyTH = "trapThr_%s_c%d" % (key, cIdx)
                dbValTH = dsi.getDBRecord(dbKeyTH, calDB=detDB, pars=detPars)

                dbKeyHV = "hvBias_%s_c%d" % (key, cIdx)
                dbValHV = dsi.getDBRecord(dbKeyHV, calDB=detDB, pars=detPars)

                # debug: print the record
                # for val in sorted(dbValTH):
                #     if len(dbValTH[val]) > 0:
                #         print(val, dbValTH[val])
                # return

                # fill the first value
                if len(detTH[ds]) == 0:
                    detTH[ds] = dbValTH
                    detHV[ds] = dbValHV
                    continue

                # check for new threshold values.
                for cpd in detTH[ds]:
                    nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])

                    # detector just came online
                    if nOld == 0 and nNew > 0:
                        detTH[ds][cpd] = dbValTH[cpd]
                        continue
                    # detector still offline
                    if nOld == 0 and nNew == 0:
                        continue
                    # detector just went offline
                    if nOld > 0 and nNew == 0:
                        continue

                    # check last run/trap pair against each new one
                    prevRun, prevTH = detTH[ds][cpd][-1]
                    for val in dbValTH[cpd]:
                        thisRun, thisTH = val[0], val[1]
                        if thisTH != prevTH:
                            detTH[ds][cpd].append([thisRun, thisTH])
                        prevTH = thisTH

                # check for new HV values.
                for cpd in detHV[ds]:

                    nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])

                    # detector just came online
                    if nOld == 0 and nNew > 0:
                        detHV[ds][cpd] = dbValHV[cpd]
                        continue
                    # detector still offline
                    if nOld == 0 and nNew == 0:
                        continue
                    # detector just went offline
                    if nOld > 0 and nNew == 0:
                        continue

                    # check last run/trap pair against each new one
                    prevRun, prevHV = detHV[ds][cpd][-1]
                    for val in dbValHV[cpd]:
                        thisRun, thisHV = val[0], val[1]
                        if thisHV != prevHV:
                            print(
                                "found HV diff.  cpd %d  prev %dV (run %d)  new %dV (run %d)"
                                % (cpd, prevHV, prevRun, thisHV, thisRun))
                            detHV[ds][cpd].append([thisRun, thisHV])
                        prevHV = thisHV

                # return

    # # load the old file and compare
    # # GOAL: improve on this file.
    # # f = np.load("%s/data/runSettings.npz" % dsi.latSWDir)
    # # detHVOld = f['arr_0'].item()
    # # detTHOld = f['arr_1'].item()
    # # detCHOld = f['arr_2'].item()
    # # pMonsOld = f['arr_3'].item()
    #
    # ds = 3
    # print("old results, ds",ds)
    # for cpd in sorted(detTHOld[ds]):
    #     if cpd!="122":continue
    #     if len(detTHOld[ds][cpd]) > 0:
    #         print(cpd, detTHOld[ds][cpd])
    #
    # # for ds in [0,1,2,3,4,5,6]:
    # print("thresh results, ds:",ds)
    # for cpd in sorted(detTH[ds]):
    #     # if cpd!=122:continue
    #     if len(detTH[ds][cpd]) > 0:
    #         print(cpd, detTH[ds][cpd])

    np.savez("%s/data/runSettings-v2.npz" % dsi.latSWDir, detHV, detTH, detCH,
             pMons)
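
Reading the output file back (a sketch mirroring the positional np.savez call
above; arr_0..arr_3 are numpy's default names for positional arguments, and
allow_pickle is required to recover the stored dicts):

import numpy as np

f = np.load("runSettings-v2.npz", allow_pickle=True)  # path shortened here
detHV, detTH, detCH, pMons = (f[k].item() for k in ("arr_0", "arr_1", "arr_2", "arr_3"))
print(detTH[1].get("152", []))  # run/value pairs for a hypothetical cpd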
Example #5
def combineSloEff(makePlots=False, writeDB=False, runMC=False, seedNum=1):
    """
    Inputs: m2s238 data (lat2-eff-data-95.npz, from lat2::setSloCut)
    Outputs:
    - npz file w/ efficiency curves
    - combined Weibull parameters for enr/nat and toy MC data
    Then in lat-expo:
    - make the efficiency curve per dataset and
      combine w/ the trigger & riseNoise efficiencies
    """

    # load lat2 data
    f = np.load(os.environ['LATDIR'] + '/data/lat2-eff-data-95.npz')
    effData = f['arr_0'].item()
    detList = effData.keys()
    xVals = effData['112'][7][1:]

    # combine individual detector m2s238 histos into overall enr and nat histos
    hPassNat, hAllNat = np.zeros(len(xVals)), np.zeros(len(xVals))
    hPassEnr, hAllEnr = np.zeros(len(xVals)), np.zeros(len(xVals))
    eff5, eff10, cts5, cts10 = {}, {}, {}, {}

    for det in detList:
        if det in skipList:
            continue

        # NOTE: detectors w/ more counts contribute more
        if detInfo.isEnr(det):
            hPassEnr += effData[det][4][1:]
            hAllEnr += effData[det][6][1:]
        else:
            hPassNat += effData[det][4][1:]
            hAllNat += effData[det][6][1:]

        # save efficiencies at 5 and 10 keV for plot 1
        eff5[det] = effData[det][4][4] / effData[det][6][4]
        eff10[det] = effData[det][4][9] / effData[det][6][9]
        cts5[det] = effData[det][4][4]
        cts10[det] = effData[det][4][9]

    hEffEnr = np.nan_to_num(hPassEnr / hAllEnr)
    hEffNat = np.nan_to_num(hPassNat / hAllNat)

    # calculate CI's for each histogram bin
    enrCILo, enrCIHi = proportion.proportion_confint(hPassEnr,
                                                     hAllEnr,
                                                     alpha=0.1,
                                                     method='beta')
    natCILo, natCIHi = proportion.proportion_confint(hPassNat,
                                                     hAllNat,
                                                     alpha=0.1,
                                                     method='beta')

    # ---- fit overall enr/nat efficiency to a weibull function ----
    fitBnd = ((1, -20, 0, 0.5), (np.inf, np.inf, np.inf, 0.99))
    fitInit = [1, -1.6, 2.75, 0.95]
    poptNat, pcovNat = curve_fit(wl.weibull,
                                 xVals,
                                 hEffNat,
                                 p0=fitInit,
                                 bounds=fitBnd)
    poptEnr, pcovEnr = curve_fit(wl.weibull,
                                 xVals,
                                 hEffEnr,
                                 p0=fitInit,
                                 bounds=fitBnd)
    effNat = wl.weibull(xVals, *poptNat)
    effEnr = wl.weibull(xVals, *poptEnr)

    # ---- fitSlo efficiency uncertainty, method 1 ----
    # use the diagonal as the uncertainty (ignoring correlations)

    zVal = 1.645  # z-score for a 90% C.L., matching alpha=0.1 above
    sigmaEnr = np.sqrt(np.diagonal(pcovEnr))
    sigmaNat = np.sqrt(np.diagonal(pcovNat))

    effEnrHi = wl.weibull(xVals, *(np.array(poptEnr) + zVal * sigmaEnr))
    effEnrLo = wl.weibull(xVals, *(np.array(poptEnr) - zVal * sigmaEnr))
    effNatHi = wl.weibull(xVals, *(np.array(poptNat) + zVal * sigmaNat))
    effNatLo = wl.weibull(xVals, *(np.array(poptNat) - zVal * sigmaNat))

    # ---- fitSlo efficiency uncertainty, method 2 ----
    # https://stats.stackexchange.com/questions/135749/confidence-intervals-of-fitted-weibull-survival-function

    effEnrHi2 = np.exp(np.log(effEnr) * np.exp(zVal / np.sqrt(hAllEnr)))
    effEnrLo2 = np.exp(np.log(effEnr) * np.exp(-1 * zVal / np.sqrt(hAllEnr)))
    effNatHi2 = np.exp(np.log(effNat) * np.exp(zVal / np.sqrt(hAllNat)))
    effNatLo2 = np.exp(np.log(effNat) * np.exp(-1 * zVal / np.sqrt(hAllNat)))
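    # i.e. the bounds are eff ** exp(+/- zVal / sqrt(N)): a confidence interval
    # constructed on the log(-log) scale, per the linked post (N = counts per bin)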

    # ---- run toy MC to get FINAL fitSlo efficiency uncertainty ----
    if runMC:

        np.random.seed(seedNum)

        xLo, xHi = 0, 200
        xCoarse = np.arange(xLo, xHi, 0.1)
        hEnr, hNat = [], []  # store toymc histo efficiencies

        nMC = 10000
        for i in range(nMC):

            if i % 100 == 0:
                wl.update_progress(float(i) / nMC)

            # vary the spectra randomly (toy MC method) and re-fit each one
            ePass = np.random.poisson(hPassEnr)
            nPass = np.random.poisson(hPassNat)
            eAll = np.random.poisson(hAllEnr)
            nAll = np.random.poisson(hAllNat)

            eEff = np.nan_to_num(ePass / eAll)
            nEff = np.nan_to_num(nPass / nAll)

            # fit each toy variation with loop-local names, so the original
            # combined-fit parameters survive for the DB write below
            poptE, _ = curve_fit(wl.weibull, xVals, eEff,
                                 p0=fitInit, bounds=fitBnd)
            poptN, _ = curve_fit(wl.weibull, xVals, nEff,
                                 p0=fitInit, bounds=fitBnd)

            effCoarseEnr = wl.weibull(xCoarse, *poptE)
            effCoarseNat = wl.weibull(xCoarse, *poptN)

            hEnr.append(effCoarseEnr)
            hNat.append(effCoarseNat)

            # diagnostic plot -- don't delete
            # hScale = np.amax(hAllEnr)
            # plt.plot(xCoarse, effCoarseEnr, '-r')
            # plt.plot(xVals, hAllEnr / hScale, ls='steps', c='k', label='all m2s238 enr evts')
            # plt.plot(xVals, hPassEnr / hScale, ls='steps', c='b', label='orig passing')
            # plt.plot(xVals, ePass / hScale, ls='steps', c='m', label='toyMC variation')
            # plt.axvline(1, c='g', label="1 keV eff: {:.2f}".format(wl.weibull(1, *poptE)))
            # plt.xlabel("Energy (keV)", ha='right', x=1)
            # plt.xlim(0, 60)
            # plt.legend()
            # plt.tight_layout()
            # plt.savefig("./plots/toyMCEff.pdf")
            # exit()

        hEnr, hNat = np.vstack(hEnr), np.vstack(hNat)
        toyEffEnr = hEnr.mean(axis=0)
        toyEffNat = hNat.mean(axis=0)
        toyStdEnr = hEnr.std(axis=0)
        toyStdNat = hNat.std(axis=0)
        np.savez("./data/lat-toymc-eff.npz", toyEffEnr, toyEffNat, toyStdEnr,
                 toyStdNat)

        # save results into calDB
        if writeDB:
            dbKey = "fitSlo_Combined_m2s238_eff95"
            dbVals = {
                0: [*poptEnr, *sigmaEnr],  # 0: enr
                1: [*poptNat, *sigmaNat]   # 1: nat
            }
            print("Writing DB vals for key:", dbKey)
            # pprint(dbVals)
            dsi.setDBRecord({"key": dbKey, "vals": dbVals},
                            forceUpdate=True, calDB=calDB, pars=pars)
            pprint(dsi.getDBRecord(dbKey, False, calDB, pars))
            print("DB filled.")

    # ---------- make some plots ----------
    if makePlots:

        # 1.
        # individual detector efficiency at 5 & 10 keV, vs number of counts
        fig, (p0, p1) = plt.subplots(1, 2, figsize=(10, 5))

        nEnr = len([det for det in eff5 if detInfo.isEnr(det)])
        nNat = len(eff5) - nEnr
        cmapEnr = plt.cm.get_cmap('nipy_spectral', nEnr + 1)
        cmapNat = plt.cm.get_cmap('jet', nNat + 1)
        iEnr, iNat = 0, 0

        for det in eff5:
            if detInfo.isEnr(det):
                iEnr += 1
                p, idx, cm = p0, iEnr, cmapEnr
            else:
                iNat += 1
                p, idx, cm = p1, iNat, cmapNat

            p.plot([eff5[det], eff10[det]], [cts5[det], cts10[det]],
                   '-',
                   c=cm(idx),
                   lw=1,
                   label="C{}P{}D{}".format(*det))
            p.plot(eff5[det], cts5[det], 'v', ms=5, c=cm(idx))
            p.plot(eff10[det], cts10[det], 'o', ms=5, c=cm(idx))

        p0.plot(np.nan, 'kv', ms=5, label='5 keV')
        p0.plot(np.nan, 'ko', ms=5, label='10 keV')
        p1.plot(np.nan, 'kv', ms=5, label='5 keV')
        p1.plot(np.nan, 'ko', ms=5, label='10 keV')
        p0.legend(ncol=3, fontsize=8)
        p1.legend(ncol=3, fontsize=8)
        p0.set_xlabel("Enr. Efficiency", ha='right', x=1)
        p1.set_xlabel("Nat. Efficiency", ha='right', x=1)
        p0.set_ylabel("Counts (passing, m2s238)", ha='right', y=1)
        plt.tight_layout()
        plt.savefig("./plots/countsVsEff.pdf")
        plt.close()

        # 2.
        # individual and combined detector efficiencies
        fsD = dsi.getDBRecord("fitSlo_cpd_eff95", False, calDB, pars)

        fig, (p0, p1) = plt.subplots(1, 2, figsize=(10, 5))
        iEnr, iNat = 0, 0

        for det in eff5:
            if detInfo.isEnr(det):
                iEnr += 1
                p, idx, cm = p0, iEnr, cmapEnr
            else:
                iNat += 1
                p, idx, cm = p1, iNat, cmapNat

            wbPars = fsD[int(det)]
            c, loc, scale, amp = wbPars[3], wbPars[4], wbPars[5], wbPars[2]
            effDet = wl.weibull(xVals, c, loc, scale, amp)

            p.plot(xVals,
                   effDet,
                   alpha=0.4,
                   c=cm(idx),
                   lw=2,
                   label='C{}P{}D{}'.format(*det))

        p0.plot(xVals, effEnr, lw=4, color='k', label='Enr, Combined')
        p1.plot(xVals, effNat, lw=4, color='k', label='Nat, Combined')
        p0.legend(loc=4, ncol=3, fontsize=10)
        p1.legend(loc=4, ncol=3, fontsize=10)
        p0.set_xlabel("Energy (keV)", ha='right', x=1)
        p1.set_xlabel("Energy (keV)", ha='right', x=1)
        p0.set_ylabel("Efficiency", ha='right', y=1)
        plt.tight_layout()
        plt.savefig("./plots/effCombined.pdf")
        plt.close()

        # 3.
        # uncertainties on the combined efficiencies

        fig, (p0, p1) = plt.subplots(1, 2, figsize=(10, 5))

        # enriched
        p0.errorbar(xVals,
                    effEnr,
                    yerr=[hEffEnr - enrCILo, enrCIHi - hEffEnr],
                    color='k',
                    lw=1,
                    fmt='o',
                    capsize=2,
                    ms=3,
                    label="Enriched, Combined")

        # NOTE: method 1 swaps the high/low boundaries at the turning point
        # I.E. DO NOT USE!
        # p0.plot(xVals, effEnrHi, 'r-', lw=1, label="Method 1 (hi)")
        # p0.plot(xVals, effEnrLo, 'g-', lw=1, label="Method 1 (lo)")
        # p0.fill_between(xVals, effEnrLo, effEnrHi, color='b', alpha=0.5, label='Method 1')

        # NOTE: method 2 looks like the efficiency uncertainties are too small
        # I.E. DO NOT USE!
        # p0.plot(xVals, effEnrHi2, 'r-', lw=1, label="Method 2 (hi)")
        # p0.plot(xVals, effEnrLo2, 'g-', lw=1, label="Method 2 (lo)")
        # p0.fill_between(xVals, effEnrLo2, effEnrHi2, color='r', alpha=0.5, label='Method 2')

        # Method 3 - uncertainty from Toy MC results
        f = np.load("./data/lat-toymc-eff.npz")
        toyEffEnr, toyEffNat, toyStdEnr, toyStdNat = [f[k] for k in f]
        xLo, xHi = 0, 200
        xCoarse = np.arange(xLo, xHi, 0.1)

        p0.plot(xCoarse, toyEffEnr, c='r', lw=2, label='Toy MC Efficiency')
        effLo = toyEffEnr - zVal * toyStdEnr
        effHi = toyEffEnr + zVal * toyStdEnr
        p0.fill_between(xCoarse,
                        effLo,
                        effHi,
                        color='g',
                        alpha=0.4,
                        label='Toy MC Uncert.')

        # natural
        p1.errorbar(xVals,
                    effNat,
                    yerr=[hEffNat - natCILo, natCIHi - hEffNat],
                    color='k',
                    lw=1,
                    fmt='o',
                    capsize=2,
                    ms=3,
                    label="Natural, Combined")

        p1.plot(xCoarse, toyEffNat, c='r', lw=2, label="Toy MC Efficiency")
        effLo = toyEffNat - zVal * toyStdNat
        effHi = toyEffNat + zVal * toyStdNat
        p1.fill_between(xCoarse,
                        effLo,
                        effHi,
                        color='b',
                        alpha=0.3,
                        label='Toy MC Uncert.')

        p0.set_xlabel("Energy (keV)", ha='right', x=1)
        p1.set_xlabel("Energy (keV)", ha='right', x=1)
        p0.set_ylabel("Efficiency", ha='right', y=1)
        p0.legend()
        p1.legend()
        p0.set_xlim(0, 20)
        p0.set_ylim(0.4, 1)
        p1.set_xlim(0, 20)
        p1.set_ylim(0.4, 1)
        plt.tight_layout()
        plt.savefig("./plots/combinedEff.pdf")
Example #6
def GPXSloEff(makePlots=False, writeDB=False):
    """
        This function does 2 things:
        1) It calculates the total mHL == 1 efficiency at the 238 keV peak (not really required for anything but Wenqin wanted it)
        2) It uses the mHL == 1 efficiency at the 238 keV peak and performs the sideband method of calculating the cut efficiency.

        The sideband method was suggested by Wenqin as a cross-check to the cut efficiency. Instead of breaking up the m2s238 event pairs and evaluating the efficiency, we keep the pairs in tact and perform a background subtraction on the energy window slightly below the peak. We then can back out the single detector efficiency at low energy by using the mHL==1 efficiency of the 238 keV peak at high energy.

    Requires:
        CalPairHit_WithDbCut.h5 and CalPairHit_WithDbCut_mH1.h5, both generated in lat2.py
        These files are essentially the m2s238 hit data with a Pass/Fail for fitSlo
        Weibull fit parameters of the Combined efficiency

    Writes:
        fitSlo_Sideband_m2s238_eff95 (containing the sideband Weibull fit parameters of Enr and Nat) key to the DB

    """

    df = pd.read_hdf('{}/data/CalPairHit_WithDbCut.h5'.format(
        os.environ['LATDIR']))
    windowSize = 0.2
    # energy bin centers from 1 keV to 50 keV, in windowSize steps
    xVals = [round(windowSize * i, 2)
             for i in range(int(1 / windowSize),
                            int((50. + windowSize) / windowSize))]
    fitBnd = ((1, -20, 0, 0.5), (np.inf, np.inf, np.inf, 0.99))  # eFitHi=30 and these bounds work
    initialGuess = [1, -1.6, 2.75, 0.95]
    fListPeakE, fListBkgE = [], []
    cListPeakE, cListBkgE = [], []
    fListPeakN, fListBkgN = [], []
    cListPeakN, cListBkgN = [], []

    for idx, er in enumerate(xVals):
        if idx % 50 == 0:
            print('Current Energy: {:.2f} of {:.2f}'.format(er, xVals[-1]))
        dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN = runCutVals(
            df, er, windowSize=windowSize)
        fListPeakE.append(dFullPeakE)
        cListPeakE.append(dCutPeakE)
        fListBkgE.append(dFullBkgE)
        cListBkgE.append(dCutBkgE)
        fListPeakN.append(dFullPeakN)
        cListPeakN.append(dCutPeakN)
        fListBkgN.append(dFullBkgN)
        cListBkgN.append(dCutBkgN)

    # Grab total fitSlo efficiency from DB
    dbKey = "fitSlo_Combined_m2s238_eff95"
    fsN = dsi.getDBRecord(dbKey, False, calDB, pars)
    enrpars = fsN[0]
    natpars = fsN[1]
    EnrEff = wl.weibull(xVals, *enrpars[:4])
    NatEff = wl.weibull(xVals, *natpars[:4])

    # mHL==1 efficiency correction from high energy
    effScaleEnr, effScaleNat = getM1Efficiency()
    print('Scaling Factors: ', effScaleEnr, effScaleNat)

    effCorrE = (np.array(cListPeakE) - np.array(cListBkgE)) / (
        np.array(fListPeakE) - np.array(fListBkgE))
    effCorrE /= effScaleEnr
    enr_ci_low, enr_ci_upp = proportion.proportion_confint(
        np.array(cListPeakE) - np.array(cListBkgE),
        np.array(fListPeakE) - np.array(fListBkgE),
        alpha=0.1,
        method='beta')

    effCorrN = (np.array(cListPeakN) - np.array(cListBkgN)) / (
        np.array(fListPeakN) - np.array(fListBkgN))
    effCorrN /= effScaleNat
    nat_ci_low, nat_ci_upp = proportion.proportion_confint(
        np.array(cListPeakN) - np.array(cListBkgN),
        np.array(fListPeakN) - np.array(fListBkgN),
        alpha=0.1,
        method='beta')

    poptEnr, pcovenr = curve_fit(wl.weibull,
                                 xVals,
                                 effCorrE,
                                 p0=initialGuess,
                                 bounds=fitBnd)
    poptNat, pcovnat = curve_fit(wl.weibull,
                                 xVals,
                                 effCorrN,
                                 p0=initialGuess,
                                 bounds=fitBnd)

    effEFit = wl.weibull(xVals, *poptEnr)
    effNFit = wl.weibull(xVals, *poptNat)

    # Comparison of the parameters
    print(poptEnr, enrpars[:4])
    print(poptNat, natpars[:4])

    sigmaEnr = np.sqrt(np.diagonal(pcovenr))
    sigmaNat = np.sqrt(np.diagonal(pcovnat))

    if writeDB:
        dbKeyFill = "fitSlo_Sideband_m2s238_eff95"
        dbVals = {
            0: [*poptEnr, *sigmaEnr],  # 0: enr
            1: [*poptNat, *sigmaNat]   # 1: nat
        }
        print('Writing dbVals:', dbVals)
        dsi.setDBRecord({"key": dbKeyFill, "vals": dbVals},
                        forceUpdate=True, calDB=calDB, pars=pars)
        print("DB filled:", dbKeyFill)

        # Get the record
        fsFill = dsi.getDBRecord(dbKeyFill, False, calDB, pars)
        print(fsFill)

    if makePlots:
        fig1, ax1 = plt.subplots(nrows=2, ncols=2, figsize=(15, 10))
        ax1 = ax1.flatten()
        ax1[0].errorbar(xVals,
                        effCorrE,
                        yerr=[
                            effCorrE - enr_ci_low / effScaleEnr,
                            enr_ci_upp / effScaleEnr - effCorrE
                        ],
                        color='k',
                        linewidth=0.8,
                        fmt='o',
                        alpha=0.75,
                        capsize=2,
                        label='Sideband Method')
        ax1[0].plot(xVals, effEFit, 'b', lw=3, label='Sideband Fit Efficiency')
        ax1[0].plot(xVals, EnrEff, 'r', lw=3, label='Central Fit Efficiency')
        ax1[0].set_title('Enriched Efficiency')
        ax1[0].set_ylabel('Efficiency')
        ax1[0].legend()
        ax1[2].plot(xVals, EnrEff - effEFit)
        ax1[2].set_xlabel('Energy (keV)')
        ax1[2].set_ylabel('Efficiency Difference (Central - Sideband)')

        ax1[1].errorbar(xVals,
                        effCorrN,
                        yerr=[
                            effCorrN - nat_ci_low / effScaleNat,
                            nat_ci_upp / effScaleNat - effCorrN
                        ],
                        color='k',
                        linewidth=0.8,
                        fmt='o',
                        alpha=0.75,
                        capsize=2,
                        label='Sideband Method')
        ax1[1].plot(xVals, effNFit, 'b', lw=3, label='Sideband Fit Efficiency')
        ax1[1].plot(xVals, NatEff, 'r', lw=3, label='Central Fit Efficiency')
        ax1[1].set_title('Natural Efficiency')
        ax1[1].set_ylabel('Efficiency')
        ax1[1].legend()
        ax1[3].plot(xVals, NatEff - effNFit)
        ax1[3].set_xlabel('Energy (keV)')
        ax1[3].set_ylabel('Efficiency Difference (Central - Sideband)')

        plt.tight_layout()
        fig1.savefig('./plots/GPXEfficiencyComparison.png')
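
The sideband estimator used above, written out as one function. This is a
sketch of the same arithmetic as the inline numpy expressions: efficiency from
background-subtracted pass/total counts, corrected by the mHL==1 scale factor.

import numpy as np

def sidebandEff(cutPeak, cutBkg, fullPeak, fullBkg, effScale):
    """Background-subtracted cut efficiency, scaled by the mHL==1 correction."""
    num = np.asarray(cutPeak) - np.asarray(cutBkg)    # passing counts, bkg-subtracted
    den = np.asarray(fullPeak) - np.asarray(fullBkg)  # all counts, bkg-subtracted
    return (num / den) / effScale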