Example 1
 def scriptGet(self, script_id):
     with self._lock:
         return self._db.table('scripts').get(
             (tinydb.Query()['script_id'] == script_id))
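This method, like the other self._db / self._lock excerpts below, reads as part of a thread-safe TinyDB wrapper class. A minimal sketch of the assumed surrounding class follows; the class name and constructor are hypothetical, not from the original source:

import threading
import tinydb

class Database:  # hypothetical name for the wrapper these methods belong to
    def __init__(self, path):
        self._db = tinydb.TinyDB(path)   # backing JSON store
        self._lock = threading.Lock()    # serializes access across threads

    def scriptGet(self, script_id):
        with self._lock:
            return self._db.table('scripts').get(
                tinydb.Query()['script_id'] == script_id)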
Example 2
# Assumed imports for this excerpt: `ds` and `wl` correspond to the repo's
# DataSetInfo.py and waveLibs.py modules (cf. the imp.load_source calls in Example 3).
import os
import tinydb as db
import ROOT
from ROOT import TH1D, TF1, TFile, TChain, TCanvas, TLegend

def applyThresholdCut(dsNum):
    """ ./plots2.py -thr [dsNum]

    Create "after cuts" histograms for each dataset (similar to genRawHists)
    Use a super fine binning and huge energy range for versatility.
    Don't distinguish between enriched and natural detectors in 2D plots.
        1. sum histogram 'hSum'
        2. cpd vs energy 'hCPDE'
        3. cpd vs run    'hCPDrun'
    """
    threshCutE = 0.9 # keep a channel only if its threshold mu is below this value (keV)

    kpb = 0.01
    eLo, eHi = 0., 15000.
    nBins = int((eHi-eLo)/kpb)

    chList = ds.GetGoodChanList(dsNum)
    chList = [ch for ch in chList if ch!=692 and ch!=1232]

    calDB = db.TinyDB('../calDB.json')
    pars = db.Query()

    hTot = TH1D("hTot","hTot",nBins,eLo,eHi)
    hEnr = TH1D("hEnr","hEnr",nBins,eLo,eHi)
    hNat = TH1D("hNat","hNat",nBins,eLo,eHi)

    fList, fMissing, fNoThresh, fCut = [], [], [], []
    dsPath = "/global/homes/w/wisecg/project/cuts/fs_rn_wf"

    runLo, runHi = 0, 0
    for bkgIdx in range(ds.dsMap[dsNum]+1):

        if bkgIdx % 10 == 0 and bkgIdx > 0: print("%.1f%% done." % (100. * bkgIdx / float(ds.dsMap[dsNum]+1)))

        threshKey = "thresh_ds%d_bkgidx%d" % (dsNum, bkgIdx)
        recList = calDB.search(pars.key == threshKey)
        if len(recList) != 1:
            print("Error: expected exactly one record for key:", threshKey)
            for record in recList:
                print(record)
            return
        threshDict = recList[0]['vals']

        for chan in chList:

            # save threshold information
            chKey = u'%d' % chan
            if chKey not in threshDict.keys():
                fNoThresh.append([dsNum,bkgIdx,chan])
                continue
            mu, sig = threshDict[chKey][0], threshDict[chKey][1]

            # save file paths
            fName = "%s/fs_rn_wf-DS%d-%d-ch%d.root" % (dsPath, dsNum, bkgIdx, chan)
            if os.path.isfile(fName):
                if 0. < mu < threshCutE:
                    fList.append(fName)
                    # print "ch %d  bkgIdx %d  mu %.2f  sig %.2f" % (chan,bkgIdx,mu,sig)
                else:
                    fCut.append(fName)
                    # print "ch %d  bkgIdx %d  mu %.2f  sig %.2f" % (chan,bkgIdx,mu,sig)
            else:
                fMissing.append(fName)
                continue

            # calculate efficiency curve
            thisErf = TF1("thisErf","0.5*(1+TMath::Erf((x-[0])/(TMath::Sqrt(2)*[1]) ))")
            thisErf.SetParameter(0,mu)
            thisErf.SetParameter(1,abs(sig))

            fTmp = TFile(fName)
            tTmp = fTmp.Get("skimTree")

            tTmp.GetEntry(0)
            if runLo==0: runLo = tTmp.run
            tTmp.GetEntry(tTmp.GetEntries()-1)
            if tTmp.run > runHi:
                runHi = tTmp.run

            cTmp = fTmp.Get("chanCut").GetTitle()
            hTmp = wl.H1D(tTmp,nBins,eLo,eHi,"trapENFCal",cTmp,"hTmp","hTmp")
            hTmp.Divide(thisErf)
            hTot.Add(hTmp)

            hTmpEnr = wl.H1D(tTmp,nBins,eLo,eHi,"trapENFCal",cTmp+" && isEnr && trapENFCal > %.1f" % mu,"hTmpEnr","hTmpEnr")
            hTmpEnr.Divide(thisErf)
            hEnr.Add(hTmpEnr)

            hTmpNat = wl.H1D(tTmp,nBins,eLo,eHi,"trapENFCal",cTmp+" && !isEnr && trapENFCal > %.1f" % mu,"hTmpNat","hTmpNat")
            hTmpNat.Divide(thisErf)
            hNat.Add(hTmpNat)

    print "DS-%d  nFound: %d  nMissing: %d  nCut: %d  nNoThresh: %d" % (dsNum,len(fList),len(fMissing),len(fCut),len(fNoThresh))

    # save to a permanent place
    fOut = TFile("../data/latThresh_DS%d.root" % dsNum, "RECREATE")
    hTot.Write()
    hEnr.Write()
    hNat.Write()

    # load the full chain w/ files passing threshold cut (but no efficiency correction applied.)

    chainFSRNWF = TChain("skimTree")
    for f in fList:
        chainFSRNWF.Add(f)
    hFSRNWF = wl.H1D(chainFSRNWF,nBins,eLo,eHi,"trapENFCal","","trapENFCal","","hFSRNWF")

    cpdLo, cpdHi = 111, 175
    if dsNum==4: cpdLo, cpdHi = 211, 275
    if dsNum==5: cpdLo, cpdHi = 111, 275
    nCPD = cpdHi - cpdLo

    hCPDE = wl.H2D(chainFSRNWF,nBins,eLo,eHi,nCPD,cpdLo,cpdHi,"C*100+P*10+D:trapENFCal","","trapENFCal","CPD","CPD vs. E","hCPDE")
    hCPDE.Write()

    chainFSRNWF.GetEntry(0)
    runLo = chainFSRNWF.run
    chainFSRNWF.GetEntry(chainFSRNWF.GetEntries()-1)
    runHi = chainFSRNWF.run
    nRuns = runHi - runLo + 1
    print("First run:", runLo, "Last run:", runHi)
    hCPDrun = wl.H2D(chainFSRNWF,nRuns,runLo,runHi,nCPD,cpdLo,cpdHi,"C*100+P*10+D:run","","run","CPD","CPD vs. run","hCPDrun")
    hCPDrun.Write()

    kpb = 0.1
    rebinFactor = int(kpb/0.01)
    hTot = hTot.Rebin(rebinFactor) # this creates a new histogram
    hTot.GetXaxis().SetRangeUser(0.,12.)

    c = TCanvas("c","Shan is pretty",800,600)
    c.SetLogy(1)
    hTot.SetMinimum(0.5)
    hTot.SetLineColor(ROOT.kBlue)
    hTot.SetTitle("")
    hTot.GetXaxis().SetTitle("trapENFCal")
    hTot.GetYaxis().SetTitle("Counts")
    hTot.Draw("hist")

    hFSRNWF = hFSRNWF.Rebin(rebinFactor)
    hFSRNWF.GetXaxis().SetRangeUser(0.,12.)
    hFSRNWF.SetLineColor(ROOT.kRed)
    hFSRNWF.Draw("hist same")

    leg = TLegend(0.7,0.75,0.89,0.89)
    leg.AddEntry(hFSRNWF,"no thresh corr.","l")
    leg.AddEntry(hTot,"w/ thresh","l")
    leg.Draw("same")

    c.Print("../plots/latThresh_DS%d.pdf" % dsNum)

    fOut.Close()
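The efficiency correction above divides each histogram by the error-function threshold curve thisErf. For reference, a minimal numpy version of the same curve, assuming scipy is available:

# Same threshold-efficiency curve as the TF1 "thisErf" above, in plain numpy.
import numpy as np
from scipy.special import erf

def thresh_eff(E, mu, sig):
    """Probability that a hit of energy E survives a threshold (mu, sig)."""
    return 0.5 * (1 + erf((E - mu) / (np.sqrt(2) * abs(sig))))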
Example 3
import sys, os, imp
import matplotlib as mpl
mpl.use('Agg')
sys.argv.append("-b")  # ROOT batch mode
import matplotlib.pyplot as plt
plt.style.use('../pltReports.mplstyle')
from matplotlib.colors import LogNorm, Normalize

# dsi = imp.load_source('dsi',os.environ['LATDIR']+'/dsi.py')
dsi = imp.load_source('dsi', os.environ['LATDIR'] + '/sandbox/DataSetInfo.py')
wl = imp.load_source('waveLibs', os.environ['LATDIR'] + '/waveLibs.py')

# load threshold data
import tinydb as db
dsNum, bkgIdx = 5, 83
calDB = db.TinyDB('../calDB.json')
pars = db.Query()
thD = dsi.getDBRecord("thresh_ds%d_bkgidx%d" % (dsNum, bkgIdx), False, calDB,
                      pars)

# load fitSlo vals for cal run range closest to the run range [22513, 22566]
dsNum, modNum, calIdx = 5, 1, 11  # calIdx 11: [[22568,22635],22568,22841],
fsD = dsi.getDBRecord("fitSlo_ds%d_idx%d_m%d_Peak" % (dsNum, calIdx, modNum),
                      False, calDB, pars)
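The dsi.getDBRecord calls above appear to rely on the same key/vals record layout seen in Example 2 (calDB.search(pars.key == ...), then recList[0]['vals']). A minimal sketch of that lookup in plain tinydb; the record layout is inferred from Example 2, not from dsi.py itself:

# Inferred record layout: {"key": "thresh_ds5_bkgidx83", "vals": {...}}
import tinydb as db

calDB = db.TinyDB('../calDB.json')
pars = db.Query()

def get_record(key):
    recs = calDB.search(pars.key == key)
    if len(recs) != 1:
        return None          # missing or ambiguous key
    return recs[0]['vals']   # payload dict, keyed by channel

thD = get_record("thresh_ds%d_bkgidx%d" % (5, 83))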
Example 4
import os

import telebot
import tinydb
from telebot import types

import services
from config import config

# make sure the DB directory exists before TinyDB tries to create the file
if not os.path.exists(config['DB']['PATH']):
    os.makedirs(os.path.dirname(config['DB']['PATH']), exist_ok=True)

bot = telebot.TeleBot(config['BOT']['TOKEN'])
subjectsDb = tinydb.TinyDB(config['DB']['SUBJECTS']['PATH'])
Subject = tinydb.Query()

queue = []
homework = []


@bot.message_handler(commands=['start', 'help'])
def start_menu(message: types.Message):
    bot.reply_to(message, config['BOT']['START'])


@bot.message_handler(commands=['send_homework'])
def homework_start(message: types.Message):
    student = services.find_student(message.chat.id, config)

    if not student:
        bot.reply_to(message, config['BOT']['START'])
Example 5
def plotStats():

    # load data from testStats
    f = np.load('../plots/slo-m2s238-test.npz')
    evtIdx, evtSumET, evtHitE, evtChans = f['arr_0'], f['arr_1'], f[
        'arr_2'], f['arr_3']
    thrCal = f['arr_4'].item()
    evtCtr, totCtr, runTime = f['arr_5'], f['arr_6'], f['arr_7']

    # load threshKeV values from bkg/auto-thrsh/db
    calDB = db.TinyDB("%s/calDB-v2.json" % dsi.latSWDir)
    pars = db.Query()
    threshDB = dsi.getDBRecord("thresh_ds1_bkg35_sub0", calDB=calDB, pars=pars)

    # throw a threshold warning if any det is above 1 keV (and by how much)
    for ch in thrCal:
        thrChan = np.asarray([val[3] for val in thrCal[ch]])
        thrMean, thrStd = np.mean(thrChan), np.std(thrChan)
        thrDB = threshDB[ch][0] + 3 * threshDB[ch][1]
        errString = "Above 1" if thrMean > 1.0 else ""
        # print("ch %d  DB %.3f  CAL %.3f keV (%.3f), nRuns %d  %s" % (ch, thrDB, thrMean, thrStd, len(thrChan), errString))

    # fill hit arrays
    hitE, chan = [], []
    for iE in range(len(evtHitE)):
        hitE.extend(evtHitE[iE])
        chan.extend(evtChans[iE])

    # map channels
    chMap = list(sorted(set(chan)))
    chDict = {chMap[i]: i for i in range(len(chMap))}
    chan = [chDict[ch] for ch in chan]

    # -- plot 1 - hit E spectrum
    fig = plt.figure()

    xLo, xHi, xpb = 0, 250, 1
    x, hE = wl.GetHisto(hitE, xLo, xHi, xpb)

    plt.plot(x, hE, ls='steps', lw=1.5, c='b', label='m=2,s=238 hits')
    plt.xlabel("Energy (keV)", ha='right', x=1.)
    plt.ylabel("Counts", ha='right', y=1.)
    plt.legend(loc=1)
    plt.savefig("../plots/slo-hitE-test.png")

    # -- plot 2 - counts per channel vs E (2d), low-E region
    plt.cla()

    xLo, xHi, xpb = 0.5, 5, 0.2
    yLo, yHi = 0, len(chMap)
    nbx, nby = int((xHi - xLo) / xpb), len(chMap)

    h1, _, _ = np.histogram2d(hitE,
                              chan,
                              bins=[nbx, nby],
                              range=[[xLo, xHi], [yLo, yHi]])
    h1 = h1.T
    im1 = plt.imshow(
        h1,
        cmap='jet')  #,aspect='auto')#),vmin=hMin,vmax=hMax)#,norm=LogNorm())

    xticklabels = ["%.1f" % t for t in np.arange(0, 5.5, 0.5)]
    yticks = np.arange(0, len(chMap))
    plt.xlabel("Energy (keV)", ha='right', x=1.)
    plt.gca().set_xticklabels(xticklabels)

    plt.ylabel("channel", ha='right', y=1.)
    plt.yticks(yticks)
    plt.gca().set_yticklabels(chMap, fontsize=12)

    # note: can control z axis limits w/ code in LAT/sandbox/sea-plot.py
    fig.colorbar(im1, ax=plt.gca(), fraction=len(chMap) / 941, pad=0.04)

    plt.tight_layout()
    plt.savefig("../plots/slo-fsVsHitE-test.png")

    # -- output: counts in each detector under 5 keV

    cLo, cHi, nbx = 0, len(chMap), len(chMap)
    x, hC = wl.GetHisto(chan, cLo, cHi, 1)

    hLow = [0]
    for idx, ch in enumerate(chMap):
        nTot = hC[idx + 1]  # 0-250 keV
        nLow = np.sum(h1[idx, :])  # 0-5 keV
        hLow.append(nLow)
        nCPB = nLow / nbx  # avg counts per bin, assume flat for now
        rTot = nTot / runTime
        rLow = nLow / runTime
        rCPB = nCPB / runTime  # counts/bin/runTime
        rt100Cts = (100 / rCPB) / 3600. if rCPB != 0 else -1
        print(
            "rt %d  ch %d  rTot %.2f  rLow %.4f  rCPB %.4f / %.1f keV  need RT:%d hrs"
            % (runTime, ch, rTot, rLow, rCPB, xpb, rt100Cts))

    # -- plot 3 - counts per channel (1d), and a few different energy regions
    plt.cla()

    plt.bar(x - 0.5, hC, 0.95, color='b', label='all hits %d-%d' % (0, 250))
    plt.bar(x - 0.5, hLow, 0.95, color='r', label='hits %d-%d' % (xLo, xHi))

    plt.xlabel("channel", ha='right', x=1.)
    xticks = np.arange(0, len(chMap))
    plt.xticks(xticks)
    plt.gca().set_xticklabels(chMap, fontsize=12, rotation=90)

    plt.ylabel("Counts, mHT=2, sumET=238 hits", ha='right', x=1.)

    plt.legend(loc=1)
    plt.savefig("../plots/slo-chans-test.png")
Example 6
import webrtcvad
import wave
import tinydb
import numpy as np
from array import array
from struct import pack
from subprocess import DEVNULL, Popen, PIPE, STDOUT
from collections import deque
import speech_recognition as sr
import socketio

stream = None
client = None
audio = None
vad = None
Audio = tinydb.Query()

block_duration = 10        # ms per audio block
padding_duration = 1000    # ms of padding around detected speech
SAMPLERATE = 48000
FRAMES_PER_BUFFER = SAMPLERATE * block_duration // 1000  # 480 samples per block
NUM_PADDING_CHUNKS = int(padding_duration / block_duration)  # 100 blocks
NUM_WINDOW_CHUNKS = int(400 / block_duration)  # 40 blocks in a 400 ms window
ring_buffer = deque(maxlen=NUM_PADDING_CHUNKS)
ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS
ring_buffer_index = 0

AUDIOS = []
SPEECHRECDIR = None
SPEECHREC = False
TTSDIR = None
Example 7
def calculate(pdim):

    log.info("Calculating EC2 pricing with the following inputs: {}".format(
        str(pdim.__dict__)))

    ts = phelper.Timestamp()
    ts.start('totalCalculation')
    ts.start('tinyDbLoadOnDemand')
    ts.start('tinyDbLoadReserved')

    awsPriceListApiVersion = ''
    cost = 0
    pricing_records = []
    priceQuery = tinydb.Query()

    global regiondbs
    global indexMetadata

    #DBs for Data Transfer
    tmpDtDbKey = consts.SERVICE_DATA_TRANSFER + pdim.region + pdim.termType
    dtdbs = regiondbs.get(tmpDtDbKey, {})
    if not dtdbs:
        dtdbs, dtIndexMetadata = phelper.loadDBs(
            consts.SERVICE_DATA_TRANSFER,
            phelper.get_partition_keys(consts.SERVICE_DATA_TRANSFER,
                                       pdim.region,
                                       consts.SCRIPT_TERM_TYPE_ON_DEMAND,
                                       **{}))
        regiondbs[tmpDtDbKey] = dtdbs

    #_/_/_/_/_/ ON-DEMAND PRICING _/_/_/_/_/
    if pdim.termType == consts.SCRIPT_TERM_TYPE_ON_DEMAND:
        #Load On-Demand DBs
        indexArgs = {'tenancies': [consts.EC2_TENANCY_MAP[pdim.tenancy]]}
        tmpDbKey = consts.SERVICE_EC2 + pdim.region + pdim.termType + pdim.tenancy

        dbs = regiondbs.get(tmpDbKey, {})
        if not dbs:
            dbs, indexMetadata = phelper.loadDBs(
                consts.SERVICE_EC2,
                phelper.get_partition_keys(consts.SERVICE_EC2, pdim.region,
                                           consts.SCRIPT_TERM_TYPE_ON_DEMAND,
                                           **indexArgs))
            regiondbs[tmpDbKey] = dbs

        ts.finish('tinyDbLoadOnDemand')
        log.debug("Time to load OnDemand DB files: [{}]".format(
            ts.elapsed('tinyDbLoadOnDemand')))

        #TODO: Move common operations to a common module, and leave only EC2-specific operations in ec2/pricing.py (create a class)
        #TODO: support all tenancy types (Host and Dedicated)
        #Compute Instance
        if pdim.instanceHours:
            computeDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_COMPUTE_INSTANCE,
                 consts.EC2_TENANCY_SHARED))]
            ts.start('tinyDbSearchComputeFile')
            query = (
                (priceQuery['Instance Type'] == pdim.instanceType) &
                (priceQuery['Operating System']
                 == consts.EC2_OPERATING_SYSTEMS_MAP[pdim.operatingSystem]) &
                (priceQuery['Tenancy'] == consts.EC2_TENANCY_SHARED) &
                (priceQuery['Pre Installed S/W'] == pdim.preInstalledSoftware)
                & (priceQuery['CapacityStatus']
                   == consts.EC2_CAPACITY_RESERVATION_STATUS_MAP[
                       pdim.capacityReservationStatus]) &
                (priceQuery['License Model']
                 == consts.EC2_LICENSE_MODEL_MAP[pdim.licenseModel]))  # &
            #(priceQuery['OfferingClass'] == pdim.offeringClass) &
            #(priceQuery['PurchaseOption'] == purchaseOption ))

            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EC2, computeDb, query, pdim.instanceHours,
                pricing_records, cost)
            log.debug("Time to search compute:[{}]".format(
                ts.finish('tinyDbSearchComputeFile')))

        #Data Transfer
        dataTransferDb = dtdbs[phelper.create_file_key(
            (consts.REGION_MAP[pdim.region],
             consts.TERM_TYPE_MAP[pdim.termType],
             consts.PRODUCT_FAMILY_DATA_TRANSFER))]

        #Out to the Internet
        if pdim.dataTransferOutInternetGb:
            ts.start('searchDataTransfer')
            query = ((priceQuery['To Location'] == 'External') &
                     (priceQuery['Transfer Type'] == 'AWS Outbound'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_DATA_TRANSFER, dataTransferDb, query,
                pdim.dataTransferOutInternetGb, pricing_records, cost)
            log.debug("Time to search AWS Data Transfer Out: [{}]".format(
                ts.finish('searchDataTransfer')))

        #Intra-regional data transfer - in/out/between EC2 AZs or using EIPs or ELB
        if pdim.dataTransferOutIntraRegionGb:
            query = ((priceQuery['Transfer Type'] == 'IntraRegion'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_DATA_TRANSFER, dataTransferDb, query,
                pdim.dataTransferOutIntraRegionGb, pricing_records, cost)

        #Inter-regional data transfer - out to other AWS regions
        if pdim.dataTransferOutInterRegionGb:
            query = ((priceQuery['Transfer Type'] == 'InterRegion Outbound') &
                     (priceQuery['To Location']
                      == consts.REGION_MAP[pdim.toRegion]))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_DATA_TRANSFER, dataTransferDb, query,
                pdim.dataTransferOutInterRegionGb, pricing_records, cost)

        #EBS Storage
        if pdim.ebsStorageGbMonth:
            #storageDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_STORAGE)]
            storageDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_STORAGE))]
            query = ((priceQuery['Volume Type'] == pdim.volumeType))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EBS, storageDb, query, pdim.ebsStorageGbMonth,
                pricing_records, cost)

        #System Operation (pIOPS)
        if pdim.volumeType == consts.EBS_VOLUME_TYPE_PIOPS and pdim.pIops:
            #storageDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SYSTEM_OPERATION)]
            storageDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_SYSTEM_OPERATION))]
            query = ((priceQuery['Group'] == 'EBS IOPS'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EBS, storageDb, query, pdim.pIops,
                pricing_records, cost)

        #Snapshot Storage
        if pdim.ebsSnapshotGbMonth:
            #snapshotDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_SNAPSHOT)]
            snapshotDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_SNAPSHOT))]
            query = (
                (priceQuery['usageType'] ==
                 consts.REGION_PREFIX_MAP[pdim.region] + 'EBS:SnapshotUsage')
            )  #EBS:SnapshotUsage comes with a prefix in the PriceList API file (i.e. EU-EBS:SnapshotUsage)
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EBS, snapshotDb, query, pdim.ebsSnapshotGbMonth,
                pricing_records, cost)

        #Classic Load Balancer
        if pdim.elbHours:
            #elbDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER)]
            elbDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_LOAD_BALANCER))]
            query = ((priceQuery['usageType']
                      == consts.REGION_PREFIX_MAP[pdim.region] +
                      'LoadBalancerUsage') &
                     (priceQuery['operation'] == 'LoadBalancing'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_ELB, elbDb, query, pdim.elbHours,
                pricing_records, cost)

        if pdim.elbDataProcessedGb:
            #elbDb = dbs[phelper.create_file_key(consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType], consts.PRODUCT_FAMILY_LOAD_BALANCER)]
            elbDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_LOAD_BALANCER))]
            query = ((priceQuery['usageType']
                      == consts.REGION_PREFIX_MAP[pdim.region] +
                      'DataProcessing-Bytes') &
                     (priceQuery['operation'] == 'LoadBalancing'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_ELB, elbDb, query, pdim.elbDataProcessedGb,
                pricing_records, cost)

        #Application Load Balancer
        #TODO: add support for Network Load Balancer
        if pdim.albHours:
            albDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER))]
            query = ((priceQuery['usageType']
                      == consts.REGION_PREFIX_MAP[pdim.region] +
                      'LoadBalancerUsage') &
                     (priceQuery['operation'] == 'LoadBalancing:Application'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_ELB, albDb, query, pdim.albHours,
                pricing_records, cost)

        if pdim.albLcus:
            albDb = dbs[phelper.create_file_key(
                (consts.REGION_MAP[pdim.region],
                 consts.TERM_TYPE_MAP[pdim.termType],
                 consts.PRODUCT_FAMILY_APPLICATION_LOAD_BALANCER))]
            query = ((priceQuery['usageType']
                      == consts.REGION_PREFIX_MAP[pdim.region] + 'LCUUsage') &
                     (priceQuery['operation'] == 'LoadBalancing:Application'))
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_ELB, albDb, query, pdim.albLcus,
                pricing_records, cost)

        #TODO: EIP
        #TODO: Dedicated Host
        #TODO: NAT Gateway
        #TODO: Fee

    #_/_/_/_/_/ RESERVED PRICING _/_/_/_/_/
    #Load Reserved DBs
    if pdim.termType == consts.SCRIPT_TERM_TYPE_RESERVED:
        indexArgs = {
            'offeringClasses':
            [consts.EC2_OFFERING_CLASS_MAP[pdim.offeringClass]],
            'tenancies': [consts.EC2_TENANCY_MAP[pdim.tenancy]],
            'purchaseOptions':
            [consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]]
        }
        #Load all values for offeringClasses, tenancies and purchaseOptions
        #indexArgs = {'offeringClasses':consts.EC2_OFFERING_CLASS_MAP.values(),
        #             'tenancies':consts.EC2_TENANCY_MAP.values(), 'purchaseOptions':consts.EC2_PURCHASE_OPTION_MAP.values()}
        tmpDbKey = consts.SERVICE_EC2 + pdim.region + pdim.termType + pdim.offeringClass + pdim.tenancy + pdim.offeringType
        #tmpDbKey = consts.SERVICE_EC2+pdim.region+pdim.termType
        dbs = regiondbs.get(tmpDbKey, {})
        if not dbs:
            dbs, indexMetadata = phelper.loadDBs(
                consts.SERVICE_EC2,
                phelper.get_partition_keys(consts.SERVICE_EC2, pdim.region,
                                           consts.SCRIPT_TERM_TYPE_RESERVED,
                                           **indexArgs))
            #regiondbs[consts.SERVICE_EC2+pdim.region+pdim.termType]=dbs
            regiondbs[tmpDbKey] = dbs

        log.debug("dbs keys:{}".format(dbs.keys()))

        ts.finish('tinyDbLoadReserved')
        log.debug("Time to load Reserved DB files: [{}]".format(
            ts.elapsed('tinyDbLoadReserved')))

        computeDb = dbs[phelper.create_file_key(
            (consts.REGION_MAP[pdim.region],
             consts.TERM_TYPE_MAP[pdim.termType],
             consts.PRODUCT_FAMILY_COMPUTE_INSTANCE, pdim.offeringClass,
             consts.EC2_TENANCY_SHARED,
             consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]))]

        ts.start('tinyDbSearchComputeFileReserved')
        query = ((priceQuery['Instance Type'] == pdim.instanceType) &
                 (priceQuery['Operating System']
                  == consts.EC2_OPERATING_SYSTEMS_MAP[pdim.operatingSystem]) &
                 (priceQuery['Tenancy'] == consts.EC2_TENANCY_SHARED) &
                 (priceQuery['Pre Installed S/W'] == pdim.preInstalledSoftware)
                 & (priceQuery['License Model']
                    == consts.EC2_LICENSE_MODEL_MAP[pdim.licenseModel]) &
                 (priceQuery['OfferingClass']
                  == consts.EC2_OFFERING_CLASS_MAP[pdim.offeringClass]) &
                 (priceQuery['PurchaseOption']
                  == consts.EC2_PURCHASE_OPTION_MAP[pdim.offeringType]) &
                 (priceQuery['LeaseContractLength']
                  == consts.EC2_RESERVED_YEAR_MAP["{}".format(pdim.years)]))

        hrsQuery = query & (priceQuery['Unit'] == 'Hrs')
        qtyQuery = query & (priceQuery['Unit'] == 'Quantity')

        if pdim.offeringType in (
                consts.SCRIPT_EC2_PURCHASE_OPTION_ALL_UPFRONT,
                consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT):
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EC2, computeDb, qtyQuery, pdim.instanceCount,
                pricing_records, cost)

        if pdim.offeringType in (
                consts.SCRIPT_EC2_PURCHASE_OPTION_NO_UPFRONT,
                consts.SCRIPT_EC2_PURCHASE_OPTION_PARTIAL_UPFRONT):
            #reservedInstanceHours = pdim.instanceCount * consts.HOURS_IN_MONTH * 12 * pdim.years
            reservedInstanceHours = utils.calculate_instance_hours_year(
                pdim.instanceCount, pdim.years)
            pricing_records, cost = phelper.calculate_price(
                consts.SERVICE_EC2, computeDb, hrsQuery, reservedInstanceHours,
                pricing_records, cost)

        log.debug("Time to search:[{}]".format(
            ts.finish('tinyDbSearchComputeFileReserved')))

    log.debug("regiondbs:[{}]".format(regiondbs.keys()))
    awsPriceListApiVersion = indexMetadata['Version']
    extraargs = {'priceDimensions': pdim}
    pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost,
                                   pricing_records, **extraargs)
    log.debug(json.dumps(vars(pricing_result), sort_keys=False, indent=4))

    #proc = psutil.Process()
    #log.debug("open_files: {}".format(proc.open_files()))

    log.debug("Total time: [{}]".format(ts.finish('totalCalculation')))
    return pricing_result.__dict__
Example 8
 def taskProfileGet(self, taskprofile_id):
     with self._lock:
         return self._db.table('taskprofiles').get(
             (tinydb.Query()['taskprofile_id'] == taskprofile_id))
Example 9
 def taskProfileDelete(self, taskprofile_id):
     with self._lock:
         return self._db.table('taskprofiles').remove(
             (tinydb.Query()['taskprofile_id'] == taskprofile_id))
Example 10
 def taskUpdate(self, profile_id, task_id, serialized_task):
     with self._lock:
         return self._db.table('tasks').update(
             serialized_task, (tinydb.Query()['profile_id'] == profile_id) &
             (tinydb.Query()['task_id'] == task_id))
Example 11
 def taskDelete(self, profile_id, task_id):
     with self._lock:
         return self._db.table(
             'tasks').remove((tinydb.Query()['profile_id'] == profile_id)
                             & (tinydb.Query()['task_id'] == task_id))
Example 12
 def taskGet(self, profile_id, task_id):
     with self._lock:
         return self._db.table(
             'tasks').get((tinydb.Query()['profile_id'] == profile_id)
                          & (tinydb.Query()['task_id'] == task_id))
Example 13
 def oiocGet(self, ioc_id):
     with self._lock:
         return self._db.table('openioc').get(
             (tinydb.Query()['ioc_id'] == ioc_id))
Example 14
 def oiocDelete(self, ioc_id):
     with self._lock:
         return self._db.table('openioc').remove(
             (tinydb.Query()['ioc_id'] == ioc_id))
Example 15
 def cacheListUpdate(self, profile_id, cacheType):
     with self._lock:
         return self._db.table('ObjectCache').search(
             ~(tinydb.Query()['removed'] == True)
             & (tinydb.Query()['profile_id'] == profile_id)
             & (tinydb.Query()['type'] == cacheType))
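tinydb overloads the bitwise operators & (and), | (or) and ~ (not) for combining conditions, as in the negated 'removed' test above. Because these operators bind tighter than ==, every comparison needs its own parentheses. A standalone sketch (the file name and profile_id value are illustrative):

import tinydb

db = tinydb.TinyDB('cache.json')
q = tinydb.Query()
# each comparison is parenthesized; & and ~ bind tighter than ==
active = db.table('ObjectCache').search(
    ~(q['removed'] == True) & (q['profile_id'] == 1))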
Example 16
 def auditGet(self, profile_id, audit_id):
     with self._lock:
         return self._db.table(
             'audits').get((tinydb.Query()['profile_id'] == profile_id)
                           & (tinydb.Query()['audit_id'] == audit_id))
Example 17
import tinydb
# https://media.readthedocs.org/pdf/tinydb/latest/tinydb.pdf

db = tinydb.TinyDB('test_file.json')  # creates a json database
db.insert({
    "Colour": "Blue",
    "Count": 5
})  # adds the dictionary values to the json file
db.insert({"Colour": "Purple", "Count": 2})

# You can search the database using Query objects and the .search function:
Car_colour = tinydb.Query()
# Query() creates a query object; its attributes name the fields to match
db.search(Car_colour.Colour == "Blue")
db.search(Car_colour.Count > 3)

# Update documents matching a query with .update:
db.update({"Count": 4}, Car_colour.Colour == "Green")

# Use .remove to delete documents matching a query
db.remove(Car_colour.Colour == "Green")

# Use .purge to remove all documents (renamed truncate() in TinyDB v4+)
db.purge()
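A short extension of the same API: tinydb's where() is shorthand for a Query() attribute, and conditions can be combined or counted. These queries would match the documents inserted above, before purge() empties the database:

from tinydb import where

db.search(where('Colour') == 'Blue')                               # shorthand query
db.search((Car_colour.Colour == 'Blue') & (Car_colour.Count > 3))  # combined conditions
db.count(Car_colour.Colour == 'Blue')                              # number of matches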
Example 18
 def auditDelete(self, profile_id, audit_id):
     with self._lock:
         return self._db.table(
             'audits').remove((tinydb.Query()['profile_id'] == profile_id)
                              & (tinydb.Query()['audit_id'] == audit_id))
Example 19
File: state.py Project: vhcg77/dvc
 def __init__(self, root_dir, dvc_dir):
     self.root_dir = root_dir
     self.dvc_dir = dvc_dir
     self.state_file = os.path.join(dvc_dir, self.STATE_FILE)
     self._db = tinydb.TinyDB(self.state_file)
     self._q = tinydb.Query()
Example 20
 def ruleList(self, profile_id):
     with self._lock:
         return self._db.table('rules').search(
             (tinydb.Query()['profile_id'] == profile_id))
Example 21
def calculate(pdim):

    log.info("Calculating Lambda pricing with the following inputs: {}".format(
        str(pdim.__dict__)))

    ts = phelper.Timestamp()
    ts.start('totalCalculationAwsLambda')

    dbs, indexMetadata = phelper.loadDBs(
        consts.SERVICE_LAMBDA, phelper.get_partition_keys(pdim.region))

    cost = 0
    pricing_records = []

    awsPriceListApiVersion = indexMetadata['Version']
    priceQuery = tinydb.Query()

    #TODO: add support to include/ignore free-tier (include a flag)

    serverlessDb = dbs[phelper.create_file_key(
        consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType],
        consts.PRODUCT_FAMILY_SERVERLESS)]

    #Requests
    if pdim.requestCount:
        query = ((priceQuery['Group'] == 'AWS-Lambda-Requests'))
        pricing_records, cost = phelper.calculate_price(
            consts.SERVICE_LAMBDA, serverlessDb, query, pdim.requestCount,
            pricing_records, cost)

    #GB-s (aka compute time)
    if pdim.avgDurationMs:
        query = ((priceQuery['Group'] == 'AWS-Lambda-Duration'))
        usageUnits = pdim.GBs
        pricing_records, cost = phelper.calculate_price(
            consts.SERVICE_LAMBDA, serverlessDb, query, usageUnits,
            pricing_records, cost)

    #Data Transfer
    dataTransferDb = dbs[phelper.create_file_key(
        consts.REGION_MAP[pdim.region], consts.TERM_TYPE_MAP[pdim.termType],
        consts.PRODUCT_FAMILY_DATA_TRANSFER)]

    #To internet
    if pdim.dataTransferOutInternetGb:
        query = ((priceQuery['To Location'] == 'External') &
                 (priceQuery['Transfer Type'] == 'AWS Outbound'))
        pricing_records, cost = phelper.calculate_price(
            consts.SERVICE_LAMBDA, dataTransferDb, query,
            pdim.dataTransferOutInternetGb, pricing_records, cost)

    #Intra-regional data transfer - in/out/between EC2 AZs or using IPs or ELB
    if pdim.dataTransferOutIntraRegionGb:
        query = ((priceQuery['Transfer Type'] == 'IntraRegion'))
        pricing_records, cost = phelper.calculate_price(
            consts.SERVICE_LAMBDA, dataTransferDb, query,
            pdim.dataTransferOutIntraRegionGb, pricing_records, cost)

    #Inter-regional data transfer - out to other AWS regions
    if pdim.dataTransferOutInterRegionGb:
        query = (
            (priceQuery['Transfer Type'] == 'InterRegion Outbound') &
            (priceQuery['To Location'] == consts.REGION_MAP[pdim.toRegion]))
        pricing_records, cost = phelper.calculate_price(
            consts.SERVICE_LAMBDA, dataTransferDb, query,
            pdim.dataTransferOutInterRegionGb, pricing_records, cost)

    pricing_result = PricingResult(awsPriceListApiVersion, pdim.region, cost,
                                   pricing_records)
    log.debug(json.dumps(vars(pricing_result), sort_keys=False, indent=4))

    log.debug("Total time to compute: [{}]".format(
        ts.finish('totalCalculationAwsLambda')))
    return pricing_result.__dict__
Example 22
 def ruleUpdateState(self, rule_id, state):
     with self._lock:
         r = self._db.table('rules').update(
             {'state': state}, (tinydb.Query()['id'] == rule_id))
         return r
Example 23
 def list_tasks(self, owner=None):  # type: (str) -> list[dict]
     """Return list of tasks."""
     if owner is None:
         return self._db.all()
     else:
         return self._db.search(tinydb.Query().owner == owner)
Example 24
 def ruleRemove(self, rule_id):
     with self._lock:
         return self._db.table('rules').remove(
             (tinydb.Query()['id'] == rule_id))
Example 25
def testStats():

    # load the last calibration run set in DS1 and figure out how many
    # counts we have in the m=2 s=238 population to work with.

    ds, calIdx = 1, 33
    calLo, calHi = 12726, 12733  # this is probably a lunchtime cal

    calDB = db.TinyDB("%s/calDB-v2.json" % dsi.latSWDir)
    pars = db.Query()

    # trap and HV thresholds for this calidx
    trapKey = "trapThr_ds1_m1_c33"
    trapVal = dsi.getDBRecord(trapKey, calDB=calDB, pars=pars)
    hvKey = "hvBias_ds1_m1_c33"
    hvVal = dsi.getDBRecord(hvKey, calDB=calDB, pars=pars)

    # pull thresh (keV) values for the bkgIdx closest to this calibration
    cLo, cHi = cal.GetCalRunCoverage("ds1_m1", calIdx)
    bkgRuns = bkg.getRunList(ds)
    bkgRanges = set()
    for run in bkgRuns:
        if cLo <= run <= cHi:
            bkgRanges.add(bkg.GetBkgIdx(ds, run))
    bkgIdx = list(bkgRanges)[0]  # it's 35

    # account for sub-ranges
    bkgRuns = bkg.getRunList(ds, bkgIdx)
    subRanges = bkg.GetSubRanges(ds, bkgIdx)
    if len(subRanges) == 0: subRanges.append((bkgRuns[0], bkgRuns[-1]))
    for subIdx, (runLo, runHi) in enumerate(subRanges):
        threshKey = "thresh_ds%d_bkg%d_sub%d" % (
            ds, bkgIdx, subIdx)  # returns "thresh_ds1_bkg35_sub0"

    # load threshKeV values from bkg/auto-thrsh/db
    threshVal = dsi.getDBRecord(threshKey, calDB=calDB, pars=pars)
    chList = []
    print("DB results")
    for chan in sorted(threshVal):
        thrBad = threshVal[chan][2]
        if thrBad: continue
        thrMu = threshVal[chan][0]
        thrSig = threshVal[chan][1]
        thrKeV = thrMu + 3 * thrSig
        print("%d  %.3f  %.3f  %d: %.3f keV" %
              (chan, thrMu, thrSig, thrBad, thrKeV))
        chList.append(chan)

    # ok, now let's load the cal runs themselves
    calRuns = cal.GetCalList("ds1_m1", calIdx)
    fileList = []
    for run in calRuns:
        latList = dsi.getSplitList(
            "%s/latSkimDS%d_run%d*" % (dsi.calLatDir, ds, run), run)
        tmpList = [f for idx, f in sorted(latList.items())]
        fileList.extend(tmpList)

    # declare the output stuff
    evtIdx, evtSumET, evtHitE, evtChans = [], [], [], []
    thrCal = {ch: [] for ch in chList}

    # loop over LAT cal files
    from ROOT import TFile, TTree
    prevRun = 0
    evtCtr, totCtr, runTime = 0, 0, 0
    for iF, f in enumerate(fileList):

        print("%d/%d %s" % (iF, len(fileList), f))
        tf = TFile(f)
        tt = tf.Get("skimTree")

        # increment the run time and fill the output dict of thresholds
        tt.GetEntry(0)
        run = tt.run
        if run != prevRun:
            start = tt.startTime_s
            stop = tt.stopTime_s
            runTime += stop - start

            # before applying thresholds (and getting sumET and mHT)
            # save them into the output dict (so we can compare w/ DB later).
            n = tt.Draw("channel:threshKeV:threshSigma", "", "goff")
            chan, thrM, thrS = tt.GetV1(), tt.GetV2(), tt.GetV3()
            tmpThresh = {}
            for i in range(n):
                if chan[i] not in chList:
                    continue
                if chan[i] in tmpThresh.keys():
                    continue
                thrK = thrM[i] + 3 * thrS[i]
                tmpThresh[chan[i]] = [run, thrM[i], thrS[i], thrK]

            # fill the output dict
            for ch in tmpThresh:
                thrCal[ch].append(tmpThresh[ch])  # [run, thrM, thrS, thrK]

        prevRun = run

        # loop over tree
        for iE in range(tt.GetEntries()):
            tt.GetEntry(iE)
            if tt.EventDC1Bits != 0: continue
            totCtr += 1

            # calculate mHT and sumET

            n = tt.channel.size()
            chTmp = np.asarray([tt.channel.at(i) for i in range(n)])
            idxRaw = [
                i for i in range(tt.channel.size())
                if tt.channel.at(i) in chList
            ]
            hitERaw = np.asarray([tt.trapENFCal.at(i) for i in idxRaw])

            # get indexes of hits above threshold
            idxList = [
                i for i in range(tt.channel.size())
                if tt.channel.at(i) in chList
                and tt.trapENFCal.at(i) > threshVal[tt.channel.at(i)][0] +
                3 * threshVal[tt.channel.at(i)][1]
                and 0.7 < tt.trapENFCal.at(i) < 9999
            ]
            hitE = np.asarray([tt.trapENFCal.at(i) for i in idxList])

            mHT, sumET = len(hitE), sum(hitE)

            # for now, let's just grab m=2 s=238 evts.
            if mHT != 2: continue
            if not 237.28 < sumET < 239.46: continue

            hitChans = np.asarray([tt.channel.at(i) for i in idxList])

            # save event pairs to output
            evtIdx.append([run, iE])
            evtSumET.append(sumET)
            evtHitE.append(hitE)
            evtChans.append(hitChans)
            evtCtr += 1

    # output stats we got
    print("m2s238 evts:", evtCtr, "total evts:", totCtr, "runTime:", runTime)

    # save output
    np.savez("../plots/slo-m2s238-test.npz", evtIdx, evtSumET, evtHitE,
             evtChans, thrCal, evtCtr, totCtr, runTime)
Example 26
 def cacheDrop(self, profile_id):
     with self._lock:
         return self._db.table("ObjectCache").remove(
             (tinydb.Query()['profile_id'] == profile_id))
Example 27
def find_cut(ds, ds_lo, write_db=False):

    #Make tier2 dataframe, get e_ftp and first pass calibration constants, then calibrate
    t2 = ds.get_t2df()
    t2 = t2.reset_index(drop=True)

    calDB = ds.calDB
    query = db.Query()
    table = calDB.table("cal_pass1")
    vals = table.all()
    df_cal = pd.DataFrame(vals)  # load the TinyDB records into a pandas DataFrame
    df_cal = df_cal.loc[df_cal.ds==ds_lo]
    p1cal = df_cal.iloc[0]["p1cal"]
    cal = p1cal * np.asarray(t2["e_ftp"])

    current = "current_max"
    e_over_unc = cal / np.asarray(t2["e_ftp"])
    y0 = np.asarray(t2[current])
    a_over_e = y0 * e_over_unc / cal

    y = linear_correction(cal, a_over_e)

    double_gauss_issue(cal, y, ds_lo)
    exit()  # NOTE: debugging early-exit; everything below is unreachable until removed


    dep_range = [1530,1620]
    hist, bins = np.histogram(cal, bins=450, range=dep_range)
    hist = hist * 5

    def gauss(x, *params):
        y = np.zeros_like(x)
        for i in range(0, len(params) - 1, 3):
            x0 = params[i]
            a = params[i + 1]
            sigma = params[i + 2]
            y += a * np.exp(-(x - x0)**2 / (2 * sigma**2))
        y = y + params[-1]
        return y

    p0_list = [1591, 200, 3, 4]

    par, pcov = curve_fit(
        gauss, bins[1:], hist, p0=p0_list)
    print(par)
    perr = np.sqrt(np.diag(pcov))
    print(perr)

    mu, amp, sig, bkg = par[0], par[1], par[2], par[-1]
    print("Scanning ", mu, " peak")
    ans = quad(gauss, 1583, 1600, args=(mu, amp, sig, bkg))
    counts = ans[0] - ((1600-1583)*bkg)
    print("Counts in ", mu, " peak is ", counts)

    cut = counts
    line = .4

    y1 = y[np.where(line < y)]
    x1 = cal[np.where(line < y)]
    # hist1, bins1 = np.histogram(x1, bins=500, range=[1500,1700])
    hist1, bins1 = np.histogram(x1, bins=450, range=[1530,1620])
    hist1 = hist1*5

    print("Finding optimal cut, keeping 90% of 1592 DEP")
    while cut > .9 * counts:

        y1 = y[np.where(line < y)]
        x1 = cal[np.where(line < y)]

        hist1, bins1 = np.histogram(x1, bins=450, range=dep_range)
        hist1 = hist1*5

        par1, pcov1 = curve_fit(
            gauss, bins1[1:], hist1, p0=p0_list)
        perr1 = np.sqrt(np.diag(pcov1))

        mu1, amp1, sig1, bkg1 = par1[0], par1[1], par1[2], par1[-1]
        ans1 = quad(gauss, 1583, 1600, args=(mu1, amp1, sig1, bkg1))
        cut = ans1[0] - ((1600-1583)*bkg1)

        line += .0005


    print(line, cut)
    plt.hist2d(cal, y, bins=[1000,200], range=[[0, 2000], [0, 2]], norm=LogNorm(), cmap='jet')
    plt.hlines(line, 0, 2000, color='r', linewidth=1.5)
    cbar = plt.colorbar()
    plt.title("Dataset {}".format(ds_lo))
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("A/Eunc", ha='right', y=1)
    cbar.ax.set_ylabel('Counts')
    plt.tight_layout()
    plt.show()

    hist, bins = np.histogram(cal, bins=2000, range=[0,2000])
    hist1, bins1 = np.histogram(x1, bins=2000, range=[0,2000])

    plt.clf()
    plt.semilogy(bins[1:], hist, color='black', ls="steps", linewidth=1.5, label='Calibrated Energy: Dataset {}'.format(ds_lo))
    plt.semilogy(bins1[1:], hist1, '-r', ls="steps", linewidth=1.5, label='AvsE Cut: Dataset {}'.format(ds_lo))
    plt.ylabel('Counts')
    plt.xlabel('keV')
    plt.legend()
    plt.tight_layout()
    plt.show()

    if write_db:
        table = calDB.table("A/E_cut")
        for dset in ds.ds_list:
            row = {"ds":dset, "line":line}
            table.upsert(row, query.ds == dset)
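With write_db set, the stored cut can later be read back the same way the pass-1 calibration constant is loaded at the top of this function. A minimal sketch, assuming the same calDB handle and query object:

# Sketch: read the A/E cut back, mirroring the cal_pass1 lookup above.
table = calDB.table("A/E_cut")
rec = table.get(query.ds == ds_lo)
if rec is not None:
    line = rec["line"]  # the stored cut value for this dataset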
Example 28
 def cacheListAll(self, profile_id):
     with self._lock:
         return self._db.table('ObjectCache').search(
             (tinydb.Query()['profile_id'] == profile_id))
Example 29
def scrapeData():
    from ROOT import TChain

    ds, mod = 1, 1
    print("Loading cut data ...")
    bkg = dsi.BkgInfo()
    cal = dsi.CalInfo()
    nSub = bkg.dsMap()[ds]
    chList = dsi.GetGoodChanList(ds)
    calDB = db.TinyDB('../calDB.json')
    pars = db.Query()

    fsD, rnSD, rnCD = {}, {}, {}
    nCal = cal.GetNCalIdxs(ds, mod)
    for iC in range(nCal + 1):
        fsD[iC] = dsi.getDBRecord("fitSlo_ds%d_idx%d_m%d_Peak" % (ds, iC, mod),
                                  False, calDB, pars)
        rnSD[iC] = dsi.getDBRecord(
            "riseNoise_ds%d_idx%d_m%d_SoftPlus" % (ds, iC, mod), False, calDB,
            pars)
        rnCD[iC] = dsi.getDBRecord(
            "riseNoise_ds%d_idx%d_m%d_Continuum" % (ds, iC, mod), False, calDB,
            pars)
    thD = {}
    for iB in range(nSub + 1):
        thD[iB] = dsi.getDBRecord("thresh_ds%d_bkgidx%d" % (ds, iB), False,
                                  calDB, pars)

    print("Looping over sub-ranges ...")
    hitE, fSlo, chans, runs, fsCut, thMu, thSig = [], [], [], [], [], [], []
    rise, riseCut = [], []
    for sub in range(nSub + 1):
        tc = TChain("skimTree")
        fRaw = sorted(
            glob.glob("%s/latSkimDS%d_%d_*.root" % (dsi.latDir, ds, sub)))
        for f in fRaw:
            tc.Add(f)

        print("%d/%d %d" % (sub, nSub, tc.GetEntries()))

        # for some reason range 44 in ds1 is corrupted?
        n = tc.Draw("trapENFCal:fitSlo:channel:run", "", "goff")
        if n == 0:
            print("skipped", sub)
            continue

        # n = tc.Draw("run:channel:trapENFCal:fitSlo","","goff")
        # t1, t2, t3, t4 = tc.GetV1(), tc.GetV2(), tc.GetV3(), tc.GetV4()

        tn = ["run", "channel", "trapENFCal", "fitSlo", "riseNoise"]
        vals = wl.GetVX(tc, tn, "")
        t1, t2, t3, t4, t5 = vals["run"], vals["channel"], vals[
            "trapENFCal"], vals["fitSlo"], vals["riseNoise"]
        n = len(t1)

        pRun = -1
        for i in range(n):
            run = int(t1[i])
            if run != pRun:
                cIdx = cal.GetCalIdx("ds%d_m%d" % (ds, mod), run)
                bIdx = bkg.GetBkgIdx(ds, run)
                tmpFS = fsD[cIdx]
                tmpTH = thD[bIdx]
                tmpRNC = rnCD[cIdx]
                tmpRNS = rnSD[cIdx]
                fsCutChan = list(tmpFS.keys())
                thCutChan = list(tmpTH.keys())
                rnCutChan = list(tmpRNC.keys())
            pRun = run

            chan = int(t2[i])
            if chan not in chList: continue
            if chan not in fsCutChan: continue
            if chan not in thCutChan: continue
            if chan not in rnCutChan: continue

            fsVal = tmpFS[chan][2]
            thVal = tmpTH[chan][0] + 3 * tmpTH[chan][1]

            a = max(tmpRNS[chan][0], tmpRNC[chan][4])
            b = tmpRNS[chan][1]
            c = tmpRNS[chan][2]
            d = tmpRNS[chan][3]
            if d == 0: continue
            rnVal = a + b * np.log(1 + np.exp((t3[i] - c) / d))  # softplus; note the d==0 guard above

            hitE.append(t3[i])
            fSlo.append(t4[i])
            runs.append(run)
            chans.append(chan)
            fsCut.append(tmpFS[chan][2])
            thMu.append(tmpTH[chan][0])
            thSig.append(tmpTH[chan][1])
            rise.append(t5[i])
            riseCut.append(rnVal)

    # # fCut = sorted(glob.glob("%s/results_v1/fs/fitSlo-DS%d-*.root" % (dsi.dataDir, dsNum)))

    print(len(hitE))
    np.savez("../data/sea-plt-2.npz", hitE, fSlo, chans, runs, fsCut, thMu,
             thSig, rise, riseCut)
Example 30
 def scriptDelete(self, script_id):
     with self._lock:
         return self._db.table('scripts').remove(
             (tinydb.Query()['script_id'] == script_id))