Example 1
    def __init__(self,
                 process,
                 nEvents,
                 config,
                 xsec_cache='xsec_DBv2.db',
                 reweight=False):

        self.process = process
        self.config = config
        self.processCardDir = os.path.join(self.config.data_path,
                                           'processCards')
        self.processCardFile = process + '.dat'
        self.reweight = reweight

        # template process card file
        self.templateProcessCard = os.path.join(self.processCardDir,
                                                self.processCardFile)
        # temporary process card output
        self.tmpProcessCard = os.path.join(self.config.uniquePath,
                                           self.processCardFile)

        # temporary process output directory
        self.processTmpDir = os.path.join(self.config.uniquePath, 'processtmp')
        self.nEvents = nEvents

        self.GP_outputDir = os.path.join(results_directory, 'gridpacks')

        # xsec cache location
        columns = ["process", "nEvents"] + self.config.all_model_couplings
        self.xsecDB = resultsDB(os.path.join(results_directory, xsec_cache),
                                self.config.model_name, columns)
Example 2
def getEstimate(sample, region, channel, overwrite=False):
    ''' to be extended '''
    res = resultsDB(os.path.join(results_directory, "resultsCache_v2.db"),
                    "TopEFT", columns)
    key = {
        "process": sample.name,
        "channel": channel,
        "region": region.cutString(),
        "lumi": lumi,
        "presel": presel,
        "weightString": weightString
    }
    if res.contains(key) and not overwrite:
        print "Found estimate for %s in region %s" % (sample.name, region)
        return res.get(key)
    else:
        print "Adding estimate for %s in region %s" % (sample.name, region)
        y = u_float(
            sample.getYieldFromDraw("&&".join([presel,
                                               region.cutString()]),
                                    weightString))
        res.add(key, y, overwrite=True)
        return y
Example 3
else:
    variations = ["abs(LHEweight_wgt[%s])" % (options.selectWeight)]

results = {}

scale_systematics = {}

cacheDir = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/PDF_v2_%s/" % (
    PDFset)

estimate = MCBasedEstimate(name=sample.name, sample=sample)
estimate.initCache(cacheDir)

## Results DB for scale and PDF uncertainties

PDF_cache = resultsDB(cacheDir + sample.name + '_unc.sq', "PDF",
                      ["region", "channel", "PDFset"])
scale_cache = resultsDB(cacheDir + sample.name + '_unc.sq', "scale",
                        ["region", "channel", "PDFset"])
PS_cache = resultsDB(cacheDir + sample.name + '_unc.sq', "PSscale",
                     ["region", "channel", "PDFset"])
'''
Recommendation from arXiv:1510.03865:
for MC replica sets, sort the obtained values, e.g. in a list, then calculate
    delta(PDF)sigma = (sigma[84] - sigma[16]) / 2
which gives the 68% CL interval.
'''
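
A minimal, self-contained sketch of that prescription (the function name
pdf_uncertainty_mc is an assumption for illustration, not part of the original
code; the indices follow the docstring's convention for a set of 100 MC replicas):

def pdf_uncertainty_mc(sigmas):
    # 68% CL half-width from 100 MC replica cross sections (arXiv:1510.03865):
    # sort the values, then take half the spread between entries 84 and 16.
    s = sorted(sigmas)
    return (s[84] - s[16]) / 2.0

# usage, with hypothetical replica values (get_replica_xsec is hypothetical):
# unc = pdf_uncertainty_mc([get_replica_xsec(i) for i in range(100)])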


def wrapper(args):
    r, c, setup = args
    res = estimate.cachedEstimate(r, c, setup)
    # the source snippet is truncated here; the return value is completed
    # following the wrapper pattern in Example 7 (assumption)
    return (estimate.uniqueKey(r, c, setup), res)
Example 4
import os
import argparse
from RootTools.core.Sample import Sample


from TopEFT.Tools.resultsDB     import resultsDB
from TopEFT.Tools.user          import combineReleaseLocation, analysis_results, results_directory, plot_directory

from TopEFT.Analysis.Setup      import Setup

setup = Setup()

cacheFileName = os.path.join(plot_directory, setup.resultsFile)
columns = ['signal', 'exp', 'obs', 'exp1up', 'exp1down', 'exp2up', 'exp2down', 'NLL_prefit', 'dNLL_postfit_r1', 'dNLL_bestfit']

res = resultsDB(cacheFileName, "limits", columns)

def getResult(sample):
    ''' to be extended '''
    key = {"signal":sample.name}
    if res.contains(key):
        return res.getDicts(key)[0]

def addResult(sample, key, value, overwrite):
    print "Adding result for %s to database %s"%(sample.name, cacheFileName)
    key.update({"signal":sample.name})
    if overwrite:
        res.removeObjects({"signal":sample.name})
    res.add(key, value, overwrite=True)

Example 5
def wrapper(s):
    
    logger.info("Now working on %s", s.name)
    xSecScale = 1
    c = cardFileWriter.cardFileWriter()
    c.releaseLocation = combineReleaseLocation

    for coup in nonZeroCouplings:
        try:
            modification_dict[coup] = getCouplingFromName(s.name, coup)
            logger.info("The following coupling is set to non-zero value: %s: %s", coup, modification_dict[coup])
        except ValueError:
            # the coupling was never added to modification_dict, so don't index it here
            logger.info("The following coupling is kept at zero: %s", coup)
            continue
    xsec = None  # avoid a NameError below if both lookups fail
    try:
        p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB)
        xsec = p.xsecDB.get(modification_dict)
    except IndexError:
        logger.info("Looking into backup DB for x-sec")
        p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB_Backup)
        xsec = p.xsecDB.get(modification_dict)
    if not xsec:
        try:
            p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB_Backup)
            xsec = p.xsecDB.get(modification_dict)
        except IndexError:
            logger.info("No x-sec found.")
    logger.info("Found modified x-sec of %s", xsec)
    
    cardFileName = os.path.join(limitDir, s.name+'.txt')
    if not os.path.exists(cardFileName) or overWrite:
        counter=0
        c.reset()
        c.setPrecision(3)
        postfix = '_%s'%args.year
        c.addUncertainty('PU',                  'lnN') # correlated
        c.addUncertainty('JEC'+postfix,         'lnN') # uncorrelated, for now!
        c.addUncertainty('btag_heavy'+postfix,  'lnN') # uncorrelated, wait for official recommendation
        c.addUncertainty('btag_light'+postfix,  'lnN') # uncorrelated, wait for official recommendation
        c.addUncertainty('trigger'+postfix,     'lnN') # uncorrelated, statistics dominated
        c.addUncertainty('leptonSF',            'lnN') # correlated
        c.addUncertainty('scale',               'lnN') # correlated.
        c.addUncertainty('scale_sig',           'lnN') # correlated.
        c.addUncertainty('PDF',                 'lnN') # correlated.
        c.addUncertainty('PartonShower',        'lnN') # correlated.
        c.addUncertainty('nonprompt',           'lnN') # correlated?!
        c.addUncertainty('WZ_xsec',             'lnN') # correlated.
        c.addUncertainty('WZ_bb',               'lnN') # correlated
        c.addUncertainty('WZ_powheg',           'lnN') # correlated
        c.addUncertainty('ZZ_xsec',             'lnN') # correlated.
        c.addUncertainty('ZG_xsec',             'lnN') # correlated.
        c.addUncertainty('rare',                'lnN') # correlated.
        c.addUncertainty('ttX',                 'lnN') # correlated.
        c.addUncertainty('Lumi'+postfix, 'lnN')

        uncList = ['PU', 'JEC', 'btag_heavy', 'btag_light', 'leptonSF', 'trigger']
        for unc in uncList:
            uncertainties[unc] = []
        
        ## use rate parameters??
        #c.addRateParameter('WZ', 1, '[0,2]')
        #c.addRateParameter('ZZ', 1, '[0,2]')

        for setupPair in setups:
            
            # extract the nominal and nonprompt setup from the pair
            setup, setupNP = setupPair
            
            signal      = MCBasedEstimate(name="TTZ", sample=setup.samples["TTZ"], cacheDir=setup.defaultCacheDir())
            #nonprompt   = FakeEstimate(name="nonPromptDD", sample=setup.samples["Data"], setup=setupNP, cacheDir=setup.defaultCacheDir())
            if args.unblind or (setup == setup3l_CR) or (setup == setup4l_CR):
                observation = DataObservation(name="Data", sample=setup.samples["Data"], cacheDir=setup.defaultCacheDir())
                logger.info("Using data!")
            else:
                observation = MCBasedEstimate(name="observation", sample=setup.samples["pseudoData"], cacheDir=setup.defaultCacheDir())
                logger.info("Using pseudo-data!")
            for e in setup.estimators: e.initCache(setup.defaultCacheDir())

            for r in setup.regions:
                totalBackground = u_float(0)
                for channel in setup.channels:
                    niceName = ' '.join([channel.name, r.__str__()])
                    binname = 'Bin'+str(counter)
                    logger.info("Working on %s", binname)
                    counter += 1
                    c.addBin(binname, [e.name.split('-')[0] for e in setup.estimators]+["nonPromptDD"], niceName)
                    #c.addBin(binname, 'nonPromptDD', niceName)

                    for e in setup.estimators:
                        name = e.name.split('-')[0]
                        if name.count('WZ'):
                            logger.info("Using reweighting to powheg for WZ sample")
                            wzReweighting = WZReweighting( cacheDir = reweightCacheWZ )
                            f = wzReweighting.cachedReweightingFunc( setup.WZselection )
                            powhegExpected = e.reweight1D(r, channel, setup, f)
                            expected = e.cachedEstimate(r, channel, setup)
                            print expected
                            WZ_powheg_unc = (powhegExpected-expected)/expected
                        else:
                            expected = e.cachedEstimate(r, channel, setup)
                        logger.info("Adding expectation %s for process %s", expected.val, name)
                        c.specifyExpectation(binname, name, expected.val if expected.val > 0.01 else 0.01)

                        totalBackground += expected

                        if not args.statOnly:
                            # uncertainties
                            pu          = 1 + e.PUSystematic( r, channel, setup).val            if expected.val>0.01 else 1.1
                            jec         = 1 + e.JECSystematic( r, channel, setup).val           if expected.val>0.01 else 1.1
                            btag_heavy  = 1 + e.btaggingSFbSystematic(r, channel, setup).val    if expected.val>0.01 else 1.1
                            btag_light  = 1 + e.btaggingSFlSystematic(r, channel, setup).val    if expected.val>0.01 else 1.1
                            trigger     = 1 + e.triggerSystematic(r, channel, setup).val        if expected.val>0.01 else 1.1
                            leptonSF    = 1 + e.leptonSFSystematic(r, channel, setup).val       if expected.val>0.01 else 1.1
                            if name.count('WZ'):
                                WZ_powheg   = 1 + WZ_powheg_unc.val                                 if expected.val>0.01 else 1.1

                            c.specifyUncertainty('PU',          binname, name, 1 + e.PUSystematic( r, channel, setup).val)
                            if not name.count('nonprompt'):
                                c.specifyUncertainty('JEC'+postfix,         binname, name, jec)
                                c.specifyUncertainty('btag_heavy'+postfix,  binname, name, btag_heavy)
                                c.specifyUncertainty('btag_light'+postfix,  binname, name, btag_light)
                                c.specifyUncertainty('trigger'+postfix,     binname, name, trigger)
                                c.specifyUncertainty('leptonSF',    binname, name, leptonSF)
                                c.specifyUncertainty('scale',       binname, name, 1.01) 
                                c.specifyUncertainty('PDF',         binname, name, 1.01)
                                c.specifyUncertainty('Lumi'+postfix, binname, name, 1.025 )

                            if name.count('ZZ'):    c.specifyUncertainty('ZZ_xsec',     binname, name, 1.10)
                            if name.count('ZG'):    c.specifyUncertainty('ZG_xsec',     binname, name, 1.20)
                            if name.count('WZ'):
                                c.specifyUncertainty('WZ_xsec',     binname, name, 1.10)
                                if setup == setup3l:
                                    c.specifyUncertainty('WZ_bb',     binname, name, 1.08)
                                c.specifyUncertainty('WZ_powheg',     binname, name, WZ_powheg)
                            
                            if name.count('nonprompt'):    c.specifyUncertainty('nonprompt',   binname, name, 1.30)
                            if name.count('rare'):    c.specifyUncertainty('rare',        binname, name, 1.50)
                            if name.count('TTX'):     c.specifyUncertainty('ttX',         binname, name, 1.11)


                        #MC bkg stat (some condition to neglect the smaller ones?)
                        uname = 'Stat_'+binname+'_'+name+postfix
                        c.addUncertainty(uname, 'lnN')
                        if expected.val > 0:
                            c.specifyUncertainty(uname, binname, name, 1 + expected.sigma/expected.val )
                        else:
                            c.specifyUncertainty(uname, binname, name, 1.01 )
                    
                    uname = 'Stat_'+binname+'_nonprompt'+postfix
                    c.addUncertainty(uname, 'lnN')
                    
                    if setup.nLeptons == 3 and setupNP:
                        nonprompt   = FakeEstimate(name="nonPromptDD", sample=setup.samples["Data"], setup=setupNP, cacheDir=setup.defaultCacheDir())
                        np = nonprompt.cachedEstimate(r, channel, setupNP)
                        if np.val < 0.01:
                            np = u_float(0.01,0.)
                        c.specifyExpectation(binname, 'nonPromptDD', np.val ) 
                        c.specifyUncertainty(uname,   binname, "nonPromptDD", 1 + np.sigma/np.val )
                        c.specifyUncertainty('nonprompt',   binname, "nonPromptDD", 1.30)
                    else:
                        np = u_float(0)
                        c.specifyExpectation(binname, 'nonPromptDD', np.val)
                    
                    if args.expected:
                        sig = signal.cachedEstimate(r, channel, setup)
                        obs = totalBackground + sig + np
                    elif args.unblind or (setup == setup3l_CR) or (setup == setup4l_CR):
                        obs = observation.cachedObservation(r, channel, setup)
                    else:
                        obs = observation.cachedEstimate(r, channel, setup)
                    c.specifyObservation(binname, int(round(obs.val,0)) )


                    if args.useShape:
                        logger.info("Using 2D reweighting method for shapes")
                        if args.model == "dim6top_LO":
                            source_gen = dim6top_central
                        elif args.model == "ewkDM":
                            source_gen = ewkDM_central

                        signalReweighting = SignalReweighting( source_sample = source_gen, target_sample = s, cacheDir = reweightCache)
                        f = signalReweighting.cachedReweightingFunc( setup.genSelection )
                        sig = signal.reweight2D(r, channel, setup, f)
                    else:
                        sig = signal.cachedEstimate(r, channel, setup)

                    xSecMod = 1
                    if args.useXSec:
                        xSecMod = xsec.val/xsec_central.val
                    
                    logger.info("x-sec is multiplied by %s",xSecMod)
                    
                    c.specifyExpectation(binname, 'signal', sig.val * xSecScale * xSecMod )
                    logger.info('Adding signal %s'%(sig.val * xSecScale * xSecMod))
                    
                    if sig.val>0:
                        c.specifyUncertainty('Lumi'+postfix, binname, 'signal', 1.025 )
                        if not args.statOnly:
                            # uncertainties
                            pu          = 1 + e.PUSystematic( r, channel, setup).val
                            jec         = 1 + e.JECSystematic( r, channel, setup).val
                            btag_heavy  = 1 + e.btaggingSFbSystematic(r, channel, setup).val
                            btag_light  = 1 + e.btaggingSFlSystematic(r, channel, setup).val
                            trigger     = 1 + e.triggerSystematic(r, channel, setup).val
                            leptonSF    = 1 + e.leptonSFSystematic(r, channel, setup).val

                            if sig.sigma/sig.val < 0.05:
                                uncertainties['PU']         += [pu]
                                uncertainties['JEC']        += [jec]
                                uncertainties['btag_heavy'] += [btag_heavy]
                                uncertainties['btag_light'] += [btag_light]
                                uncertainties['trigger']    += [trigger]
                                uncertainties['leptonSF']   += [leptonSF]

                            c.specifyUncertainty('PU',                  binname, "signal", pu)
                            c.specifyUncertainty('JEC'+postfix,         binname, "signal", jec)
                            c.specifyUncertainty('btag_heavy'+postfix,  binname, "signal", btag_heavy)
                            c.specifyUncertainty('btag_light'+postfix,  binname, "signal", btag_light)
                            c.specifyUncertainty('trigger'+postfix,     binname, "signal", trigger)
                            c.specifyUncertainty('leptonSF',            binname, "signal", leptonSF)
                            # This doesn't get the right uncertainty in CRs. However, signal doesn't matter there anyway.
                            if setup in [setup3l, setup4l]:
                                c.specifyUncertainty('scale_sig',   binname, "signal", 1 + scale_cache.get({"region":r, "channel":channel.name, "PDFset":"scale"}).val)
                                c.specifyUncertainty('PDF',         binname, "signal", 1 + PDF_cache.get({"region":r, "channel":channel.name, "PDFset":PDFset}).val)
                                c.specifyUncertainty('PartonShower',binname, "signal", PS_cache.get({"region":r, "channel":channel.name, "PDFset":"PSscale"}).val) #something wrong here?
                            #c.specifyUncertainty('scale_sig',   binname, "signal", 1.05) #1.30
                            #c.specifyUncertainty('PDF',         binname, "signal", 1.04) #1.15

                        uname = 'Stat_'+binname+'_signal'+postfix
                        c.addUncertainty(uname, 'lnN')
                        c.specifyUncertainty(uname, binname, 'signal', 1 + sig.sigma/sig.val )
                    else:
                        uname = 'Stat_'+binname+'_signal'+postfix
                        c.addUncertainty(uname, 'lnN')
                        c.specifyUncertainty(uname, binname, 'signal', 1 )

                    
        #c.addUncertainty('Lumi'+postfix, 'lnN')
        #c.specifyFlatUncertainty('Lumi'+postfix, 1.026)
        cardFileName = c.writeToFile(cardFileName)
    else:
        logger.info("File %s found. Reusing.",cardFileName)
    
    res = {}
    
    if not os.path.isdir(limitDir):
        os.makedirs(limitDir)
    resDB = resultsDB(limitDir+'/results.sq', "results", setup.resultsColumns)
    res = {"signal":s.name}
    if not overWrite and resDB.contains(res):
        res = resDB.getDicts(res)[0]
        logger.info("Found result for %s, reusing", s.name)
    else:
        # We don't calculate limits here, but just in case we find a way how to do it, put placeholders here
        res.update({"exp":0, "obs":0, "exp1up":0, "exp2up":0, "exp1down":0, "exp2down":0})
        # Don't extract all the nuisances by default
        signalRegions = range(15,30) ## shouldn't be hardcoded
        masks = ['mask_ch1_Bin'+str(i)+'=1' for i in signalRegions]
        masks = ','.join(masks)

        if args.calcNuisances:
            c.calcNuisances(cardFileName, masks=masks)
        # extract the NLL
        #nll = c.calcNLL(cardFileName, options="")
        nll = c.physicsModel(cardFileName, options="", normList=["WZ_norm","ZZ_norm"], masks=masks) # fastScan turns off profiling
        if nll["nll0"] > 0:
            res.update({"dNLL_postfit_r1":nll["nll"], "dNLL_bestfit":nll["bestfit"], "NLL_prefit":nll["nll0"]})
        else:
            res.update({"dNLL_postfit_r1":-999, "dNLL_bestfit":-999, "NLL_prefit":-999})
            logger.info("Fits failed, adding values -999 as results")
        logger.info("Adding results to database")
        resDB.add(res, nll['nll_abs'], overwrite=True)

    print
    print "NLL results:"
    print "{:>15}{:>15}{:>15}".format("Pre-fit", "Post-fit r=1", "Best fit")
    print "{:15.2f}{:15.2f}{:15.2f}".format(float(res["NLL_prefit"]), float(res["NLL_prefit"])+float(res["dNLL_postfit_r1"]), float(res["NLL_prefit"])+float(res["dNLL_bestfit"]))
    
    for unc in ['PU', 'JEC', 'btag_heavy', 'btag_light', 'trigger', 'leptonSF']:
        if uncertainties.get(unc):  # guard against empty lists, e.g. when running stat-only
            print unc, min(uncertainties[unc]), max(uncertainties[unc])
Example 6
    def initCache(self, cacheDir):
        self.cache = resultsDB(
            os.path.join(cacheDir, 'WZReweightingTemplate.sql'),
            "signalWeights", ["selection", "weight", "source", "target"])
Example 7
cacheDir = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/FEBug/"

# alternatives: TZQToLL_nom, WZTo3LNu_fxfx_nom, TTZToLLNuNu_amc_nom
sample_nom = WZTo3LNu_fxfx_nom
# alternatives: TZQToLL_FEBug, WZTo3LNu_fxfx_FEBug, TTZToLLNuNu_amc_FEBug
sample_FEBug = WZTo3LNu_fxfx_FEBug

estimate_nom = MCBasedEstimate(name=sample_nom.name, sample=sample_nom)
estimate_nom.initCache(cacheDir)

estimate_FEBug = MCBasedEstimate(name=sample_FEBug.name, sample=sample_FEBug)
estimate_FEBug.initCache(cacheDir)

## Results DB for scale and PDF uncertainties

FE_cache = resultsDB(cacheDir + sample_nom.name + '_unc.sq', "PDF",
                     ["region", "channel"])


def wrapper(args):
    e, r, c, setup = args
    res = e.cachedEstimate(r, c, setup, save=True, overwrite=options.overwrite)
    return (e.uniqueKey(r, c, setup), res)


jobs = []

# remove the 'all' channel to avoid unnecessary concurrency; it will be calculated later as the sum of the individual channels
seperateChannels3l = allTrilepChannels
allTrilepChannelNames = [c.name for c in allTrilepChannels]
seperateChannels3l.pop(allTrilepChannelNames.index('all'))
Example 8
key = {"channel":"all", "lumi":lumi, "presel":presel, "weightString":weightString}

r0 = regionsA[0].cutString()
r1 = regionsA[1].cutString()
r2 = regionsA[2].cutString()

#ttZ     = Sample("ttZ", "Events", ["dummy.root"])

def fillResults(process, values, key):
    # build each entry from a copy of the base key instead of re-hardcoding its fields
    for i in range(3):
        k = copy.deepcopy(key)
        k.update({"process": process, "region": regionsA[i].cutString()})
        resultsCache.add(k, values[i], overwrite=True)

resultsCache = resultsDB( os.path.join(results_directory, "resultsCache.db"), "TopEFT", columns )

values = [u(102.2091, 102.2091*0.0075), u(31.6185, 31.6185*0.0143), u(5.3084, 5.3084*0.0352)]
fillResults("ttZ", values, key)

values = [u(30.8514, 30.8514*0.1178), u(2.4059, 2.4059*0.4277), u(0.1, 0.1*0.7393)]
fillResults("fake", values, key)

values = [u(23.1666, 23.1666*0.0438), u(7.4121, 7.4121*0.0758), u(1.8730, 1.8730*0.1537)]
fillResults("WZ", values, key)

values = [u(2.4804, 2.4804*0.0511), u(0.1074, 0.1074*0.2437), u(0.0020,0.0020*2.0613)]
fillResults("ttH", values, key)

values = [u(3.9295, 3.9295*0.0228), u(0.2330, 0.2330*0.0935), u(0.0105, 0.0105*0.4897)]
fillResults("ttW", values, key)
Example 9
#cardDir += "_lowUnc%s"%exp
cardDir += "_lowUnc%s_SRandCR" % exp

#regionsE_COMBINED_xsec_shape_lowUnc_expected_SRandCR
#regionsE_2017_xsec_shape_lowUnc_expected_SRandCR

#/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_2016_xsec_shape_lowUnc_WZreweight_SRandCR/

plane = "currents" if args.parameter in ["DC1V", "DC1A", "cpt", "cpQM"] else "dipoles"

limitDir = os.path.join(baseDir, 'cardFiles', cardDir, subDir,
                        '_'.join([args.model, plane]))

print limitDir
resDB = resultsDB(limitDir + '/results.sq', "results", setup.resultsColumns)

fitKey = "dNLL_postfit_r1" if not args.useBestFit else "dNLL_bestfit"

# get the absolute post-fit NLL value of pure ttZ
if args.model == "ewkDM":
    ttZ_res = resDB.getDicts({"signal": ewkDM_central.name})[-1]
elif args.model == "dim6top_LO":
    ttZ_res = resDB.getDicts({"signal": dim6top_central.name})[-1]

if args.prefit:
    ttZ_NLL_abs = float(ttZ_res["NLL_prefit"])
else:
    ttZ_NLL_abs = float(ttZ_res["NLL_prefit"]) + float(ttZ_res[fitKey])

print "Max Likelihood ttZ SM"
Example 10
def wrapper(s):

    logger.info("Now working on %s", s.name)

    c = cardFileWriter.cardFileWriter()
    c.releaseLocation = combineReleaseLocation
    cards = {}
    
    # get the separate per-year cards
    for year in [2016,2017]:
        
        subDir = ''
        cardDir = "regionsE_%s"%(year)
        if args.useXSec: cardDir += "_xsec"
        if args.useShape: cardDir += "_shape"
        exp = "_expected" if args.expected else ''
        cardDir += "_lowUnc%s"%exp
        if args.includeCR: cardDir += "_SRandCR"


        baseDir = os.path.join(analysis_results)
        limitDir    = os.path.join(baseDir, 'cardFiles', cardDir, subDir, '_'.join([args.model, args.signal]))
        #resDB = resultsDB(limitDir+'/results.sq', "results", setup.resultsColumns)
        
        cardFileName = os.path.join(limitDir, s.name+'.txt')

        if not os.path.isfile(cardFileName):
            raise IOError("File %s doesn't exist!"%cardFileName)

        cards[year] = cardFileName
    
    limitDir = limitDir.replace('2017','COMBINED')
    
    # run combine and store results in sqlite database
    if not os.path.isdir(limitDir):
        os.makedirs(limitDir)
    resDB = resultsDB(limitDir+'/results.sq', "results", setup.resultsColumns)
    res = {"signal":s.name}

    overWrite = True

    if not overWrite and resDB.contains(res):
        res = resDB.getDicts(res)[0]
        logger.info("Found result for %s, reusing", s.name)

    else:
        signalRegions = range(15,30)
        masks_2016 = ['mask_ch1_dc_2016_Bin'+str(i)+'=1' for i in signalRegions]
        masks_2017 = ['mask_ch1_dc_2017_Bin'+str(i)+'=1' for i in signalRegions]
    
        masks = ','.join(masks_2016+masks_2017)
        
        combinedCard = c.combineCards( cards )
        
        # We don't calculate limits here, but just in case we find a way how to do it, put placeholders here
        res.update({"exp":0, "obs":0, "exp1up":0, "exp2up":0, "exp1down":0, "exp2down":0})
        # Don't extract all the nuisances by default

        if args.calcNuisances:
            c.calcNuisances(combinedCard, masks=masks)

        #nll = c.physicsModel(combinedCard, options="", normList=["WZ_norm","ZZ_norm"], masks=masks) # fastScan turns off profiling
        nll = c.calcNLL(combinedCard)

        if nll["nll0"] > 0:
            res.update({"dNLL_postfit_r1":nll["nll"], "dNLL_bestfit":nll["bestfit"], "NLL_prefit":nll["nll0"]})
        else:
            res.update({"dNLL_postfit_r1":-999, "dNLL_bestfit":-999, "NLL_prefit":-999})
            logger.info("Fits failed, adding values -999 as results")
        logger.info("Adding results to database")
        resDB.add(res, nll['nll_abs'], overwrite=True)


    logger.info("Results stored in %s", limitDir)
Example 11
    def initCache(self, cacheDir):
        self.cache = resultsDB(os.path.join(cacheDir, 'puProfiles_v2.sql'),
                               "puProfile", ["selection", "weight", "source"])
Example 12
    def initCache(self, filename):
        self.filename = filename
        self.columns = ["region", "channel", "weights", "modification", "lumi"]
        self.DB = resultsDB(filename, "Cache", self.columns)
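
Taken together, the examples above exercise a small resultsDB interface: the
constructor resultsDB(filename, table, columns) plus add, get, contains,
getDicts, and removeObjects. A minimal usage sketch, assuming only the calls
that appear in these examples and a hypothetical cache path and schema (the
exact return types, e.g. u_float vs. plain float, depend on the
TopEFT.Tools.resultsDB implementation):

from TopEFT.Tools.resultsDB import resultsDB

# hypothetical schema and cache location
columns = ["process", "region", "channel"]
db = resultsDB("/tmp/exampleCache.sq", "Example", columns)

key = {"process": "ttZ", "region": "SR1", "channel": "all"}
if not db.contains(key):
    db.add(key, 1.234, overwrite=True)   # store a value under this key
value = db.get(key)                      # read the cached value back
rows = db.getDicts({"process": "ttZ"})   # all rows matching a partial key
db.removeObjects({"process": "ttZ"})     # drop the matching entries again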