Example 1
    def _addShapeNuisances(self, nuisances, effects, opts, suffix, yields):
        # local copy
        shapeNu = OrderedDict()

        self._addWWShapeNuisances(shapeNu, effects)
        self._addInterfShapeNuisances(shapeNu, effects)
        self._addExperimentalShapeNuisances(shapeNu, effects, suffix, yields)

        if 'shapeFlags' not in opts:
            raise RuntimeError('shapeFlags not found among the options')
        flags = opts['shapeFlags']

        nus = set(shapeNu.keys())
        dummy = nus.copy()
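        # each (pattern, flag) pair enables (True) or disables (False) the shape
        # nuisances whose names match the fnmatch-style glob pattern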
        for exp,flag in flags:
            subset = set(fnmatch.filter(nus,exp))
            if flag:
                dummy |= subset
            else:
                dummy -= subset

        for eff in shapeNu:
            if eff not in dummy: continue
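            # delete and re-insert so the shape nuisance ends up at the end of the OrderedDict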
            if eff in nuisances: del nuisances[eff]
            nuisances[eff] = shapeNu[eff]
Example 3
    def __init__(self, ddPath, noWWddAbove, shape=True, isssactive=False, statmode='unified'):
        self._common       = OrderedDict()
        self._0jetOnly     = OrderedDict()
        self._1jetOnly     = OrderedDict()
        self._ddEstimates  = OrderedDict()
        self._shape        = shape
        self._isssactive   = isssactive
        # to options
        self.statShapeVeto = []
        self.expShapeVeto  = OrderedDict()
        self._statmode     = statmode

        # data driven reader and filter for the ww
        self._ddreader      = datadriven.DDCardReader(ddPath)
        self._wwddfilter    = datadriven.DDWWFilter(self._ddreader, noWWddAbove)
Example 4
    def _build(self, paths):
        jsonDir = self.tmpDir+'jsons/'
        lumiDir = self.tmpDir+'lumi/'
        os.system('mkdir -p '+self.tmpDir)
        os.system('mkdir -p '+jsonDir)
        os.system('mkdir -p '+lumiDir)

        (jBase,jExt) = os.path.splitext(self.json)
        self.paths = OrderedDict()
        for p in paths:
            runMin,runMax,hlt = self.pRegex.match(p).group(1,2,3)
            # patch
            json = jsonDir+jBase+'.'+runMin+'-'+runMax+'.'+hlt.replace('*','X')+'.json' if self.splitJson else self.json
            lumi = lumiDir+jBase+'.'+runMin+'-'+runMax+'.'+hlt.replace('*','X')+'.lumi' if self.splitJson else lumiDir+jBase+'.'+hlt.replace('*','X')+'.lumi'
            self.paths[hlt] = dict(
                [('runMin',runMin),
                 ('runMax',runMax),
                 ('json',json),
                 ('lumi',lumi)
                ])
Example 5
    def _build(self, paths):
        jsonDir = self.tmpDir + "jsons/"
        lumiDir = self.tmpDir + "lumi/"
        os.system("mkdir -p " + self.tmpDir)
        os.system("mkdir -p " + jsonDir)
        os.system("mkdir -p " + lumiDir)

        (jBase, jExt) = os.path.splitext(self.json)
        self.paths = OrderedDict()
        for p in paths:
            runMin, runMax, hlt = self.pRegex.match(p).group(1, 2, 3)
            # patch
            json = (
                jsonDir + jBase + "." + runMin + "-" + runMax + "." + hlt.replace("*", "X") + ".json"
                if self.splitJson
                else self.json
            )
            lumi = (
                lumiDir + jBase + "." + runMin + "-" + runMax + "." + hlt.replace("*", "X") + ".lumi"
                if self.splitJson
                else lumiDir + jBase + "." + hlt.replace("*", "X") + ".lumi"
            )
            self.paths[hlt] = dict([("runMin", runMin), ("runMax", runMax), ("json", json), ("lumi", lumi)])
Example 6
class Scanner():
    def __init__(self, label, paths, json, tmp):
        self.label = label
        self.json  = json
        self.tmpDir = tmp
        self.pRegex = re.compile('(\d*)-(\d*):(.*)')
        self.splitJson = False
        self.force = False
        self._build(paths)

        fields = [' *(\d+) ',        # run number
                  ' *(\d+) ',        # ls
                  ' *([^ ]+) ',      # hlt
                  ' *([^ ]+) ',      # l1seed
                  ' *(\d+) ',        # hlt prescale
                  ' *(\d+) ',        # l1 prescale
                  ' *([0-9\.]+) ',    # recorded
                  ' *([0-9\.]+) ',    # effective
                 ]

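        # one '|'-delimited row of the 'lumiCalc2.py ... lumibyls' output table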
        self.lRegex = re.compile('\|'+'\|'.join(fields)+'\|')


    def _build(self, paths):
        jsonDir = self.tmpDir+'jsons/'
        lumiDir = self.tmpDir+'lumi/'
        os.system('mkdir -p '+self.tmpDir)
        os.system('mkdir -p '+jsonDir)
        os.system('mkdir -p '+lumiDir)

        (jBase,jExt) = os.path.splitext(self.json)
        self.paths = OrderedDict()
        for p in paths:
            runMin,runMax,hlt = self.pRegex.match(p).group(1,2,3)
            # patch
            json = jsonDir+jBase+'.'+runMin+'-'+runMax+'.'+hlt.replace('*','X')+'.json' if self.splitJson else self.json
            lumi = lumiDir+jBase+'.'+runMin+'-'+runMax+'.'+hlt.replace('*','X')+'.lumi' if self.splitJson else lumiDir+jBase+'.'+hlt.replace('*','X')+'.lumi'
            self.paths[hlt] = dict(
                [('runMin',runMin),
                 ('runMax',runMax),
                 ('json',json),
                 ('lumi',lumi)
                ])
        #     hltPathRange[hlt] = (runMin,runMax,json,lumi)
    
    def dload(self):
        self.start = time.time()
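        # one LumiDownloader per missing .lumi file; _monitor() below waits for the worker threads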
        for p,info in self.paths.iteritems():
            print p, self.paths[p]
            if not os.path.exists(info['lumi']) or self.force:
                cmd =  'lumiCalc2.py -i '+info['json']+' -hltpath "'+p+'" lumibyls >& '+info['lumi'] 
                d = LumiDownloader(cmd)
                d.start() 

        self._monitor()

    def __del__(self):
        pass

    def _monitor(self):
        while threading.activeCount() > 1:
            time.sleep(10.)
#             print threading.activeCount() 
            now = time.time()
            print '\rActive threads',threading.activeCount(),' - Elapsed time: ',(now-self.start),
            sys.stdout.flush()

    def analyze(self):
        for p, info in self.paths.iteritems():
            nLumiSections = 0
            lumiRecordedByRun = {}
            lumiPrescaledByRun = {}
    #         print '-'+hltName+'---------------------------------'
            allRuns = {}
            hltRunsPS = {}
            l1seedRunsPS = {}
            runMin = int(info['runMin'])
            runMax = int(info['runMax'])
            firstRun = 999999
            lastRun = 1

            lumiFile = open(info['lumi'])
            for line in lumiFile:
                m = self.lRegex.match(line)
                if m is None:
                #             print 'NoMatch',line
                    continue
                (run,ls,hlt,l1,psHlt,psL1,recL, effL) = m.group(1,2,3,4,5,6,7,8)
                nLumiSections += 1
                recordedLumi = float(recL)

                iRun = int(run)
                firstRun = min(firstRun,iRun)
                lastRun  = max(lastRun, iRun)
    #             if iRun < runMin or iRun > runMax:
    #                 raise RuntimeError('porcazzoccola')
                if iRun not in allRuns:
                    allRuns[iRun] = 1
                    lumiRecordedByRun[iRun] = recordedLumi
                else:
                    allRuns[iRun] += 1
                    lumiRecordedByRun[iRun] += recordedLumi

                # 1 no prescale
                # 0 completely masked
                if psHlt != '1':
                    if iRun not in hltRunsPS:
                        hltRunsPS[iRun] = 1
                    else:
                        hltRunsPS[iRun] += 1 

        #             print  (run,ls,hlt,l1,psHlt,psL1)
                if psL1 != '1':
                    if iRun not in l1seedRunsPS:
                        l1seedRunsPS[iRun] = 1
                    else:
                        l1seedRunsPS[iRun] += 1 
                
                # count the lost luminosity
                if psHlt != '1':# or psL1 != '1':
                    if iRun not in lumiPrescaledByRun:
                        lumiPrescaledByRun[iRun] = recordedLumi 
                    else:
                        lumiPrescaledByRun[iRun] += recordedLumi 

    #         print 'summary'
    #         print '\nRuns:\n','{'+', '.join([ str(r)+': '+str(allRuns[r]) for r in sorted(allRuns)])+'}'
    #         print '\nHLTPrescaled:\n','{'+', '.join([ str(r)+': '+str(hltRunsPS[r]) for r in sorted(hltRunsPS)])+'}'
    #         print '\nL1Prescaled:\n','{'+', '.join([ str(r)+': '+str(l1seedRunsPS[r]) for r in sorted(l1seedRunsPS)])+'}'


            if nLumiSections == 0:
                raise RuntimeError('No lumisections found for HLT path '+p)
            
            info['hltRunsPS'] = hltRunsPS
            info['l1seedRunsPS'] = l1seedRunsPS
            info['firstRun'] = firstRun 
            info['lastRun'] = lastRun
            info['nLumiSections'] = nLumiSections
            info['lumiRecordedByRun'] = lumiRecordedByRun
            info['lumiPrescaledByRun'] = lumiPrescaledByRun

    def details(self,label):
        print '*'*100
        print '* {0:<96} *'.format(label)
        print '*'*100
        for p, info in self.paths.iteritems():
            print 'Hlt Path:',p
            rangeMin = int(info['runMin'])
            rangeMax = int(info['runMax'])
            firstRun = info['firstRun']
            lastRun  = info['lastRun']
            print '   Range of existence: ({0},{1})'.format(firstRun,lastRun)
            print '   HWW active range: ({0},{1})'.format(rangeMin,rangeMax)
            hltPSRuns = info['hltRunsPS']
            l1PSRuns  = info['l1seedRunsPS']
            sortedHltRunsPS     = sorted(hltPSRuns)
            sortedL1seedRunsPS  = sorted(l1PSRuns)

            hltPSInRange = [ x for x in sortedHltRunsPS if (x >= rangeMin and x <= rangeMax)]
            l1PSInRange = [ x for x in sortedL1seedRunsPS if (x >= rangeMin and x <= rangeMax)]

            print '   Prescaled runs -'
            print '   Active range: [{0}][{1}]'.format(len(hltPSInRange),len(l1PSInRange))
            print '   + HLT [{0}]:'.format(len(hltPSInRange)),', '.join(['{0}[{1}]'.format(p,hltPSRuns[p]) for p in hltPSInRange])
            print '   + L1  [{0}]:'.format(len(l1PSInRange)),', '.join(['{0}[{1}]'.format(p,l1PSRuns[p]) for p in l1PSInRange])
            print '   Full range: [{0}][{1}]'.format(len(hltPSRuns),len(l1PSRuns))
            print '   + HLT [{0}]:'.format(len(hltPSRuns)),', '.join(['{0}[{1}]'.format(p,hltPSRuns[p]) for p in hltPSRuns])
            print '   + L1  [{0}]:'.format(len(l1PSRuns)),', '.join(['{0}[{1}]'.format(p,l1PSRuns[p]) for p in l1PSRuns])

            print '-'*100


    def summarize(self,label):
        ljust = 45
        print '\n-- Summary',label,'--------------------'
        print 'HLT path'.ljust(ljust),' runMin-runMax: [hltNrange|l1Nrange] (hlt1st,hlt2nd,hltlast,hltN|l11st,l12nd,l1last,l1N )'
        for p, info in self.paths.iteritems():
            runMin    = int(info['runMin'])
            runMax    = int(info['runMax'])
            hltRunsPS = info['hltRunsPS']
            l1seedPS  = info['l1seedRunsPS']
            sortedHltRunsPS     = sorted(hltRunsPS)
            sortedL1seedRunsPS  = sorted(l1seedPS)
            lumiRecordedByRun   = info['lumiRecordedByRun']
            lumiPrescaledByRun  = info['lumiPrescaledByRun']

        #         onePercent = info['nLumiSections']*0.01
        #         print onePercent

            hltPSDump    = [
                '%d[%d],' % (sortedHltRunsPS[0],hltRunsPS[sortedHltRunsPS[0]])  if sortedHltRunsPS else 'None',
                '%d[%d]...' % (sortedHltRunsPS[1],hltRunsPS[sortedHltRunsPS[1]])  if len(sortedHltRunsPS)>1 else 'None',
                '%d[%d]' % (sortedHltRunsPS[-1],hltRunsPS[sortedHltRunsPS[-1]])  if sortedHltRunsPS else 'None',
                str(len(sortedHltRunsPS)),
            ]
            l1seedPSDump = [
                '%d[%d],' % (sortedL1seedRunsPS[0],l1seedPS[sortedL1seedRunsPS[0]])  if sortedL1seedRunsPS else 'None',
                '%d[%d]...' % (sortedL1seedRunsPS[1],l1seedPS[sortedL1seedRunsPS[1]])  if len(sortedL1seedRunsPS)>1 else 'None',
                '%d[%d]' % (sortedL1seedRunsPS[-1],l1seedPS[sortedL1seedRunsPS[-1]])  if sortedL1seedRunsPS else 'None',
                str(len(sortedL1seedRunsPS)),
            ]

            hltPSInRange = [ x for x in sortedHltRunsPS if (x >= runMin and x <= runMax)]
            l1seedPSInRange = [ x for x in sortedL1seedRunsPS if (x >= runMin and x <= runMax)]
            
            print p.ljust(ljust),(str(runMin)+'-'+str(runMax)).ljust(13),
            print (': ['+str(len(hltPSInRange))+'|'+str(len(l1seedPSInRange))+']').ljust(10),
            print '('+(' '.join(hltPSDump)).ljust(20),'|',' '.join(l1seedPSDump),')'


        #         print 'xx',first,min(hltRunsPS)
        #         print 'xx',last,max(hltRunsPS)
        #         print last-first+1

    def makeplots(self,out):
        for p, info in self.paths.iteritems():
            runMin    = int(info['runMin'])
            runMax    = int(info['runMax'])
            hltRunsPS = info['hltRunsPS']
            l1seedPS  = info['l1seedRunsPS']
            sortedHltRunsPS     = sorted(hltRunsPS)
            sortedL1seedRunsPS  = sorted(l1seedPS)
            lumiRecordedByRun   = info['lumiRecordedByRun']
            lumiPrescaledByRun  = info['lumiPrescaledByRun']

            out.cd()
            first = info['firstRun']
            last = info['lastRun']
            
            name = p.replace('*','X')
            hHlt = ROOT.TH1F(name,p+';Run #R;# of prescaled LS',last-first+1, first, last+1)
            hHlt.SetBit(ROOT.TH1.kNoStats)
            xax = hHlt.GetXaxis()
            for run,nls in hltRunsPS.iteritems():
                hHlt.Fill(run,nls)
                xax.SetBinLabel(xax.FindBin(run),str(run))
            xax.SetBinLabel(1,str(first))
            xax.SetBinLabel(xax.GetNbins(),str(last))
            hHlt.Write()

            totLumi = 0
            preLumi = 0 
            hPrescaledLumiFraction = ROOT.TH1D(name+'_lumiFrac',p+' Prescaled/Recorded;Run #R;prescaled/recorded',last-first+1, first, last+1)
            hPrescaledLumiFraction.SetBit(ROOT.TH1.kNoStats)
            hPrescaledLumiFraction.SetLineColor(ROOT.kRed)
            hPrescaledLumiFraction.SetLineWidth(2)
            xax = hPrescaledLumiFraction.GetXaxis()

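            # fill each run bin with the cumulative prescaled/recorded luminosity fraction;
            # runs with no recorded lumi keep the previous value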
            ratio = 0.
            xMin = int(xax.GetXmin())
            xMax = int(xax.GetXmax())
            for iRun in xrange(xMin,xMax):
                run = int(iRun)
                if run not in lumiRecordedByRun:
                    hPrescaledLumiFraction.SetBinContent(xax.FindBin(run), ratio )
                    continue
                totLumi += lumiRecordedByRun[run]
                if run in lumiPrescaledByRun:
                    preLumi += lumiPrescaledByRun[run]
                    xax.SetBinLabel(xax.FindBin(run),str(run))
                ratio = preLumi/totLumi
                hPrescaledLumiFraction.SetBinContent(xax.FindBin(run), ratio )

            xax.SetBinLabel(1,str(first))
            xax.SetBinLabel(xax.GetNbins(),str(last))
            hPrescaledLumiFraction.Write()
Example 7
            hPrescaledLumiFraction.Write()





if __name__ == '__main__':
    motherJson='certifiedToScan.json'
    cwd = os.getcwd()
    tmpDir  = cwd+'/psdata/'


    allPaths = OrderedDict([
        ('singleMu',singleMuDataPaths),
#         ('singleEl',singleElDataPaths),
#         ('doubleEl',doubleElDataPaths),
#         ('doubleMu',doubleMuDataPaths),
#         ('muEg',muEGDataPaths),
    ])

    out = ROOT.TFile.Open('psPlots.root','recreate')
    for p,pathList in allPaths.iteritems():
        s = Scanner(p,pathList,motherJson,tmpDir)
        s.analyze()
        s.details(p)
        s.summarize(p)
        s.makeplots(out)
    out.Close()
    sys.exit(0)

Example 8
class Scanner:
    def __init__(self, label, paths, json, tmp):
        self.label = label
        self.json = json
        self.tmpDir = tmp
        self.pRegex = re.compile("(\d*)-(\d*):(.*)")
        self.splitJson = False
        self.force = False
        self._build(paths)

        fields = [
            " *(\d+) ",  # run number
            " *(\d+) ",  # ls
            " *([^ ]+) ",  # hlt
            " *([^ ]+) ",  # l1seed
            " *(\d+) ",  # hlt prescale
            " *(\d+) ",  # l1 prescale
            " *([0-9\.]+) ",  # recorded
            " *([0-9\.]+) ",  # effective
        ]

        self.lRegex = re.compile("\|" + "\|".join(fields) + "\|")

    def _build(self, paths):
        jsonDir = self.tmpDir + "jsons/"
        lumiDir = self.tmpDir + "lumi/"
        os.system("mkdir -p " + self.tmpDir)
        os.system("mkdir -p " + jsonDir)
        os.system("mkdir -p " + lumiDir)

        (jBase, jExt) = os.path.splitext(self.json)
        self.paths = OrderedDict()
        for p in paths:
            runMin, runMax, hlt = self.pRegex.match(p).group(1, 2, 3)
            # patch
            json = (
                jsonDir + jBase + "." + runMin + "-" + runMax + "." + hlt.replace("*", "X") + ".json"
                if self.splitJson
                else self.json
            )
            lumi = (
                lumiDir + jBase + "." + runMin + "-" + runMax + "." + hlt.replace("*", "X") + ".lumi"
                if self.splitJson
                else lumiDir + jBase + "." + hlt.replace("*", "X") + ".lumi"
            )
            self.paths[hlt] = dict([("runMin", runMin), ("runMax", runMax), ("json", json), ("lumi", lumi)])
        #     hltPathRange[hlt] = (runMin,runMax,json,lumi)

    def dload(self):
        self.start = time.time()
        for p, info in self.paths.iteritems():
            print p, self.paths[p]
            if not os.path.exists(info["lumi"]) or self.force:
                cmd = "lumiCalc2.py -i " + info["json"] + ' -hltpath "' + p + '" lumibyls >& ' + info["lumi"]
                d = LumiDownloader(cmd)
                d.start()

        self._monitor()

    def __del__(self):
        pass

    def _monitor(self):
        while threading.activeCount() > 1:
            time.sleep(10.0)
            #             print threading.activeCount()
            now = time.time()
            print "\rActive threads", threading.activeCount(), " - Elapsed time: ", (now - self.start),
            sys.stdout.flush()

    def analyze(self):
        for p, info in self.paths.iteritems():
            nLumiSections = 0
            lumiRecordedByRun = {}
            lumiPrescaledByRun = {}
            #         print '-'+hltName+'---------------------------------'
            allRuns = {}
            hltRunsPS = {}
            l1seedRunsPS = {}
            runMin = int(info["runMin"])
            runMax = int(info["runMax"])
            firstRun = 999999
            lastRun = 1

            lumiFile = open(info["lumi"])
            for line in lumiFile:
                m = self.lRegex.match(line)
                if m is None:
                    #             print 'NoMatch',line
                    continue
                (run, ls, hlt, l1, psHlt, psL1, recL, effL) = m.group(1, 2, 3, 4, 5, 6, 7, 8)
                nLumiSections += 1
                recordedLumi = float(recL)

                iRun = int(run)
                firstRun = min(firstRun, iRun)
                lastRun = max(lastRun, iRun)
                #             if iRun < runMin or iRun > runMax:
                #                 raise RuntimeError('porcazzoccola')
                if iRun not in allRuns:
                    allRuns[iRun] = 1
                    lumiRecordedByRun[iRun] = recordedLumi
                else:
                    allRuns[iRun] += 1
                    lumiRecordedByRun[iRun] += recordedLumi

                # 1 no prescale
                # 0 completely masked
                if psHlt != "1":
                    if iRun not in hltRunsPS:
                        hltRunsPS[iRun] = 1
                    else:
                        hltRunsPS[iRun] += 1

                #             print  (run,ls,hlt,l1,psHlt,psL1)
                if psL1 != "1":
                    if iRun not in l1seedRunsPS:
                        l1seedRunsPS[iRun] = 1
                    else:
                        l1seedRunsPS[iRun] += 1

                # count the lost luminosity
                if psHlt != "1":  # or psL1 != '1':
                    if iRun not in lumiPrescaledByRun:
                        lumiPrescaledByRun[iRun] = recordedLumi
                    else:
                        lumiPrescaledByRun[iRun] += recordedLumi

            #         print 'summary'
            #         print '\nRuns:\n','{'+', '.join([ str(r)+': '+str(allRuns[r]) for r in sorted(allRuns)])+'}'
            #         print '\nHLTPrescaled:\n','{'+', '.join([ str(r)+': '+str(hltRunsPS[r]) for r in sorted(hltRunsPS)])+'}'
            #         print '\nL1Prescaled:\n','{'+', '.join([ str(r)+': '+str(l1seedRunsPS[r]) for r in sorted(l1seedRunsPS)])+'}'

            if nLumiSections == 0:
                raise RuntimeError("No lumisections found for HLT path " + p)

            info["hltRunsPS"] = hltRunsPS
            info["l1seedRunsPS"] = l1seedRunsPS
            info["firstRun"] = firstRun
            info["lastRun"] = lastRun
            info["nLumiSections"] = nLumiSections
            info["lumiRecordedByRun"] = lumiRecordedByRun
            info["lumiPrescaledByRun"] = lumiPrescaledByRun

    def details(self, label):
        print "*" * 100
        print "* {0:<96} *".format(label)
        print "*" * 100
        for p, info in self.paths.iteritems():
            print "Hlt Path:", p
            rangeMin = int(info["runMin"])
            rangeMax = int(info["runMax"])
            firstRun = info["firstRun"]
            lastRun = info["lastRun"]
            print "   Range of existance: ({0},{1})".format(firstRun, lastRun)
            print "   HWW active range: ({0},{1})".format(rangeMin, rangeMax)
            hltPSRuns = info["hltRunsPS"]
            l1PSRuns = info["l1seedRunsPS"]
            sortedHltRunsPS = sorted(hltPSRuns)
            sortedL1seedRunsPS = sorted(l1PSRuns)

            hltPSInRange = [x for x in sortedHltRunsPS if (x >= rangeMin and x <= rangeMax)]
            l1PSInRange = [x for x in sortedL1seedRunsPS if (x >= rangeMin and x <= rangeMax)]

            print "   Prescaled runs -"
            print "   Active range: [{0}][{1}]".format(len(hltPSInRange), len(l1PSInRange))
            print "   + HLT [{0}]:".format(len(hltPSInRange)), ", ".join(
                ["{0}[{1}]".format(p, hltPSRuns[p]) for p in hltPSInRange]
            )
            print "   + L1  [{0}]:".format(len(l1PSInRange)), ", ".join(
                ["{0}[{1}]".format(p, l1PSRuns[p]) for p in l1PSInRange]
            )
            print "   Full range: [{0}][{1}]".format(len(hltPSRuns), len(l1PSRuns))
            print "   + HLT [{0}]:".format(len(hltPSRuns)), ", ".join(
                ["{0}[{1}]".format(p, hltPSRuns[p]) for p in hltPSRuns]
            )
            print "   + L1  [{0}]:".format(len(l1PSRuns)), ", ".join(
                ["{0}[{1}]".format(p, l1PSRuns[p]) for p in l1PSRuns]
            )

            print "-" * 100

    def summarize(self, label):
        ljust = 45
        print "\n-- Summary", label, "--------------------"
        print "HLT path".ljust(
            ljust
        ), " runMin-runMax: [hltNrange|l1Nrange] (hlt1st,hlt2nd,hltlast,hltN|l11st,l12nd,l1last,l1N )"
        for p, info in self.paths.iteritems():
            runMin = int(info["runMin"])
            runMax = int(info["runMax"])
            hltRunsPS = info["hltRunsPS"]
            l1seedPS = info["l1seedRunsPS"]
            sortedHltRunsPS = sorted(hltRunsPS)
            sortedL1seedRunsPS = sorted(l1seedPS)
            lumiRecordedByRun = info["lumiRecordedByRun"]
            lumiPrescaledByRun = info["lumiPrescaledByRun"]

            #         onePercent = info['nLumiSections']*0.01
            #         print onePercent

            hltPSDump = [
                "%d[%d]," % (sortedHltRunsPS[0], hltRunsPS[sortedHltRunsPS[0]]) if sortedHltRunsPS else "None",
                "%d[%d]..." % (sortedHltRunsPS[1], hltRunsPS[sortedHltRunsPS[1]])
                if len(sortedHltRunsPS) > 1
                else "None",
                "%d[%d]" % (sortedHltRunsPS[-1], hltRunsPS[sortedHltRunsPS[-1]]) if sortedHltRunsPS else "None",
                str(len(sortedHltRunsPS)),
            ]
            l1seedPSDump = [
                "%d[%d]," % (sortedL1seedRunsPS[0], l1seedPS[sortedL1seedRunsPS[0]]) if sortedL1seedRunsPS else "None",
                "%d[%d]..." % (sortedL1seedRunsPS[1], l1seedPS[sortedL1seedRunsPS[1]])
                if len(sortedL1seedRunsPS) > 1
                else "None",
                "%d[%d]" % (sortedL1seedRunsPS[-1], l1seedPS[sortedL1seedRunsPS[-1]]) if sortedL1seedRunsPS else "None",
                str(len(sortedL1seedRunsPS)),
            ]

            hltPSInRange = [x for x in sortedHltRunsPS if (x >= runMin and x <= runMax)]
            l1seedPSInRange = [x for x in sortedL1seedRunsPS if (x >= runMin and x <= runMax)]

            print p.ljust(ljust), (str(runMin) + "-" + str(runMax)).ljust(13),
            print (": [" + str(len(hltPSInRange)) + "|" + str(len(l1seedPSInRange)) + "]").ljust(10),
            print "(" + (" ".join(hltPSDump)).ljust(20), "|", " ".join(l1seedPSDump), ")"

        #         print 'xx',first,min(hltRunsPS)
        #         print 'xx',last,max(hltRunsPS)
        #         print last-first+1

    def makeplots(self, out):
        for p, info in self.paths.iteritems():
            runMin = int(info["runMin"])
            runMax = int(info["runMax"])
            hltRunsPS = info["hltRunsPS"]
            l1seedPS = info["l1seedRunsPS"]
            sortedHltRunsPS = sorted(hltRunsPS)
            sortedL1seedRunsPS = sorted(l1seedPS)
            lumiRecordedByRun = info["lumiRecordedByRun"]
            lumiPrescaledByRun = info["lumiPrescaledByRun"]

            out.cd()
            first = info["firstRun"]
            last = info["lastRun"]

            name = p.replace("*", "X")
            hHlt = ROOT.TH1F(name, p + ";Run #R;# of prescaled LS", last - first + 1, first, last + 1)
            hHlt.SetBit(ROOT.TH1.kNoStats)
            xax = hHlt.GetXaxis()
            for run, nls in hltRunsPS.iteritems():
                hHlt.Fill(run, nls)
                xax.SetBinLabel(xax.FindBin(run), str(run))
            xax.SetBinLabel(1, str(first))
            xax.SetBinLabel(xax.GetNbins(), str(last))
            hHlt.Write()

            totLumi = 0
            preLumi = 0
            hPrescaledLumiFraction = ROOT.TH1D(
                name + "_lumiFrac",
                p + " Prescaled/Recorded;Run #R;prescaled/recorded",
                last - first + 1,
                first,
                last + 1,
            )
            hPrescaledLumiFraction.SetBit(ROOT.TH1.kNoStats)
            hPrescaledLumiFraction.SetLineColor(ROOT.kRed)
            hPrescaledLumiFraction.SetLineWidth(2)
            xax = hPrescaledLumiFraction.GetXaxis()

            ratio = 0.0
            xMin = int(xax.GetXmin())
            xMax = int(xax.GetXmax())
            for iRun in xrange(xMin, xMax):
                run = int(iRun)
                if run not in lumiRecordedByRun:
                    hPrescaledLumiFraction.SetBinContent(xax.FindBin(run), ratio)
                    continue
                totLumi += lumiRecordedByRun[run]
                if run in lumiPrescaledByRun:
                    preLumi += lumiPrescaledByRun[run]
                    xax.SetBinLabel(xax.FindBin(run), str(run))
                ratio = preLumi / totLumi
                hPrescaledLumiFraction.SetBinContent(xax.FindBin(run), ratio)

            xax.SetBinLabel(1, str(first))
            xax.SetBinLabel(xax.GetNbins(), str(last))
            hPrescaledLumiFraction.Write()
Example 9
 def __init__(self, path):
     self._src = ROOT.TFile.Open(path)
     self._yields = OrderedDict()
Example 10
    print '-'*screenlen(line)



if __name__ == '__main__':
    usage = 'usage: %prog [dir] [cmd]'
    parser = optparse.OptionParser(usage)
    parser.add_option('-a','--all',dest='all',action='store_true',default=False)
    parser.add_option('-d','--diff',dest='diff',action='store_true',help='check different histograms among the files',default=False)
    parser.add_option('-s','--stat',dest='stat',action='store_true',help='check statistics',default=False)
    parser.add_option('-o','--over',dest='over',action='store_true',help='show under/overflow',default=False)
    (opt, args) = parser.parse_args()

    sys.argv.append('-b')

    files = OrderedDict()

    
    entries = OrderedDict()
    for arg in args:
        (file,names,processes) = GetHistPaths(arg)

        if processes and not opt.all:
            hp = [ 'histo_'+p for p in processes]
            names = [ n for n in names if n in hp ]
        
        files[arg] = (file,names,processes)


    if opt.diff:
        for arg,(file,names,processes) in files.iteritems():
Example 12
class ShapeLoader:
    '''Load the histogram data from the shape file
    + Yields
    + Nuisance shapes and parameters'''
    _log = logging.getLogger('ShapeLoader')

    def __init__(self, path):
        self._src = ROOT.TFile.Open(path)
        self._yields = OrderedDict()

    def __del__(self):
        del self._src

    def yields(self):
        return self._yields.copy()

    def effects(self):
        return self._effects.copy()

    def load(self):
        # load the list of processes
        processes = sorted([ p.GetName() for p in self._src.Get('processes') ])
        # load the histograms and calculate the yields
        names = sorted([ k.GetName() for k in self._src.GetListOfKeys() ])
        names.remove('processes')


        self._nominals    = []
        self._systematics = []
        
        self._nominals = [ ('histo_'+p,p) for p in processes if 'histo_'+p in names] 
        if len(self._nominals) != len(processes):
            raise RuntimeError('Not all process shapes have been found')

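        # collect the systematic variations, stored as histo_<process>_<effect>Up / histo_<process>_<effect>Down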
        for p in processes:
            systre = re.compile('^histo_%s_(.+)(Up|Down)$' % p)
            systp = []
            for name in names:
                m = systre.match(name)
                if not m: continue
                systp.append( (name,p,m.group(1),m.group(2)) )
#             systs = [ (name,p, for name in names if systre.match(name) ]
#               print 'xxx',p,systp
#                print 'xxx',p,m.group(1),m.group(2)
            self._systematics += systp

        self._systematics = sorted(self._systematics)

        for name,process in self._nominals:
#             process = self._nomRegex.match(name).group(1)
            h = self._src.Get(name)
            N =  h.Integral(0,h.GetNbinsX())
            entries = h.GetEntries()

            # TODO: DYTT cleanup
#             if entries < 5: continue

            self._yields[process] = Yield( N, name=process, entries=entries ) 
#             self._yields[process] = Yield( N, name=process, entries=entries, shape=h ) 
        
        ups = {}
        downs = {}
        for name,process,effect,var in self._systematics:
            # check for Up/Down
#             (process,effect,var) = self._systRegex.match(name).group(1,2,3)
            print "process,effect,var = ", process,effect,var
            if var == 'Up': 
                if effect not in ups: ups[effect]= []
                ups[effect].append(process)
            elif var == 'Down':
                if effect not in downs: downs[effect]= []
                downs[effect].append(process)
        # check 
        print " ups = ", ups
        print " downs = ", downs

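        # every effect must provide Up and Down variations for the same set of processes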
        for effect in ups:
            if set(ups[effect]) != set(downs[effect]):
                sUp = set(ups[effect])
                sDown = set(downs[effect])
                raise RuntimeError('Some systematics shapes for '+effect+' not found in up and down variation: \n '+', '.join( (sUp | sDown) - ( sUp & sDown ) ))

        # all checks out, save only one
        self._effects = ups
Example 13
    #             for flavor in flavors:
                print '- Processing',mass, ch
                loader = ShapeLoader(shapeTmpl.format(mass = mass, channel=ch) ) 
                loader.load()
    
                writer = ShapeDatacardWriter( mass, ch, opt.shape, opt.dataset )
                print '   + loading yields'
                yields = loader.yields()
    
                # reshuffle the order
                #order = [ 'vbfH', 'ggH', 'wzttH', 'ggWW', 'Vg', 'WJet', 'Top', 'WW', 'DYLL', 'VV', 'DYTT', 'Data']
                order = [
                    'ggH', 'ggH_ALT', 'qqH', 'qqH_ALT', 'wzttH', 'wzttH_ALT', 'WH', 'ZH', 'ttH',
                    'ggWW', 'VgS', 'Vg', 'WJet',
                    'Top', 'TopPt0', 'TopPt1', 'TopPt2', 'TopPt3', 'TopPt4', 'TopPt5', 'TopPt6', 'TopPt7', 'TopPt8',
                    'WW', 'WWlow', 'WWhigh', 'WW1', 'WW2', 'WW3', 'WW4', 'WW5', 'WW6', 'WWewk',
                    'DYLL', 'VV', 'DYTT', 'DYee', 'DYmm', 'DYee05', 'DYmm05', 'Other', 'VVV', 'Data',
                    'ggH_SM', 'qqH_SM', 'WH_SM', 'ZH_SM', 'wzttH_SM',
                    'ggH_sbi', 'ggH_s', 'ggH_b', 'qqH_sbi', 'qqH_s', 'qqH_b',
                ]
    
   
                oldYields = yields.copy()
                yields = OrderedDict([ (k,oldYields[k]) for k in order if k in oldYields])
                
                # list of experimental systematics (from the .root file)
                effects = loader.effects()

                print '   + making nuisance map'
                nuisances = builder.nuisances( yields, effects , mass, ch, jcat, fl, optsNuis)
    
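                # copy the normalisation nuisances of the SM signals to their _ALT counterparts
                # (shape and per-bin statistical nuisances are left untouched)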
                for n,(pdf, eff) in nuisances.iteritems():
                    if 'ggH' in eff and 'shape' not in pdf[0] and 'stat_bin' not in n :
                        eff['ggH_ALT'] =  eff['ggH']
                    if 'qqH' in eff and 'shape' not in pdf[0] and 'stat_bin' not in n :
                        eff['qqH_ALT'] =  eff['qqH']
                    if 'wzttH' in eff and 'shape' not in pdf[0] and 'stat_bin' not in n :
                        eff['wzttH_ALT'] =  eff['wzttH']
    
Example 14
            # nominal shapes
            print factory.makeNominals(variable,selection,nomInputDir,nomOutDir+nominalOutFile)

        if opt.makeSyst:
            class Systematics:
                def __init__(self,name,nick,indir,mask):
                    pass
            # systematic shapes
            systematics = OrderedDict([
                ('electronResolution'      , 'p_res_e'),
                ('electronScale_down'      , 'p_scale_eDown'),
                ('electronScale_up'        , 'p_scale_eUp'),
                ('jetEnergyScale_down'     , 'p_scale_jDown'),
                ('jetEnergyScale_up'       , 'p_scale_jUp'),
                ('leptonEfficiency_down'   , 'eff_lDown'),
                ('leptonEfficiency_up'     , 'eff_lUp'),
                ('puW_up'                  , 'puModelUp'),
                ('puW_down'                , 'puModelDown'),
                ('metResolution'           , 'met'),
                ('muonScale_down'          , 'p_scale_mDown'),
                ('muonScale_up'            , 'p_scale_mUp'),
                ('chargeResolution'        , 'ch_res'),
            ])

            # remove skip-syst list
#             if opt.skipSyst!='':
#                for s in opt.skipSyst.split(','):
            for s in opt.skipSyst:
              print 'skipping systematics: '+s
              systematics.pop(s)
Example 15
class ShapeLoader:
    '''Load the histogram data from the shape file
    + Yields
    + Nuisance shapes and parameters'''

    def __init__(self, path):
#         self._systRegex = re.compile('^histo_([^_]+)_CMS_(.+)(Up|Down)$')
        self._systRegex = re.compile('^histo_([^_]+)_(.+)(Up|Down)$')
        self._nomRegex  = re.compile('^histo_([^_]+)$')
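        # histo_<process> holds the nominal shape, histo_<process>_<effect>(Up|Down) the systematic variations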
        self._src = ROOT.TFile.Open(path)
        self._yields = OrderedDict()

    def __del__(self):
        del self._src

    def yields(self):
        return self._yields.copy()

    def effects(self):
        return self._effects.copy()

    def load(self):
        # load the histograms and calculate the yields
        names = [ k.GetName() for k in self._src.GetListOfKeys()]
        
        self._nominals = sorted([ name for name in names if self._nomRegex.match(name) ]) 
        self._systematics = sorted([ name for name in names if self._systRegex.match(name) ])
#         print '\n'.join(self._nominals)
#         print '\n'.join(self._systematics)
        for name in self._nominals:
            process = self._nomRegex.match(name).group(1)
            h = self._src.Get(name)
            N =  h.Integral(0,h.GetNbinsX())
            entries = h.GetEntries()

            self._yields[process] = Yield( N, name=process, entries=entries ) 
#             self._yields[process] = Yield( N, name=process, entries=entries, shape=h ) 
#             print process, '%.3f' % h.Integral(0,h.GetNbins())
        
#         print self._systematics
        ups = {}
        downs = {}
        for name in self._systematics:
            # check for Up/Down
            (process,effect,var) = self._systRegex.match(name).group(1,2,3)
            if var == 'Up': 
                if effect not in ups: ups[effect]= []
                ups[effect].append(process)
            elif var == 'Down':
                if effect not in downs: downs[effect]= []
                downs[effect].append(process)

#         del ups['p_scale_j'][0]
#         del ups['p_scale_e'][1]
        # check 
        for effect in ups:
            if set(ups[effect]) != set(downs[effect]):
                sUp = set(ups[effect])
                sDown = set(downs[effect])
                raise RuntimeError('Some systematics shapes for '+effect+' not found in up and down variation: \n '+', '.join( (sUp | sDown) - ( sUp & sDown ) ))
        
        # all checks out, save only one
        self._effects = ups
Example 16
    def nuisances(self, yields, effects, mass, channel, jetcat, flavor, opts ):
        '''Add the nuisances according to the options'''
        allNus = OrderedDict()

        optMatt = mattOpts()
        optMatt.WJadd = 0.36
        optMatt.WJsub = 0.0

        qqWWfromData = self._wwddfilter.haswwdd(mass, channel)

        # vh and vbf and wwewk mapped to "2j" category
        if (jetcat == 'vh2j' or jetcat == 'whsc' or jetcat == '2jtche05' or jetcat == '2jtche05CJ' or jetcat == '2jtche05FJ') :
           jetcat = '2j'

        # vh : remove some nuisances, typical of vbf only
        if (jetcat == 'vh2j' or jetcat == 'whsc') :
           optMatt.VH = 1
        else :
           optMatt.VH = 0

        optMatt.HWidth = 0
        optMatt.WWxsec = 0
        if 'SpecialSettings' in opts:
            if opts['SpecialSettings'] == 'HWidth':
                optMatt.HWidth = 1
            elif opts['SpecialSettings'] == 'WWxsec':
                optMatt.WWxsec = 1

        if jetcat not in ['0j','1j','2j','2jex','01j']: raise ValueError('Unsupported jet category found: %s' % jetcat)

#         suffix = '_8TeV'
#         if '2011' in opt.dataset: suffix = '_7TeV'

        suffix = '_'+opt.energy
        CutBased = getCommonSysts(int(mass),flavor,int(jetcat[0]),qqWWfromData, self._shape, optMatt, suffix, self._isssactive, opt.energy,opts['newInterf'],opt.YRSysVer,opt.mHSM,125.0,opts['ewksinglet'])
        if self._shape:
            # let the WW+ggWW background normalisation float together
#             for p in opts['floatN'].split(' '):
            for p in opts['floatN']:
                print p
                floatN = floatNorm(p,jetcat)
                CutBased.update( floatN )

        common = OrderedDict()
        for k in sorted(CutBased):
            common[k] = CutBased[k]
        allNus.update( common )


        # only if not bin-by-bin, then add statistical uncertainty
        # for bbb it is already included
        if self._statmode != 'bybin':
            self._addStatisticalNuisances(allNus, yields, channel, suffix)

        self._addDataDrivenNuisances(allNus, yields, mass, channel, jetcat, suffix, opts)

        if self._shape:
            self._addShapeNuisances(allNus,effects, opts, suffix, yields)

        if 'nuisFlags' not in opts:
            raise RuntimeError('nuisFlags not found among the allNus options')

        flags = opts['nuisFlags']

        finalNuisances = OrderedDict()
        nus = set(allNus.keys())
        dummy = nus.copy()
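        # each (pattern, flag) pair enables (True) or disables (False) the nuisances
        # whose names match the fnmatch-style glob pattern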
        for exp,flag in flags:
            subset = set(fnmatch.filter(nus,exp))
            if flag:
                dummy |= subset
            else:
                dummy -= subset

        nuisances = OrderedDict()
        for eff in allNus:
            if eff not in dummy: continue
            nuisances[eff] = allNus[eff]

        return nuisances
Example 17
    def __init__(self, path):
#         self._systRegex = re.compile('^histo_([^_]+)_CMS_(.+)(Up|Down)$')
        self._systRegex = re.compile('^histo_([^_]+)_(.+)(Up|Down)$')
        self._nomRegex  = re.compile('^histo_([^_]+)$')
        self._src = ROOT.TFile.Open(path)
        self._yields = OrderedDict()
Example 18
                      default=None)
    parser.add_option('-o',
                      '--out',
                      dest='out',
                      help='Output directory',
                      default=None)

    (opt, args) = parser.parse_args()
    if not opt.tag:
        parser.print_help()
        parser.error('Tag not defined, check the usage')

    sys.argv.append('-b')
    ROOT.gROOT.SetBatch()

    decks = OrderedDict()
    for arg in args:
        if '=' not in arg:
            decks[os.path.basename(arg if arg[-1] != '/' else arg[:-1])] = arg
            continue

        i = arg.index('=')
        decks[arg[:i]] = arg[i + 1:]

    print decks
    doexist = zip(map(os.path.exists, decks.itervalues()), decks.itervalues())

    dontexist = filter(lambda x: not x[0], doexist)

    if dontexist:
        raise ValueError('workareas not found:' +
Example 19
class ShapeLoader:
    '''Load the histogram data from the shape file
    + Yields
    + Nuisance shapes and parameters'''
    _log = logging.getLogger('ShapeLoader')

    def __init__(self, path):
        self._src = ROOT.TFile.Open(path)
        self._yields = OrderedDict()

    def __del__(self):
        del self._src

    def yields(self):
        return self._yields.copy()

    def effects(self):
        return self._effects.copy()

    def load(self):
        # load the list of processes
        processes = sorted([ p.GetName() for p in self._src.Get('processes') ])
        # load the histograms and calculate the yields
        names = sorted([ k.GetName() for k in self._src.GetListOfKeys() ])
        names.remove('processes')


        self._nominals    = []
        self._systematics = []
        
        self._nominals = [ ('histo_'+p,p) for p in processes if 'histo_'+p in names] 
        if len(self._nominals) != len(processes):
            raise RuntimeError('Not all process shapes have been found')

        for p in processes:
            systre = re.compile('^histo_%s_(.+)(Up|Down)$' % p)
            systp = []
            for name in names:
                m = systre.match(name)
                if not m: continue
                systp.append( (name,p,m.group(1),m.group(2)) )
#             systs = [ (name,p, for name in names if systre.match(name) ]
#               print 'xxx',p,systp
#                print 'xxx',p,m.group(1),m.group(2)
            self._systematics += systp

        self._systematics = sorted(self._systematics)

        for name,process in self._nominals:
#             process = self._nomRegex.match(name).group(1)
            h = self._src.Get(name)
            N =  h.Integral(0,h.GetNbinsX())
            entries = h.GetEntries()

            # TODO: DYTT cleanup
#             if entries < 5: continue

            self._yields[process] = Yield( N, name=process, entries=entries ) 
#             self._yields[process] = Yield( N, name=process, entries=entries, shape=h ) 
        
        ups = {}
        downs = {}
        for name,process,effect,var in self._systematics:
            # check for Up/Down
#             (process,effect,var) = self._systRegex.match(name).group(1,2,3)
            if var == 'Up': 
                if effect not in ups: ups[effect]= []
                ups[effect].append(process)
            elif var == 'Down':
                if effect not in downs: downs[effect]= []
                downs[effect].append(process)

        # check 
        for effect in ups:
            if set(ups[effect]) != set(downs[effect]):
                sUp = set(ups[effect])
                sDown = set(downs[effect])
                raise RuntimeError('Some systematics shapes for '+effect+' not found in up and down variation: \n '+', '.join( (sUp | sDown) - ( sUp & sDown ) ))
        
        # all checks out, save only one
        self._effects = ups
Example 20
    usage = 'Usage: %prog -t <tag> deck1 deck2 deck3'
    parser = optparse.OptionParser(usage)
    parser.add_option('-r','--reference',dest='reference',help='Compare to a reference histogram', default=None)
    parser.add_option('-t','--tag',dest='tag',help='Define the tag to process', default=None)
    parser.add_option('-o','--out',dest='out',help='Output directory',default=None)
    
    (opt, args) = parser.parse_args()
    if not opt.tag:
        parser.print_help()
        parser.error('Tag not defined, check the usage')

    sys.argv.append( '-b' )
    ROOT.gROOT.SetBatch()

    decks = OrderedDict()
    for arg in args:
        if '=' not in arg:
            decks[os.path.basename(arg if arg[-1] != '/' else arg[:-1])] = arg
            continue

        i = arg.index('=')
        decks[arg[:i]]=arg[i+1:]

    print decks
    doexist = zip(map(os.path.exists,decks.itervalues()),decks.itervalues())

    dontexist = filter( lambda x: not x[0], doexist )

    if dontexist:
        raise ValueError('workareas not found:'+' '.join(map(operator.itemgetter(1),dontexist)))
Example 21
    def nuisances(self, yields, effects, mass, channel, jetcat, flavor, opts):
        '''Add the nuisances according to the options'''
        allNus = OrderedDict()

        optMatt = mattOpts()
        optMatt.WJadd = 0.36
        optMatt.WJsub = 0.0

        qqWWfromData = self._wwddfilter.haswwdd(mass, channel)

        # vh and vbf and wwewk mapped to "2j" category
        if (jetcat == 'vh2j' or jetcat == 'whsc' or jetcat == '2jtche05' or jetcat == '2jtche05CJ' or jetcat == '2jtche05FJ') :
           jetcat = '2j'

        # vh : remove some nuisances, typical of vbf only
        if (jetcat == 'vh2j' or jetcat == 'whsc') :
           optMatt.VH = 1
        else :
           optMatt.VH = 0

        if jetcat not in ['0j','1j','2j','2jex']: raise ValueError('Unsupported jet category found: %s' % jetcat)

#         suffix = '_8TeV'
#         if '2011' in opt.dataset: suffix = '_7TeV'

        suffix = '_'+opt.energy

        CutBased = getCommonSysts(int(mass),flavor,int(jetcat[0]),qqWWfromData, self._shape, optMatt, suffix, self._isssactive)
        if self._shape:
            # let the WW+ggWW background normalisation float together
#             for p in opts['floatN'].split(' '):
            for p in opts['floatN']:
                print p
                floatN = floatNorm(p)
                CutBased.update( floatN )

        common = OrderedDict()
        for k in sorted(CutBased):
            common[k] = CutBased[k]
        allNus.update( common )


        # only if not bin-by-bin, then add statistical uncertainty
        # for bbb it is already included
        if self._statmode != 'bybin':
            self._addStatisticalNuisances(allNus, yields, channel, suffix)

        self._addDataDrivenNuisances(allNus, yields, mass, channel, jetcat, suffix, opts)

        if self._shape:
            self._addShapeNuisances(allNus,effects, opts, suffix, yields)

        if 'nuisFlags' not in opts:
            raise RuntimeError('nuisFlags not found among the allNus options')

        flags = opts['nuisFlags']

        finalNuisances = OrderedDict()
        nus = set(allNus.keys())
        dummy = nus.copy()
        for exp,flag in flags:
            subset = set(fnmatch.filter(nus,exp))
            if flag:
                dummy |= subset
            else:
                dummy -= subset

        nuisances = OrderedDict()
        for eff in allNus:
            if eff not in dummy: continue
            nuisances[eff] = allNus[eff]

        return nuisances
Example 22
            xax.SetBinLabel(1, str(first))
            xax.SetBinLabel(xax.GetNbins(), str(last))
            hPrescaledLumiFraction.Write()


if __name__ == "__main__":
    motherJson = "certifiedToScan.json"
    cwd = os.getcwd()
    tmpDir = cwd + "/psdata/"

    allPaths = OrderedDict(
        [
            ("singleMu", singleMuDataPaths),
            #         ('singleEl',singleElDataPaths),
            #         ('doubleEl',doubleElDataPaths),
            #         ('doubleMu',doubleMuDataPaths),
            #         ('muEg',muEGDataPaths),
        ]
    )

    out = ROOT.TFile.Open("psPlots.root", "recreate")
    for p, pathList in allPaths.iteritems():
        s = Scanner(p, pathList, motherJson, tmpDir)
        s.analyze()
        s.details(p)
        s.summarize(p)
        s.makeplots(out)
    out.Close()
    sys.exit(0)