Example #1
def wrapper(s):
    logger.info("Processing impacts")
    #name = "ewkDM_ttZ_ll_DC2A_0p200000_DC2V_0p200000"
    #name = "ewkDM_ttZ_ll_DC2A_0p250000_DC2V_m0p150000"
    #name = "dim6top_LO_ttZ_ll"
    #name = "newCard_2016"
    #name = "ttZ_fix"
    name = "shapesCard_2017_oldPattern"
    cardFile = name + ".txt"
    #cardFilePath = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_2016_xsec_shape_lowUnc_SRandCR/dim6top_LO_dipoles/"+cardFile
    #cardFilePath = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_COMBINED_xsec_shape_lowUnc_SRandCRnoZZ/dim6top_LO_dipoles/"+cardFile
    #cardFilePath = "/afs/hephy.at/work/d/dspitzbart/top/devel/CMSSW_8_1_0/src/TOP-18-009/"+cardFile
    #cardFilePath = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/inclusiveRegions_2017_lowUnc/dim6top_LO_dipoles/dim6top_LO_ttZ_ll.txt"
    #cardFilePath = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/inclusiveRegions_2016_lowUnc/dim6top_LO_currents/dim6top_LO_ttZ_ll.txt"
    cardFilePath = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_COMBINED_xsec_shape_lowUnc_SRandCR/dim6top_LO_currents/dim6top_LO_ttZ_ll.txt"
    combineDirname = os.path.join(releaseLocation, "ewkDM_dipoles")
    logger.info("Creating %s" % combineDirname)
    if not os.path.isdir(combineDirname): os.makedirs(combineDirname)
    shutil.copyfile(cardFilePath, combineDirname + '/' + cardFile)
    if args.bkgOnly:
        prepWorkspace = "text2workspace.py %s --X-allow-no-signal -m 125" % cardFile
    else:
        prepWorkspace = "text2workspace.py %s -m 125" % cardFile
    if args.bkgOnly:
        robustFit = "combineTool.py -M Impacts -d %s.root -m 125 --doInitialFit --robustFit 1 --rMin -0.98 --rMax 1.02" % name
        impactFits = "combineTool.py -M Impacts -d %s.root -m 125 --robustFit 1 --doFits --parallel %s --rMin -0.98 --rMax 1.02" % (
            name, str(args.cores))
    else:
        robustFit = "combineTool.py -M Impacts -d %s.root -m 125 --doInitialFit " % name
        impactFits = "combineTool.py -M Impacts -d %s.root -m 125 --doFits --parallel %s " % (
            name, str(args.cores))
    extractImpact = "combineTool.py -M Impacts -d %s.root -m 125 -o impacts.json" % name
    plotImpacts = "plotImpacts.py -i impacts.json -o impacts"
    combineCommand = "cd %s;eval `scramv1 runtime -sh`;%s;%s;%s;%s;%s" % (
        combineDirname, prepWorkspace, robustFit, impactFits, extractImpact,
        plotImpacts)
    logger.info("Will run the following command, might take a few hours:\n%s" %
                combineCommand)

    os.system(combineCommand)

    plotDir = plot_directory + "/impacts2016/"
    if not os.path.isdir(plotDir): os.makedirs(plotDir)
    if args.bkgOnly:
        shutil.copyfile(combineDirname + '/impacts.pdf',
                        "%s/%s_bkgOnly.pdf" % (plotDir, "ewkDM"))
    else:
        shutil.copyfile(combineDirname + '/impacts.pdf',
                        "%s/%s.pdf" % (plotDir, "test_comb_BSM_njetUnc0p2"))
    logger.info("Copied result to %s" % plotDir)

    if args.removeDir:
        logger.info("Removing directory in release location")
        rmtree(combineDirname)
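
A note on the command chaining above: os.system runs all steps in one shell and ignores failures of intermediate commands. A minimal, hedged alternative using subprocess (same commands as in the snippet; run_chain is a hypothetical helper, not part of the original code):

import subprocess

def run_chain(workdir, commands):
    """Run shell commands sequentially in workdir, stopping at the first failure."""
    # '&&' makes a failed text2workspace/combineTool step abort the whole chain
    subprocess.check_call(" && ".join(commands), shell=True, cwd=workdir)

# run_chain(combineDirname, [prepWorkspace, robustFit, impactFits, extractImpact, plotImpacts])
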
Example #2
    def makeTemplate(self, selection, weight='(1)'):
        logger.info(
            "Make PU profile for sample %s and selection %s and weight %s",
            self.source_sample.name, selection, weight)

        h_source = self.source_sample.get1DHistoFromDraw(
            self.draw_string,
            self.binning,
            selectionString=selection,
            weightString=weight)
        logger.info("PU histogram contains %s weighted events",
                    h_source.Integral())
        h_source.Scale(1. / h_source.Integral())
        return h_source
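
Note that this template divides by h_source.Integral() without a zero check, unlike the versions in Examples 6 and 8 that raise ValueError on an empty histogram. A minimal guard in the same style (a sketch, assuming the same ROOT histogram API):

integral = h_source.Integral()
if integral > 0:
    h_source.Scale(1. / integral)
else:
    raise ValueError("PU histogram is empty, cannot normalise")
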
Example #3
    def cachedTemplate(self,
                       selection,
                       weight='weight',
                       save=True,
                       overwrite=False):

        key = {
            "selection": selection,
            "weight": weight,
            "source": self.source_sample.name,
            "target": self.target_sample.name
        }
        #key =  self.uniqueKey( selection, weight, self.source_sample.name, self.target_sample.name)
        if (self.cache and self.cache.contains(key)) and not overwrite:
            result = self.cache.get(key)
            logger.info("Loaded reweighting template from %s for %s : %r" %
                        (self.cache.database_file, key, result))
            logger.debug("With properties %s : %s" % (key, result))
        elif self.cache:
            logger.info("Obtain template for %s" % (key, ))
            result = self.makeTemplate(selection=selection, weight=weight)
            if result:
                result = self.cache.addData(key, result, overwrite=save)
                #print "Adding template to cache for %s : %r" %( key, result)
                logger.info("Adding template to cache for %s : %r" %
                            (key, result))
            else:
                logger.info("Couldn't create template to cache for %s : %r" %
                            (key, result))
        else:
            result = self.makeTemplate(selection=selection, weight=weight)
        return result
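
Examples 3, 4 and 7 all follow the same cache-or-compute pattern: build a key, try the cache, fall back to makeTemplate. A toy stand-in for the contains/get/addData interface used in Examples 3 and 7 (Example 4 uses a slightly different add/uniqueKey interface). The class is hypothetical; the project's real cache persists to a database file:

class DictCache(object):
    """In-memory cache with the interface used above."""
    def __init__(self):
        self._store = {}
        self.database_file = "<in-memory>"
    def _freeze(self, key):
        # dict keys are unhashable, so sort the items into a tuple
        return tuple(sorted(key.items()))
    def contains(self, key):
        return self._freeze(key) in self._store
    def get(self, key):
        return self._store[self._freeze(key)]
    def addData(self, key, data, overwrite=True):
        self._store[self._freeze(key)] = data
        return data
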
Example #4
    def cachedTemplate( self, selection, weight = 'weight', save = True, overwrite = False):

        key =  self.uniqueKey( selection, weight, self.sample.name)
        if (self.cache and self.cache.contains(key)) and not overwrite:
            result = self.cache.get(key)
            logger.debug( "Loading cached template for %s : %s"%( key, result) )
        elif self.cache:
            logger.info( "Obtain template for %s"%( key, ) )
            result = self.makeTemplate( selection = selection, weight = weight)
            result = self.cache.add( key, result, save=save)
            #print "Adding template to cache for %s : %r" %( key, result)
            logger.debug( "Adding template to cache for %s : %r" %( key, result) )
        else:
            result = self.makeTemplate( selection = selection, weight = weight)
        return result
Example #5
def getCommands(line):
    commands = []
    split = None
    try:
        m = re.search(r"SPLIT[0-9][0-9]*", line)
        split = int(m.group(0).replace('SPLIT', ''))
    except AttributeError:
        pass  # no SPLIT tag in the line; re.search returned None
    line = line.split('#')[0]
    if line:
        if split:
            logger.info("Splitting in %i jobs", split)
            for i in range(split):
                commands.append(line + " --nJobs %i --job %i" % (split, i))
        else:
            commands.append(line)
    return commands
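
A quick usage sketch: a trailing SPLIT tag fans one command line out into one command per job (input line illustrative):

for cmd in getCommands("python run.py --year 2016 #SPLIT3"):
    print cmd
# python run.py --year 2016  --nJobs 3 --job 0
# python run.py --year 2016  --nJobs 3 --job 1
# python run.py --year 2016  --nJobs 3 --job 2
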
Example #6
    def makeTemplate( self, selection, weight = 'weight'):
        logger.info( "Make polarisation template for sample %s and selection %s and weight %s", self.sample.name, selection, weight )

        h_gen = self.sample.get1DHistoFromDraw( 'genZ_cosThetaStar', [20, -1, 1 ], selectionString = selection, weightString = weight )
        if h_gen.Integral()>0:
            h_gen.Scale(1./h_gen.Integral())
        else:
            raise ValueError

        templates = { p : ROOT.TH1F('template_'+p, 'template_'+p, 20, -1, 1 ) for p in self.polarisations }

        r = self.sample.treeReader( \
                variables = map( TreeVariable.fromString, ['genZ_cosThetaStar/F', 'cosThetaStar/F', 'weight/F']),
                selectionString = selection )

        r.start()
        while r.run():
            for p in self.polarisations:

                # NaN compares False to every ordering, so this also skips NaN gen values
                if not r.event.genZ_cosThetaStar<float('inf'): continue
                w = r.event.weight*self.f_Z_pol[p].Eval(r.event.genZ_cosThetaStar)
                sw = h_gen.GetBinContent(h_gen.FindBin(r.event.genZ_cosThetaStar))
                if sw>0:
                    w/=sw
                else:
                    w=0
                if r.event.cosThetaStar<float('inf'):
                    templates[p].Fill( r.event.cosThetaStar, w )
                #print r.event.weight, f_Z_pol[p].Eval(r.event.genZ_cosThetaStar), h.GetBinContent(h.FindBin(r.event.genZ_cosThetaStar))

        self.sample.chain.SetBranchStatus( "*", 1 )

        # Normalisation
        for p in self.polarisations:
            h = templates[p]
            s = h.Integral()
            if s>0:
                h.Scale( 1./s )

        return templates
Example #7
 def cachedTemplate(self,
                    selection,
                    weight='(1)',
                    save=True,
                    overwrite=False):
     key = {
         "selection": selection,
         "weight": weight,
         "source": self.source_sample.name
     }
     if (self.cache and self.cache.contains(key)) and not overwrite:
         result = self.cache.get(key)
         logger.info("Loaded MC PU profile from %s" %
                     (self.cache.database_file))
         logger.debug("Key used: %s result: %r" % (key, result))
     elif self.cache:
         logger.info("Obtain PU profile for %s" % (key, ))
         result = self.makeTemplate(selection=selection, weight=weight)
         if result:
             result = self.cache.addData(key, result, overwrite=save)
             logger.info("Adding PU profile to cache for %s : %r" %
                         (key, result))
         else:
             logger.warning(
                 "Couldn't create PU profile to cache for %s : %r" %
                 (key, result))
     else:
         result = self.makeTemplate(selection=selection, weight=weight)
     return result
Example #8
    def makeTemplate(self, selection, weight='(1)'):
        logger.info(
            "Make polarisation template for source_sample %s and target_sample %s and selection %s and weight %s",
            self.source_sample.name, self.target_sample.name, selection,
            weight)

        h_source = self.source_sample.get2DHistoFromDraw(
            self.template_draw_string,
            (self.cosThetaStar_binning, self.Z_pt_binning),
            selectionString=selection,
            weightString=weight,
            binningIsExplicit=True)
        logger.info("Source histogram contains %s weighted events",
                    h_source.Integral())
        if h_source.Integral() > 0:
            h_source.Scale(1. / h_source.Integral())
        else:
            raise ValueError
        h_target = self.target_sample.get2DHistoFromDraw(
            self.template_draw_string,
            (self.cosThetaStar_binning, self.Z_pt_binning),
            selectionString=selection,
            weightString=weight,
            binningIsExplicit=True)
        logger.info("Target histogram contains %s weighted events",
                    h_target.Integral())
        if h_target.Integral() > 0:
            h_target.Scale(1. / h_target.Integral())
        else:
            raise ValueError

        h_target.Divide(h_source)
        template = h_target

        return template
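
The template here is the bin-by-bin ratio of two unit-normalised 2D histograms, target over source. A self-contained PyROOT sketch of the same construction (toy binning and toy filling; only the normalise-and-divide logic mirrors the snippet):

import random
import ROOT

h_source = ROOT.TH2F("src", "src", 10, -1, 1, 10, 0, 500)
h_target = ROOT.TH2F("tgt", "tgt", 10, -1, 1, 10, 0, 500)
for _ in range(10000):
    h_source.Fill(random.uniform(-1, 1), random.expovariate(1. / 100))
    h_target.Fill(random.uniform(-1, 1), random.expovariate(1. / 150))
for h in (h_source, h_target):
    if h.Integral() > 0:
        h.Scale(1. / h.Integral())
    else:
        raise ValueError("empty histogram")
h_target.Divide(h_source)  # h_target now holds the per-bin reweighting factors
template = h_target
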
Example #9
logger = logger.get_logger(args.logLevel, logFile=None)

# Check that we have an even number of arguments
if len(args.couplings) % 2 != 0:
    logger.error(
        "Need an even number of coupling arguments of the format coupling1, value1, coupling2, value2, ... . Got %r",
        args.couplings)

# Interpret coupling argument list
coupling_names = args.couplings[::2]
coupling_values = map(float, args.couplings[1::2])

modified_couplings = {c: v for c, v in zip(coupling_names, coupling_values)}

# Let's not leave the user in the dark
logger.info("Model:        %s", args.model)
logger.info("Process:      %s", args.process)
logger.info("Couplings:    %s",
            ", ".join(["%s=%5.4f" % c for c in modified_couplings.items()]))

# Create configuration class
config = Configuration(model_name=args.model)

#p = Process(process = args.process, nEvents = 1, config = config, modified_couplings = modified_couplings )
p = Process(process=args.process, nEvents=1, config=config)
xsec_val = p.diagrams(plot_dir=plot_directory,
                      modified_couplings=modified_couplings)

config.cleanup()

logger.info("Done! Calculated xsec: %s ", repr(xsec_val))
Example #10
def wrapper(s):
    
    logger.info("Now working on %s", s.name)
    xSecScale = 1
    c = cardFileWriter.cardFileWriter()
    c.releaseLocation = combineReleaseLocation

    for coup in nonZeroCouplings:
        try:
            modification_dict[coup] = getCouplingFromName(s.name, coup)
            logger.info("The following coupling is set to non-zero value: %s: %s", coup, modification_dict[coup])
        except ValueError:
            logger.info("The following coupling is kept at zero: %s: %s", coup, modification_dict[coup])
            continue
    try:
        p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB)
        xsec = p.xsecDB.get(modification_dict)
    except IndexError:
        logger.info("Looking into backup DB for x-sec")
        p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB_Backup)
        xsec = p.xsecDB.get(modification_dict)
    if not xsec:
        try:
            p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB_Backup)
            xsec = p.xsecDB.get(modification_dict)
        except IndexError:
            logger.info("No x-sec found.")
    logger.info("Found modified x-sec of %s", xsec)
    
    cardFileName = os.path.join(limitDir, s.name+'.txt')
    if not os.path.exists(cardFileName) or overWrite:
        counter=0
        c.reset()
        c.setPrecision(3)
        postfix = '_%s'%args.year
        c.addUncertainty('PU',                  'lnN') # correlated
        c.addUncertainty('JEC'+postfix,         'lnN') # uncorrelated, for now!
        c.addUncertainty('btag_heavy'+postfix,  'lnN') # uncorrelated, wait for official recommendation
        c.addUncertainty('btag_light'+postfix,  'lnN') # uncorrelated, wait for official recommendation
        c.addUncertainty('trigger'+postfix,     'lnN') # uncorrelated, statistics dominated
        c.addUncertainty('leptonSF',            'lnN') # correlated
        c.addUncertainty('scale',               'lnN') # correlated.
        c.addUncertainty('scale_sig',           'lnN') # correlated.
        c.addUncertainty('PDF',                 'lnN') # correlated.
        c.addUncertainty('PartonShower',        'lnN') # correlated.
        c.addUncertainty('nonprompt',           'lnN') # correlated?!
        c.addUncertainty('WZ_xsec',             'lnN') # correlated.
        c.addUncertainty('WZ_bb',               'lnN') # correlated
        c.addUncertainty('WZ_powheg',           'lnN') # correlated
        c.addUncertainty('ZZ_xsec',             'lnN') # correlated.
        c.addUncertainty('ZG_xsec',             'lnN') # correlated.
        c.addUncertainty('rare',                'lnN') # correlated.
        c.addUncertainty('ttX',                 'lnN') # correlated.
        c.addUncertainty('Lumi'+postfix, 'lnN')

        uncList = ['PU', 'JEC', 'btag_heavy', 'btag_light', 'leptonSF', 'trigger']
        for unc in uncList:
            uncertainties[unc] = []
        
        ## use rate parameters??
        #c.addRateParameter('WZ', 1, '[0,2]')
        #c.addRateParameter('ZZ', 1, '[0,2]')

        for setupPair in setups:
            
            # extract the nominal and nonprompt setup from the pair
            setup, setupNP = setupPair
            
            signal      = MCBasedEstimate(name="TTZ", sample=setup.samples["TTZ"], cacheDir=setup.defaultCacheDir())
            #nonprompt   = FakeEstimate(name="nonPromptDD", sample=setup.samples["Data"], setup=setupNP, cacheDir=setup.defaultCacheDir())
            if args.unblind or (setup == setup3l_CR) or (setup == setup4l_CR):
                observation = DataObservation(name="Data", sample=setup.samples["Data"], cacheDir=setup.defaultCacheDir())
                logger.info("Using data!")
            else:
                observation = MCBasedEstimate(name="observation", sample=setup.samples["pseudoData"], cacheDir=setup.defaultCacheDir())
                logger.info("Using pseudo-data!")
            for e in setup.estimators: e.initCache(setup.defaultCacheDir())

            for r in setup.regions:
                totalBackground = u_float(0)
                for channel in setup.channels:
                    niceName = ' '.join([channel.name, r.__str__()])
                    binname = 'Bin'+str(counter)
                    logger.info("Working on %s", binname)
                    counter += 1
                    c.addBin(binname, [e.name.split('-')[0] for e in setup.estimators]+["nonPromptDD"], niceName)
                    #c.addBin(binname, 'nonPromptDD', niceName)

                    for e in setup.estimators:
                        name = e.name.split('-')[0]
                        if name.count('WZ'):
                            logger.info("Using reweighting to powheg for WZ sample")
                            wzReweighting = WZReweighting( cacheDir = reweightCacheWZ )
                            f = wzReweighting.cachedReweightingFunc( setup.WZselection )
                            powhegExpected = e.reweight1D(r, channel, setup, f)
                            expected = e.cachedEstimate(r, channel, setup)
                            print expected
                            WZ_powheg_unc = (powhegExpected-expected)/expected
                        else:
                            expected = e.cachedEstimate(r, channel, setup)
                        logger.info("Adding expectation %s for process %s", expected.val, name)
                        c.specifyExpectation(binname, name, expected.val if expected.val > 0.01 else 0.01)

                        totalBackground += expected

                        if not args.statOnly:
                            # uncertainties
                            pu          = 1 + e.PUSystematic( r, channel, setup).val            if expected.val>0.01 else 1.1
                            jec         = 1 + e.JECSystematic( r, channel, setup).val           if expected.val>0.01 else 1.1
                            btag_heavy  = 1 + e.btaggingSFbSystematic(r, channel, setup).val    if expected.val>0.01 else 1.1
                            btag_light  = 1 + e.btaggingSFlSystematic(r, channel, setup).val    if expected.val>0.01 else 1.1
                            trigger     = 1 + e.triggerSystematic(r, channel, setup).val        if expected.val>0.01 else 1.1
                            leptonSF    = 1 + e.leptonSFSystematic(r, channel, setup).val       if expected.val>0.01 else 1.1
                            if name.count('WZ'):
                                WZ_powheg   = 1 + WZ_powheg_unc.val                                 if expected.val>0.01 else 1.1

                            c.specifyUncertainty('PU',          binname, name, pu) # use the guarded value computed above
                            if not name.count('nonprompt'):
                                c.specifyUncertainty('JEC'+postfix,         binname, name, jec)
                                c.specifyUncertainty('btag_heavy'+postfix,  binname, name, btag_heavy)
                                c.specifyUncertainty('btag_light'+postfix,  binname, name, btag_light)
                                c.specifyUncertainty('trigger'+postfix,     binname, name, trigger)
                                c.specifyUncertainty('leptonSF',    binname, name, leptonSF)
                                c.specifyUncertainty('scale',       binname, name, 1.01) 
                                c.specifyUncertainty('PDF',         binname, name, 1.01)
                                c.specifyUncertainty('Lumi'+postfix, binname, name, 1.025 )

                            if name.count('ZZ'):    c.specifyUncertainty('ZZ_xsec',     binname, name, 1.10)
                            if name.count('ZG'):    c.specifyUncertainty('ZG_xsec',     binname, name, 1.20)
                            if name.count('WZ'):
                                c.specifyUncertainty('WZ_xsec',     binname, name, 1.10)
                                if setup == setup3l:
                                    c.specifyUncertainty('WZ_bb',     binname, name, 1.08)
                                c.specifyUncertainty('WZ_powheg',     binname, name, WZ_powheg)
                            
                            if name.count('nonprompt'):    c.specifyUncertainty('nonprompt',   binname, name, 1.30)
                            if name.count('rare'):    c.specifyUncertainty('rare',        binname, name, 1.50)
                            if name.count('TTX'):     c.specifyUncertainty('ttX',         binname, name, 1.11)


                        #MC bkg stat (some condition to neglect the smaller ones?)
                        uname = 'Stat_'+binname+'_'+name+postfix
                        c.addUncertainty(uname, 'lnN')
                        if expected.val > 0:
                            c.specifyUncertainty(uname, binname, name, 1 + expected.sigma/expected.val )
                        else:
                            c.specifyUncertainty(uname, binname, name, 1.01 )
                    
                    uname = 'Stat_'+binname+'_nonprompt'+postfix
                    c.addUncertainty(uname, 'lnN')
                    
                    if setup.nLeptons == 3 and setupNP:
                        nonprompt   = FakeEstimate(name="nonPromptDD", sample=setup.samples["Data"], setup=setupNP, cacheDir=setup.defaultCacheDir())
                        np = nonprompt.cachedEstimate(r, channel, setupNP)
                        if np.val < 0.01:
                            np = u_float(0.01,0.)
                        c.specifyExpectation(binname, 'nonPromptDD', np.val ) 
                        c.specifyUncertainty(uname,   binname, "nonPromptDD", 1 + np.sigma/np.val )
                        c.specifyUncertainty('nonprompt',   binname, "nonPromptDD", 1.30)
                    else:
                        np = u_float(0)
                        c.specifyExpectation(binname, 'nonPromptDD', np.val)
                    
                    if args.expected:
                        sig = signal.cachedEstimate(r, channel, setup)
                        obs = totalBackground + sig + np
                    elif args.unblind or (setup == setup3l_CR) or (setup == setup4l_CR):
                        obs = observation.cachedObservation(r, channel, setup)
                    else:
                        obs = observation.cachedEstimate(r, channel, setup)
                    c.specifyObservation(binname, int(round(obs.val,0)) )


                    if args.useShape:
                        logger.info("Using 2D reweighting method for shapes")
                        if args.model == "dim6top_LO":
                            source_gen = dim6top_central
                        elif args.model == "ewkDM":
                            source_gen = ewkDM_central

                        signalReweighting = SignalReweighting( source_sample = source_gen, target_sample = s, cacheDir = reweightCache)
                        f = signalReweighting.cachedReweightingFunc( setup.genSelection )
                        sig = signal.reweight2D(r, channel, setup, f)
                    else:
                        sig = signal.cachedEstimate(r, channel, setup)

                    xSecMod = 1
                    if args.useXSec:
                        xSecMod = xsec.val/xsec_central.val
                    
                    logger.info("x-sec is multiplied by %s",xSecMod)
                    
                    c.specifyExpectation(binname, 'signal', sig.val * xSecScale * xSecMod )
                    logger.info('Adding signal %s'%(sig.val * xSecScale * xSecMod))
                    
                    if sig.val>0:
                        c.specifyUncertainty('Lumi'+postfix, binname, 'signal', 1.025 )
                        if not args.statOnly:
                            # uncertainties
                            pu          = 1 + e.PUSystematic( r, channel, setup).val
                            jec         = 1 + e.JECSystematic( r, channel, setup).val
                            btag_heavy  = 1 + e.btaggingSFbSystematic(r, channel, setup).val
                            btag_light  = 1 + e.btaggingSFlSystematic(r, channel, setup).val
                            trigger     = 1 + e.triggerSystematic(r, channel, setup).val
                            leptonSF    = 1 + e.leptonSFSystematic(r, channel, setup).val

                            if sig.sigma/sig.val < 0.05:
                                uncertainties['PU']         += [pu]
                                uncertainties['JEC']        += [jec]
                                uncertainties['btag_heavy'] += [btag_heavy]
                                uncertainties['btag_light'] += [btag_light]
                                uncertainties['trigger']    += [trigger]
                                uncertainties['leptonSF']   += [leptonSF]

                            c.specifyUncertainty('PU',                  binname, "signal", pu)
                            c.specifyUncertainty('JEC'+postfix,         binname, "signal", jec)
                            c.specifyUncertainty('btag_heavy'+postfix,  binname, "signal", btag_heavy)
                            c.specifyUncertainty('btag_light'+postfix,  binname, "signal", btag_light)
                            c.specifyUncertainty('trigger'+postfix,     binname, "signal", trigger)
                            c.specifyUncertainty('leptonSF',            binname, "signal", leptonSF)
                            # This doesn't get the right uncertainty in CRs. However, signal doesn't matter there anyway.
                            if setup in [setup3l, setup4l]:
                                c.specifyUncertainty('scale_sig',   binname, "signal", 1 + scale_cache.get({"region":r, "channel":channel.name, "PDFset":"scale"}).val)
                                c.specifyUncertainty('PDF',         binname, "signal", 1 + PDF_cache.get({"region":r, "channel":channel.name, "PDFset":PDFset}).val)
                                c.specifyUncertainty('PartonShower',binname, "signal", PS_cache.get({"region":r, "channel":channel.name, "PDFset":"PSscale"}).val) #something wrong here?
                            #c.specifyUncertainty('scale_sig',   binname, "signal", 1.05) #1.30
                            #c.specifyUncertainty('PDF',         binname, "signal", 1.04) #1.15

                        uname = 'Stat_'+binname+'_signal'+postfix
                        c.addUncertainty(uname, 'lnN')
                        c.specifyUncertainty(uname, binname, 'signal', 1 + sig.sigma/sig.val )
                    else:
                        uname = 'Stat_'+binname+'_signal'+postfix
                        c.addUncertainty(uname, 'lnN')
                        c.specifyUncertainty(uname, binname, 'signal', 1 )

                    
        #c.addUncertainty('Lumi'+postfix, 'lnN')
        #c.specifyFlatUncertainty('Lumi'+postfix, 1.026)
        cardFileName = c.writeToFile(cardFileName)
    else:
        logger.info("File %s found. Reusing.",cardFileName)
    
    res = {}
    
    if not os.path.isdir(limitDir):
        os.makedirs(limitDir)
    resDB = resultsDB(limitDir+'/results.sq', "results", setup.resultsColumns)
    res = {"signal":s.name}
    if not overWrite and resDB.contains(res):  # 'res' holds the lookup key (the signal name)
        res = resDB.getDicts(res)[0]
        logger.info("Found result for %s, reusing", s.name)
    else:
        # We don't calculate limits here, but just in case we find a way how to do it, put placeholders here
        res.update({"exp":0, "obs":0, "exp1up":0, "exp2up":0, "exp1down":0, "exp2down":0})
        # Don't extract all the nuisances by default
        signalRegions = range(15,30) ## shouldn't be hardcoded
        masks = ['mask_ch1_Bin'+str(i)+'=1' for i in signalRegions]
        masks = ','.join(masks)

        if args.calcNuisances:
            c.calcNuisances(cardFileName, masks=masks)
        # extract the NLL
        #nll = c.calcNLL(cardFileName, options="")
        nll = c.physicsModel(cardFileName, options="", normList=["WZ_norm","ZZ_norm"], masks=masks) # fastScan turns off profiling
        if nll["nll0"] > 0:
            res.update({"dNLL_postfit_r1":nll["nll"], "dNLL_bestfit":nll["bestfit"], "NLL_prefit":nll["nll0"]})
        else:
            res.update({"dNLL_postfit_r1":-999, "dNLL_bestfit":-999, "NLL_prefit":-999})
            logger.info("Fits failed, adding values -999 as results")
        logger.info("Adding results to database")
        resDB.add(res, nll['nll_abs'], overwrite=True)

    print
    print "NLL results:"
    print "{:>15}{:>15}{:>15}".format("Pre-fit", "Post-fit r=1", "Best fit")
    print "{:15.2f}{:15.2f}{:15.2f}".format(float(res["NLL_prefit"]), float(res["NLL_prefit"])+float(res["dNLL_postfit_r1"]), float(res["NLL_prefit"])+float(res["dNLL_bestfit"]))
    
    print 'PU', min(uncertainties['PU']), max(uncertainties['PU'])
    print 'JEC', min(uncertainties['JEC']), max(uncertainties['JEC'])
    print 'btag_heavy', min(uncertainties['btag_heavy']), max(uncertainties['btag_heavy'])
    print 'btag_light', min(uncertainties['btag_light']), max(uncertainties['btag_light'])
    print 'trigger', min(uncertainties['trigger']), max(uncertainties['trigger'])
    print 'leptonSF', min(uncertainties['leptonSF']), max(uncertainties['leptonSF'])
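
The per-bin statistical nuisances above are encoded as log-normals of size 1 + sigma/value, with a 1.01 floor for (near-)empty bins. A tiny sketch of that encoding, using a stand-in for the u_float value-with-error class that appears throughout (the stand-in is hypothetical):

class UFloat(object):
    """Minimal value-with-uncertainty pair, standing in for u_float."""
    def __init__(self, val, sigma=0.):
        self.val, self.sigma = val, sigma

expected = UFloat(12.4, 1.1)
lnN = 1 + expected.sigma / expected.val if expected.val > 0 else 1.01
# -> 1.089, written to the card as the 'Stat_<bin>_<process>' nuisance
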
Example #11
# zip with coupling names
allCombinations = [zip(nonZeroCouplings, a) for a in couplingGrid]
allCombinationsFlat = []
for comb in allCombinations:
    allCombinationsFlat.append([item for sublist in comb for item in sublist])

#processes = ['tZq_4f', 'ttZ','ttW','ttH']
#processes = ['ttgamma', 'ttZ']
processes = ['ttZ_ll']
submitCMD = "submitBatch.py"
#submitCMD = "echo"

nJobs = len(processes[:1]) * len(allCombinationsFlat)

logger.info("Will need to run over %i combinations.", nJobs)

combinationChunks = chunks(allCombinationsFlat, 3)

for p in processes[:1]:
    for i, comb in enumerate(combinationChunks):
        with open("%s_%i.txt" % (p, i), 'w') as f:
            for c in comb:
                strBase = "{} {} " * nDim
                couplingStr = (strBase + '\n').format(*c)
                f.write(couplingStr)
        #if i == 2 or i == 8: continue
        os.system(submitCMD + " --title %s_%i 'python run.py --model " %
                  ("GP", i) + model_name + " --process " + p +
                  " --couplings %s_%i.txt --makeGridpack --calcXSec'" %
                  (p, i))  #--calcXSec
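
chunks() is called above but not shown. A common implementation with this call signature would be (an assumption, not necessarily the project's version):

def chunks(lst, n):
    """Yield successive n-sized slices of lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
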
Example #12
#Z_pt['name'] = "Z_pt"
#plots.append( Z_pt )
#
#Z_cosPhill  = {sample.name:ROOT.TH1D('Z_cosPhill', 'Z_cosPhill', 25, 0, pi) for sample in samples}
#Z_cosPhill['texX'] = "#phi(ll)"
#Z_cosPhill['name'] = "Z_cosPhill"
#plots.append( Z_cosPhill )

reader = sample.fwliteReader( products = products )

n_non_standard = 0

reader.start()
while reader.run( ):

    if reader.position % 1000==0: logger.info("At event %i/%i", reader.position, reader.nEvents)
    if maxN>0 and reader.position > maxN: break
    # All gen particles
    gp      = reader.products['gp']
    # for searching
    search  = GenSearch( gp )

    tops = filter( lambda p:abs(p.pdgId())==6 and search.isLast(p),  gp)
    Zs   = filter( lambda p:abs(p.pdgId())==23 and search.isLast(p), gp)
    Ws   = filter( lambda p:abs(p.pdgId())==24 and search.isLast(p), gp)
    
    logger.info( "tops %i Zs %i Ws %i", len(tops), len(Zs), len(Ws) )
    for W in Ws:
        print "W",W.numberOfDaughters(), W.daughter(0).pdgId(), W.daughter(1).pdgId()
    for Z in Zs:
        print "Z",W.numberOfDaughters(), W.daughter(0).pdgId(), W.daughter(1).pdgId()
Example #13
elif args.model == "ewkDM":
    xsecDB = "/afs/hephy.at/data/rschoefbeck02/TopEFT/results/xsec_DBv2.db"
    # Just in case
    xsecDB_Backup = "/afs/hephy.at/data/dspitzbart01/TopEFT/results/xsec_DBv2.db"
    if args.signal == "dipoles":
        nonZeroCouplings = ["DC2A","DC2V"]
    elif args.signal == "currents":
        nonZeroCouplings = ["DC1A", "DC1V"]
    # for safety, set all couplings to 0.
    modification_dict["DC1A"]   = 0.
    modification_dict["DC1V"]   = 0.
    modification_dict["DC2A"]   = 0.
    modification_dict["DC2V"]   = 0.


logger.info("Using model %s in plane: %s", args.model, args.signal)

logger.info("Will scan the following coupling values: %s and %s", nonZeroCouplings[0], nonZeroCouplings[1])

p = Process(process = "ttZ_ll", nEvents = 5000, config = config, xsec_cache=xsecDB)

xsec_central = p.xsecDB.get(modification_dict)

if year == 2016:
    PDFset = "NNPDF30"
    TTZ_sample = "TTZ_NLO"
elif year == 2017:
    PDFset = "NNPDF30"
    TTZ_sample = "TTZ_NLO_17"
elif year == 20167:  # presumably the combined 2016+2017 setup
    PDFset = "NNPDF30"
Example #14
def filler(event):

    event.run, event.lumi, event.evt = reader.evt

    if reader.position % 100 == 0:
        logger.info("At event %i/%i", reader.position, reader.nEvents)

    if args.addReweights:
        event.nrw = weightInfo.nid
        lhe_weights = reader.products['lhe'].weights()
        weights = []
        param_points = []
        for weight in lhe_weights:
            # Store nominal weight (First position!)
            if weight.id == 'rwgt_1': event.rw_nominal = weight.wgt
            if not weight.id in weightInfo.id: continue
            pos = weightInfo.data[weight.id]
            event.rw_w[pos] = weight.wgt
            weights.append(weight.wgt)
            interpreted_weight = interpret_weight(weight.id)
            for var in weightInfo.variables:
                getattr(event, "rw_" + var)[pos] = interpreted_weight[var]
            # weight data for interpolation
            if not hyperPoly.initialized:
                param_points.append(
                    tuple(interpreted_weight[var]
                          for var in weightInfo.variables))

        # Initialize
        if not hyperPoly.initialized: hyperPoly.initialize(param_points)
        coeff = hyperPoly.get_parametrization(weights)

        # = HyperPoly(weight_data, args.interpolationOrder)
        event.np = hyperPoly.ndof
        event.chi2_ndof = hyperPoly.chi2_ndof(coeff, weights)
        #logger.debug( "chi2_ndof %f coeff %r", event.chi2_ndof, coeff )
        logger.debug("chi2_ndof %f", event.chi2_ndof)
        for n in xrange(hyperPoly.ndof):
            event.p_C[n] = coeff[n]

    # All gen particles
    gp = reader.products['gp']

    # for searching
    search = GenSearch(gp)

    # find heavy objects before they decay
    tops = map(lambda t: {var: getattr(t, var)()
                          for var in top_varnames},
               filter(lambda p: abs(p.pdgId()) == 6 and search.isLast(p), gp))

    tops.sort(key=lambda p: -p['pt'])
    fill_vector(event, "top", top_varnames, tops)

    gen_Zs = filter(lambda p: abs(p.pdgId()) == 23 and search.isLast(p), gp)
    gen_Zs.sort(key=lambda p: -p.pt())
    if len(gen_Zs) > 0:
        gen_Z = gen_Zs[0]
        for var in Z_read_varnames:
            setattr(event, "Z_" + var, getattr(gen_Z, var)())
    else:
        gen_Z = None

    if gen_Z is not None:

        d1, d2 = gen_Z.daughter(0), gen_Z.daughter(1)
        if d1.pdgId() > 0:
            lm, lp = d1, d2
        else:
            lm, lp = d2, d1
        event.Z_daughterPdg = lm.pdgId()
        event.Z_cosThetaStar = cosThetaStar(gen_Z.mass(), gen_Z.pt(),
                                            gen_Z.eta(), gen_Z.phi(), lm.pt(),
                                            lm.eta(), lm.phi())

    gen_Gammas = filter(lambda p: abs(p.pdgId()) == 22 and search.isLast(p),
                        gp)
    gen_Gammas.sort(key=lambda p: -p.pt())
    if len(gen_Gammas) > 0:
        gen_Gamma = gen_Gammas[0]
        for var in gamma_read_varnames:
            setattr(event, "gamma_" + var, getattr(gen_Gamma, var)())
    else:
        gen_Gamma = None

    # find all leptons
    leptons = [(search.ascend(l), l) for l in filter(
        lambda p: abs(p.pdgId()) in [11, 13] and search.isLast(p) and p.pt() >=
        0, gp)]
    leps = []
    for first, last in leptons:
        mother_pdgId = first.mother(
            0).pdgId() if first.numberOfMothers() > 0 else -1
        leps.append({var: getattr(last, var)() for var in lep_varnames})
        leps[-1]['motherPdgId'] = mother_pdgId

    leps.sort(key=lambda p: -p['pt'])
    fill_vector(event, "GenLep", lep_all_varnames, leps)

    # MET
    event.GenMet_pt = reader.products['genMET'][0].pt()
    event.GenMet_phi = reader.products['genMET'][0].phi()

    # jets
    jets = map(lambda t: {var: getattr(t, var)()
                          for var in jet_read_varnames},
               filter(lambda j: j.pt() > 30, reader.products['genJets']))

    # jet/lepton disambiguation
    jets = filter(
        lambda j: (min([999] + [deltaR2(j, l) for l in leps
                                if l['pt'] > 10]) > 0.3**2), jets)

    # find b's from tops:
    b_partons = [
        b for b in filter(
            lambda p: abs(p.pdgId()) == 5 and p.numberOfMothers() == 1 and abs(
                p.mother(0).pdgId()) == 6, gp)
    ]

    for jet in jets:
        jet['matchBParton'] = (min([999] + [
            deltaR2(jet, {
                'eta': b.eta(),
                'phi': b.phi()
            }) for b in b_partons
        ]) < 0.2**2)

    jets.sort(key=lambda p: -p['pt'])
    fill_vector(event, "GenJet", jet_write_varnames, jets)
Example #15
def make_batch_job(batch_job_file, batch_job_title, batch_output_dir, command):
    # If X509_USER_PROXY is set, use existing proxy.
    if options.dpm:
        if host == 'lxplus':
            from StopsDilepton.Tools.user import cern_proxy_certificate
            proxy_location = cern_proxy_certificate
        else:
            proxy_location = None

        from RootTools.core.helpers import renew_proxy
        proxy = renew_proxy(proxy_location)

        logger.info("Using proxy certificate %s", proxy)
        proxy_cmd = "export X509_USER_PROXY=%s" % proxy
    else:
        proxy_cmd = ""

    import subprocess

    if host == 'heplx':
        template =\
"""\
#!/bin/sh
#SBATCH -J {batch_job_title}
#SBATCH -D {pwd}
#SBATCH -o {batch_output_dir}batch-test.%j.out

{proxy_cmd}
voms-proxy-info -all
eval \`scramv1 runtime -sh\`
echo CMSSW_BASE: {cmssw_base} 
echo Executing user command  
echo "{command}"
{command} 

voms-proxy-info -all

""".format(\
                command          = command,
                cmssw_base       = os.getenv("CMSSW_BASE"),
                batch_output_dir = batch_output_dir,
                batch_job_title  = batch_job_title,
                pwd              = os.getenv("PWD"),
                proxy_cmd = proxy_cmd
              )
    elif host == 'lxplus':
        template =\
"""\
#!/bin/bash
export CMSSW_PROJECT_SRC={cmssw_base}/src

cd $CMSSW_PROJECT_SRC
eval `scramv1 ru -sh`

alias python={python_release}
which python
python --version

{proxy_cmd}
voms-proxy-info -all
echo CMSSW_BASE: $CMSSW_BASE
cd {pwd}
echo Executing user command while in $PWD
echo "{command}"
{command} 

voms-proxy-info -all

""".format(\
                command          = command,
                cmssw_base       = os.getenv("CMSSW_BASE"),
                #batch_output_dir = batch_output_dir,
                #batch_job_title  = batch_job_title,
                pwd              = os.getenv("PWD"),
                proxy_cmd = proxy_cmd,
                python_release = subprocess.check_output(['which', 'python']).rstrip(),
              )

    with open(batch_job_file, "w") as batch_job:
        batch_job.write(template)
    return
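
make_batch_job only writes the script; submission happens elsewhere. A hedged usage sketch (the sbatch call is an assumption, inferred from the #SBATCH directives in the heplx template):

import subprocess

make_batch_job("job.sh", "myJob", "/tmp/batch_out/", "python run.py --job 0")
subprocess.call(["sbatch", "job.sh"])  # heplx runs SLURM, per the #SBATCH headers
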
Example #16
    yields[mode]["MC"] = sum(yields[mode][s.name] for s in mc)
    dataMCScale        = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan')

    drawPlots(plots, mode, dataMCScale)
    allPlots[mode] = plots

# Add the different channels into SF and all
for mode in ["comb1","all"]:
    yields[mode] = {}
    for y in yields[allModes[0]]:
        try:    yields[mode][y] = sum(yields[c][y] for c in ['ee','mue','mumu'])
        except Exception: yields[mode][y] = 0  # missing channel/process falls back to 0
    dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan')
    
    for plot in allPlots['mumu']:
        if mode=="comb1":
            tmp = allPlots['mue']
        else:
            tmp = allPlots['ee']
        for plot2 in (p for p in tmp if p.name == plot.name):
            for i, j in enumerate(list(itertools.chain.from_iterable(plot.histos))):
                for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))):
                    if i==k:
                        j.Add(l)
    
    if mode == "all": drawPlots(allPlots['mumu'], mode, dataMCScale)

logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )

Example #17
        "color": ROOT.kRed + 1
    },
    "ttH": {
        "marker": 22,
        "color": ROOT.kGreen + 2
    }
}

processes = ["ttZ", "ttH", "ttW"]

SM_xsec = {}

modified_couplings = {args.coupling: 0.0}

for proc in processes:
    logger.info("Checking SM x-sec:")
    config = Configuration(model_name=model_name,
                           modified_couplings=modified_couplings)
    p = Process(process=proc, nEvents=50000, config=config)
    SM_xsec[proc] = p.xsec()
    logger.info("SM x-sec for %s is %s", proc, SM_xsec[proc])
    if SM_xsec[proc].val == 0.: SM_xsec[proc] = u_float(1)

del config

hists = []
fits = []
m = 0

if args.scale:
    scale = lambdaSqInv[args.coupling]
    source_gen = dim6top_central
    #source_gen = ewkDM_central

    #allTargets = allSamples_dim6top
    #allTargets = ewkDM_currents + [ ewkDM_central ]
    #allTargets = dim6top_all
    allTargets = dim6top_currents
    #allTargets = ewkDM_all

    print len(allTargets)

    #for t in allTargets:
    #    print t.name

    for target in allTargets:
        logger.info("Working on target samples %s", target.name)
        target_gen = target

        signalReweighting = SignalReweighting(source_sample=source_gen,
                                              target_sample=target_gen,
                                              cacheDir=cacheDir)

        # reweighting selection
        selection = "Sum$(GenJet_pt>30)>=3&& abs(Z_mass-91.2)<10&&(abs(Z_daughterPdg)==11 || abs(Z_daughterPdg)==13 || abs(Z_daughterPdg)==15 )"

        # reweighting function
        f = signalReweighting.cachedReweightingFunc(selection)

        # plot the reweighting matrix
        if args.makePlots:
            matrix = signalReweighting.cachedTemplate(selection,
Example #19
# select MC generation. For 2016, always use Summer16
MCgeneration = options.MCgeneration
if options.year == 2016:
    MCgeneration = "Summer16"

samples = [
    fromHeppySample(s,
                    data_path=options.dataDir,
                    maxN=maxN,
                    MCgeneration=MCgeneration) for s in options.samples
]
logger.debug("Reading from CMG tuples: %s",
             ",".join(",".join(s.files) for s in samples))

if len(samples) == 0:
    logger.info("No samples found. Was looking for %s. Exiting" %
                options.samples)
    sys.exit(-1)

isData = False not in [s.isData for s in samples]
isMC = True not in [s.isData for s in samples]

# Check that all samples which are concatenated have the same x-section.
assert isData or len(set([
    s.heppy.xSection for s in samples
])) == 1, "Not all samples have the same xSection: %s !" % (",".join(
    [s.name for s in samples]))
assert isMC or len(samples) == 1, "Don't concatenate data samples"

xSection = samples[0].heppy.xSection if isMC else None

diMuTriggers = [
Example #20
    target_gen  = s[1]

    modification_dict = {}
    
    nonZeroCouplings = ["DC1A","DC1V","DC2A","DC2V"]
    
    # for safety, set all couplings to 0.
    modification_dict["DC1A"]   = 0.
    modification_dict["DC1V"]   = 0.
    modification_dict["DC2A"]   = 0.
    modification_dict["DC2V"]   = 0.
    
    for coup in nonZeroCouplings:
        try:
            modification_dict[coup] = getCouplingFromName(target_reco.name, coup)
            logger.info("The following coupling is set to non-zero value: %s: %s", coup, modification_dict[coup])
        except ValueError:
            logger.info("The following coupling is kept at zero: %s: %s", coup, modification_dict[coup])
            continue
    
    #for s in ewkDM_all:
    #    if s.name == "ewkDM_%s"%target_reco.name.replace('0j',''):
    #        target_gen = s
    #        print "Found gen sample"
    
    #SM_reco = TTZtoLLNuNu
    SM_reco = ttZ0j_ll
    
    reco_selection = 'nJetSelected>=3&&nBTag>=1&&min_dl_mass>=12&&abs(Z_mass - 91.1876)<=10&&Z_fromTight>0&&nLeptons_tight_3l==3&&Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>40&&lep_tight_3l>0)>0&&Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>20&&lep_tight_3l>0)>1&&Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>10&&lep_tight_3l>0)>2&&!(nLeptons_tight_4l>=4)'
    
    norm_source     = SM_reco.getYieldFromDraw(reco_selection, "weight")
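
getCouplingFromName, used here and in Example 10, extracts a coupling value from sample names like ewkDM_ttZ_ll_DC2A_0p200000_DC2V_m0p150000 (see the commented-out names in Example 1). A hedged reconstruction of such a parser; the project's real implementation may differ:

import re

def getCouplingFromName(name, coupling):
    """Parse e.g. 'DC2V_m0p150000' from a sample name; a leading 'm' marks a minus sign."""
    m = re.search(r"%s_(m?)(\d+)p(\d+)" % coupling, name)
    if not m:
        raise ValueError("Coupling %s not found in %s" % (coupling, name))
    sign = -1. if m.group(1) == 'm' else 1.
    return sign * float("%s.%s" % (m.group(2), m.group(3)))

# getCouplingFromName("ewkDM_ttZ_ll_DC2A_0p200000_DC2V_m0p150000", "DC2V")  # -> -0.15
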
Example #21
setup16.short = True

##Summer16 samples

allRegions = regionsE + regions4lB
regions = allRegions

hists = {}
Nbins = len(regions)
for i, var in enumerate(["PDF", "scale", "PSscale"]):
    hists[var] = ROOT.TH1F(var, var, Nbins, 0, Nbins)
    hists[var].style = styles.lineStyle(colors[i], width=2)
    hists[var].legendText = var

for i, r in enumerate(regions):
    logger.info("Working on 2016 results")

    hists["PDF"].SetBinContent(
        i + 1,
        PDF_cache.get({
            "region": r,
            "channel": 'all',
            "PDFset": "PDF4LHC15_nlo_100"
        }).val)
    hists["PDF"].GetXaxis().SetBinLabel(i + 1, "%i" % (i + 1))
    hists["scale"].SetBinContent(
        i + 1,
        scale_cache.get({
            "region": r,
            "channel": 'all',
            "PDFset": "scale"
Example #22
        for c in seperateChannels4l:
            jobs.append((estimate, noRegions[0], c, setupIncl4l))

    if not options.combine:
        for region in regions:
            seperateChannels = seperateChannels4l if region in regions4lB else seperateChannels3l
            for c in seperateChannels:
                #for region in regions:
                if options.controlRegion:
                    setup = setupCR4l if region in regions4lB else setupCR3l
                else:
                    setup = setup4l if region in regions4lB else setup3l

                jobs.append((estimate, region, c, setup))

        logger.info("Created %s jobs", len(jobs))

        if options.noMultiThreading:
            results = map(wrapper, jobs)
        else:
            from multiprocessing import Pool
            pool = Pool(processes=8)
            results = pool.map(wrapper, jobs)
            pool.close()
            pool.join()

    logger.info("All done with the estimates.")

if options.combine:
    for c in [channel(-1, -1)]:  #allChannels:
Example #23
                       default=2,
                       help="Interpolation order for EFT weights.")
args = argParser.parse_args()

#
# Logger
#
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt

logger = logger.get_logger(args.logLevel, logFile=None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile=None)

# Load sample either from
if len(args.inputFiles) > 0:
    logger.info("Input files found. Ignoring 'sample' argument. Files: %r",
                args.inputFiles)
    sample = FWLiteSample(args.targetSampleName, args.inputFiles)
else:
    sample_file = "$CMSSW_BASE/python/TopEFT/samples/fwlite_benchmarks.py"
    samples = imp.load_source("samples", os.path.expandvars(sample_file))
    sample = getattr(samples, args.sample)
    print sample.files

maxEvents = -1
if args.small:
    args.targetDir += "_small"
    maxEvents = 100  # Number of files
    sample.files = sample.files[:1]

# Load reweight pickle file if supposed to keep weights.
extra_variables = []
Example #24
        cutInterpreter.cutString(args.selection)
    ])
    TTZ_ptG = TTZtoLLNuNu.get1DHistoFromDraw("gamma_pt[0]", [20, 0, 1000],
                                             selectionString=sel_string,
                                             weightString="weight")
    TTZ_ptG.Scale(1. / TTZ_ptG.Integral())

    def get_reweight(var, histo):
        def reweight(event, sample):
            i_bin = histo.FindBin(getattr(event, var))
            return histo.GetBinContent(i_bin)

        return reweight

    for signal in signals:
        logger.info("Computing PtG reweighting for signal %s", signal.name)
        signal_ptG = signal.get1DHistoFromDraw("Z_pt", [20, 0, 1000],
                                               selectionString=sel_string,
                                               weightString="weight")
        signal_ptG.Scale(1. / signal_ptG.Integral())

        signal.reweight_ptG_histo = TTZ_ptG.Clone()
        signal.reweight_ptG_histo.Divide(signal_ptG)

        signal.weight = get_reweight("Z_pt", signal.reweight_ptG_histo)

# Read variables and sequences
#
read_variables = [
    "weight/F",
    "jet[pt/F,eta/F,phi/F,btagCSV/F,id/I]",
Example #25
    if line:
        if split:
            logger.info("Splitting in %i jobs", split)
            for i in range(split):
                commands.append(line + " --nJobs %i --job %i" % (split, i))
        else:
            commands.append(line)
    return commands


if __name__ == '__main__':
    if not len(args) == 1:
        raise Exception(
            "Only one argument accepted! Instead this was given: %s" % args)
    if os.path.isfile(args[0]):
        logger.info("Reading commands from file: %s", args[0])
        commands = []
        with open(args[0]) as f:
            for line in f:
                commands.extend(getCommands(line.rstrip("\n")))

    elif isinstance(args[0], str):
        commands = getCommands(args[0])
    if commands:
        logger.info("Working on host %s", host)
        if host == 'heplx':
            if not os.path.isdir(batch_output_dir):
                os.mkdir(batch_output_dir)

            logger.info(
                "Batch system output file to be written to directory: %s",
Example #26
                           save=True,
                           overwrite=options.overwrite)
    return (e.uniqueKey(r, channel, setup), res)


jobs = []

if options.sample not in ["Data", "TTZ", "pseudoData", "nonprompt"]:
    estimators = estimators.constructEstimatorList([
        options.sample
    ]) if options.sample else estimators.constructEstimatorList(
        ["WZ", "TTX", "TTW", "TZQ", "rare"])
else:
    estimators = []

logger.info("Starting estimates for sample %s",
            setup.samples[options.sample].name)

for setup in allSetups:

    signal = MCBasedEstimate(name="TTZ_%s" % year,
                             sample=setup.samples["TTZ"],
                             cacheDir=setup.defaultCacheDir())
    data = DataObservation(name="Data_%s" % year,
                           sample=setup.samples["Data"],
                           cacheDir=setup.defaultCacheDir())
    observation = MCBasedEstimate(name="observation_%s" % year,
                                  sample=setup.samples["pseudoData"],
                                  cacheDir=setup.defaultCacheDir())
    nonprompt = FakeEstimate(name="nonPromptDD_%s" % year,
                             sample=setup.samples["Data"],
                             setup=setup,
Example #27
    from TopEFT.samples.heppy_dpm_samples import lepton_2016_heppy_mapper as lepton_heppy_mapper
else:
    module_ = 'CMGTools.RootTools.samples.samples_13TeV_RunIIFall17MiniAOD'
    MCgeneration = "Fall17"
    from TopEFT.samples.heppy_dpm_samples import lepton_2017_heppy_mapper as lepton_heppy_mapper

try:
    heppy_sample = getattr(importlib.import_module( module_ ), options.sample)
except:
    raise ValueError( "Could not load sample '%s' from %s "%( options.sample, module_ ) )


sample = lepton_heppy_mapper.from_heppy_samplename(heppy_sample.name, maxN = maxN)
    
if sample is None or len(sample.files)==0:
    logger.info( "Sample %r is empty. Exiting" % sample )
    sys.exit(-1)
else:
    logger.info( "Sample %s has %i files", sample.name, len(sample.files))

len_orig = len(sample.files)
sample = sample.split( n=options.nJobs, nSub=options.job)
logger.info( " Run over %i/%i files for job %i/%i."%(len(sample.files), len_orig, options.job, options.nJobs))
logger.debug( "Files to be run over:\n%s", "\n".join(sample.files) )

#output directory

output_directory = os.path.join( skim_output_directory, options.version+('_small' if options.small else ''), str(options.year) ) 

leptonClasses  = [{'Name':'Prompt', 'Var': 'lep_isPromptId'}, {'Name':'NonPrompt', 'Var': 'lep_isNonPromptId'}, {'Name':'Fake', 'Var': 'lep_isFakeId'}]
leptonFlavours = [
Example #28
        comment = None

    # Remove blanks
    if not job.strip(): continue

    args = job.split()  #filter(lambda s:not s.startswith("SPLIT"), cmds)
    if comment is not None and "SPLIT" in comment:
        try:
            n = int(comment.replace("SPLIT", ""))
        except ValueError:
            n = -1
    else:
        n = -1

    if n > 0:
        logger.info("Splitting into %i jobs: %r", n, " ".join(args))
        for i in range(n):
            j_args = args + ["--nJobs", str(n), "--job", str(i)]
            logger.info("Queuing job %r", " ".join(j_args))
            jobs.append(j_args)
    else:
        logger.info("No splitting. Queuing job %r", " ".join(args))
        jobs.append(args)

extra_args = []
#if len(sys.argv)>=2:
#    extra_args = sys.argv[2:]

for cmds in jobs:
    if command != "": cmds_ = [command] + cmds + extra_args
    else: cmds_ = cmds + extra_args
Example #29
## then one can sum up over all (currently done in the combine step)
#for var in variations:
#    jobs.append((noRegions[0], "all", setupIncl.systematicClone(sys={'reweight':[var]})))

if not options.combine:
    for region in regions:
        seperateChannels = seperateChannels4l if region in regions4lB else seperateChannels3l
        for c in seperateChannels:
            #for region in regions:
            setup = setup4l if region in regions4lB else setup3l
            jobs.append((region, c, setup))
            for var in variations:
                jobs.append((region, c,
                             setup.systematicClone(sys={'reweight': [var]})))

    logger.info("Created %s jobs", len(jobs))

    if options.noMultiThreading:
        results = map(wrapper, jobs)
    else:
        from multiprocessing import Pool
        pool = Pool(processes=8)
        results = pool.map(wrapper, jobs)
        pool.close()
        pool.join()

    logger.info("All done.")

PDF_unc = []
Scale_unc = []
PS_unc = []
Example #30
    def _estimate(self, region, channel, setup):

        ''' Concrete implementation of abstract method 'estimate' as defined in Systematic
        '''

        logger.debug( "Obtain polarisation Estimate for channel %s region %s", channel, region )

        # Obtain fit template from an unbiased Z sample. FIXME: should be eta- and pt-reweighted
        #def_setup = setup.defaultClone() # Don't use systematic variations for templates
        sub_templates = []
        # Here, I assume the sample(!) is the same for all flavors
        template_maker = PolarisationTemplateMaker( setup.samples['TTZ']['3mu'], cacheDir = os.path.join( results_directory, 'PolarisationTemplateCache' ) )
        region_cut = region.cutString().replace('Z_pt','genZ_pt') # Make the template cut on genZ. Approximation; don't use sys. variations
        cuts = [region_cut]
        # If we know which Z flavor, then require it for the template
        if channel in ['3e', '2e1mu']:
            cuts.append( "genZ_daughter_flavor==11" )
        elif channel in  ['3mu', '2mu1e']:
            cuts.append( "genZ_daughter_flavor==13" )
        cut = "&&".join( cuts )

        logger.debug( "Making sub_template '%s' for polarisation fit using selection '%s' and weight '%s'", channel, cut, 'weight')
        templates = template_maker.cachedTemplate(cut, 'weight')

        # Obtain selection strings & weight from setup

        background_mc = {}
        background_mc_keys = []
        ttz_mc        = []
        data          = []
        for ch in ( [channel] if channel!='all' else channels):
            # Background MC
            for sample_name, sample in setup.samples.iteritems():
                if sample_name == 'Data':
                    pre_selection = setup.preselection('Data', channel=ch)
                    cut     = "&&".join( [ region.cutString(), pre_selection['cut'] ] )
                    weight  = pre_selection['weightStr']
                else:
                    pre_selection = setup.preselection('MC', channel=ch)
                    cut     = "&&".join( [ region.cutString(setup.sys['selectionModifier']), pre_selection['cut'] ] )
                    weight  = pre_selection['weightStr']
                    if sample_name not in background_mc_keys: background_mc_keys.append( sample_name )

                logger.info( "Get cosThetaStar histogram for sample %s channel %s cut %s weight %s" %( sample[ch].name, ch, cut, weight) )
                h = sample[ch].get1DHistoFromDraw( 'cosThetaStar', [20, -1, 1 ], selectionString = cut, weightString = weight )

                # Append & Scale
                if sample_name == 'Data':
                    data.append( h )
                elif sample_name == 'TTZ':
                    h.Scale( setup.lumi[ch]/1000. )
                    ttz_mc.append( h )
                else:
                    h.Scale( setup.lumi[ch]/1000. )
                    if sample_name in background_mc:
                        background_mc[sample_name].append(h)
                    else:
                        background_mc[sample_name] = [h]

        h_background_mc = []
        for sample_name in background_mc_keys:
            if sample_name=='TTZ': continue
            h_background_mc.append( sum_histos(background_mc[sample_name]) )
            h_background_mc[-1].style = styles.fillStyle( getattr(color, sample_name))
            h_background_mc[-1].legendText = sample_name

        h_ttz_mc        = sum_histos( ttz_mc )
        h_ttz_mc.style = styles.fillStyle( color.TTZtoLLNuNu  )
        h_ttz_mc.legendText = 'TTZ'

        h_data          = sum_histos(data)
        h_data.style = styles.errorStyle( ROOT.kBlack  )
        h_data.legendText = 'Data (%s)' % channel

        # Subtract MC from Data
        if self.usePseudoData:
            h_data_subtracted = h_ttz_mc.Clone()
            h_data_subtracted.Sumw2(0)
        else:
            scale = h_data.Integral() / (sum( [h.Integral() for h in h_background_mc ]) + h_ttz_mc.Integral())

            for h in h_background_mc:
                h.Scale( 1./scale )
            h_ttz_mc.Scale( 1./scale )

            h_data_subtracted = sum_histos( h_background_mc )
            h_data_subtracted.Scale(-1)
            h_data_subtracted.Add( h_data )

        h_data.style = styles.errorStyle( ROOT.kBlack  )
        h_data.legendText = 'Data (%s)' % channel
        h_data_subtracted.style = styles.errorStyle( ROOT.kBlack  )
        h_data_subtracted.legendText = 'Data (%s) subtr %3.1f' % ( channel, h_data_subtracted.Integral() )

        # Perform Fit

        y_p, y_m, y_L = map( u_float, ZPolarisationFit( h_data_subtracted, [templates[p] for p in ['p','m','L']], \
            fit_plot_directory = os.path.join( plot_directory,  'polFits'),  
            fit_filename       = "fit_pseudoData_%s_%s_%s"%( self.usePseudoData,  channel, region),
            sumW2Error = False # predict stat error
        ))

        templates['p'].Scale(y_p.val)
        templates['m'].Scale(y_m.val)
        templates['L'].Scale(y_L.val)
        templates['p'].style = styles.lineStyle( ROOT.kRed, width=2 )
        templates['m'].style = styles.lineStyle( ROOT.kGreen, width=2 )
        templates['L'].style = styles.lineStyle( ROOT.kMagenta, width=2 )

        h_fitresults = sum_histos( templates.values() )
        h_fitresults.style = styles.lineStyle( ROOT.kBlue, width = 2 )
        h_fitresults.legendText = "TTZ fit (sum)"

        histos = [ h_background_mc + [h_ttz_mc], [templates['p']], [templates['m']], [templates['L']], [h_fitresults], [h_data]]

        plot = Plot.fromHisto(name = "fit_plot_pseudoData_%s_%s_%s"%( self.usePseudoData,  channel, region), histos =  histos , texX = "cos#theta^{*}", texY = "Events" )  
        plotting.draw(plot, 
            plot_directory = os.path.join( plot_directory,  'polFits'), 
            logX = False, logY = False, sorting = True,
            legend      = ([0.15,0.7,0.90,0.90], 2)
            )

        templates['p'].legendText  = 'pol(+) %3.1f #pm %3.1f'%( y_p.val, y_p.sigma ) 
        templates['m'].legendText  = 'pol(-) %3.1f #pm %3.1f'%( y_m.val, y_m.sigma ) 
        templates['L'].legendText  = 'pol(L) %3.1f #pm %3.1f'%( y_L.val, y_L.sigma )

        histos = [ [h_ttz_mc], [templates['p']], [templates['m']], [templates['L']], [h_fitresults], [h_data_subtracted]]

        plot = Plot.fromHisto(name = "fit_plot_subtracted_pseudoData_%s_%s_%s"%( self.usePseudoData,  channel, region), histos =  histos , texX = "cos#theta^{*}", texY = "Events" )  
        plotting.draw(plot, 
            plot_directory = os.path.join( plot_directory,  'polFits'), 
            logX = False, logY = False, sorting = False,
            legend      = ([0.15,0.7,0.90,0.90], 2),
            yRange = (0, 30),
            )