def getDataProfile(outfname,JSON,pileup,bins,era,minbias,local=False): """Get pileup profile in data with pileupCalc.py tool.""" print '>>> getDataProfile("%s",%d,%s)'%(outfname,bins,minbias) # CREATE profile if local: JSON = copy2local(JSON) pileup = copy2local(pileup) command = "./pileupCalc.py -i %s --inputLumiJSON %s --calcMode true --maxPileupBin %d --numPileupBins %d --minBiasXsec %d %s --verbose"%(JSON,pileup,bins,bins,minbias*1000,outfname) else: command = "pileupCalc.py -i %s --inputLumiJSON %s --calcMode true --maxPileupBin %d --numPileupBins %d --minBiasXsec %d %s"%(JSON,pileup,bins,bins,minbias*1000,outfname) print ">>> executing command (this may take a while):" print ">>> " + command os.system(command) # GET profile histname = 'pileup' if not os.path.isfile(outfname): print ">>> Warning! getDataProfile: Could find output file %s!"%(outfname) return file, hist = gethist(outfname,histname,retfile=True) hist.SetName("%s_%s"%(histname,str(minbias).replace('.','p'))) hist.SetTitle("Data %s, %.1f pb"%(era,minbias)) hist.SetDirectory(0) bin0 = 100.0*hist.GetBinContent(0)/hist.Integral() bin1 = 100.0*hist.GetBinContent(1)/hist.Integral() if bin0>0.01 or bin1>0.01: print ">>> Warning! First to bins have %.2f%% (0) and %.2f%% (1)"%(bin0,bin1) hist.SetBinContent(0,0.0) hist.SetBinContent(1,0.0) print ">>> pileup profile in data with min. bias %s mb has a mean of %.1f"%(minbias,hist.GetMean()) file.Close() return hist
def getMCProfile(outfname, samples, channel, era, tag=""): """Get pileup profile in MC by adding Pileup_nTrueInt histograms from a given list of samples.""" print '>>> getMCProfile("%s")' % (outfname) nprofiles = 0 histname = 'pileup' tothist = None for sample, infname in samples: print ">>> %s" % (infname) file, hist = gethist(infname, histname, retfile=True) if not file: print ">>> Did not find %s..." % (infname) continue if not hist: print ">>> Did not find %s:%r..." % (infname, histname) continue if tothist == None: tothist = hist.Clone('pileup') tothist.SetTitle('pileup') #tothist.SetTitle('MC average') tothist.SetDirectory(0) nprofiles += 1 else: tothist.Add(hist) nprofiles += 1 file.Close() print ">>> added %d MC profiles, %d entries, %.1f mean" % ( nprofiles, tothist.GetEntries(), tothist.GetMean()) file = TFile(outfname, 'RECREATE') tothist.Write('pileup') tothist.SetDirectory(0) file.Close() return tothist
def compareMCProfiles(samples, channel, era, tag=""): """Compare MC profiles.""" print ">>> compareMCProfiles()" hname = 'pileup' htitle = 'MC average' outdir = ensuredir("plots") avehist = None hists = [] if tag and tag[0] != '_': tag = '_' + tag if 'pmx' in tag: htitle += " %s pre-mixing" % ("old" if "old" in tag else "new") # GET histograms for sample, fname in samples: print ">>> %s" % (fname) file, hist = gethist(fname, hname, retfile=True) hist.SetName(sample) hist.SetTitle(sample) hist.SetDirectory(0) if avehist == None: avehist = hist.Clone('average%s' % tag) avehist.SetTitle(htitle) avehist.SetDirectory(0) else: avehist.Add(hist) hist.Scale(1. / hist.Integral()) hists.append(hist) file.Close() # PLOT hists = [avehist] + hists colors = [kBlack] + linecolors avehist.Scale(1. / avehist.Integral()) pname = "%s/pileup_MC_%s%s" % (outdir, era, tag) xtitle = "Number of true interactions" plot = Plot(hists, ratio=True) plot.draw(xtitle=xtitle, ytitle="A.U.", rtitle="MC / Ave.", textsize=0.032, rmin=0.45, rmax=1.55, denom=2, colors=colors) plot.drawlegend('TTR', tsize=0.04, latex=False) plot.saveas(pname + ".png") plot.saveas(pname + ".pdf") plot.close(keep=True) for hist in hists: # clean memory if hist == avehist: continue if hist.GetDirectory(): gDirectory.Delete(hist.GetName()) else: hist.Delete() return avehist
def stackinputs(file, variable, processes, **kwargs):
  """Draw a stacked plot of the histograms for the given processes,
  read from a ROOT file (optionally from a subdirectory).
    file:      TFile or TDirectory object
    variable:  Variable object
    processes: list of process names; 'data_obs' is treated as data
  e.g. stackinputs(file,variable,['ZTT','TTT','W','QCD','data_obs'])
  """
  text   = kwargs.get('text',  None)
  tag    = kwargs.get('tag',   "")
  groups = kwargs.get('group', [])             # e.g. [(['^TT','ST'],'Top')]
  dname  = kwargs.get('dname', None)           # directory ('bin') name
  pname  = kwargs.get('save',  "stack$TAG.png") # save as image file
  wname  = kwargs.get('write', "stack$TAG")     # write to file
  style  = kwargs.get('style', False)           # write style to file
  tdir = ensureTDirectory(file, dname, cd=True) if dname else file
  if style: # store the current TStyle so the plot can be reproduced later
    gStyle.Write('style', TH1.kOverwrite)
  datahist = None
  mchists  = []
  for process in processes:
    hist = gethist(tdir, process, fatal=False, warn=False)
    if not hist: # abort: a stack with a missing contribution is misleading
      LOG.warning(
          "stackinputs: Could not find %r in %s. Skipping stacked plot..."
          % (process, tdir.GetPath()))
      return
    hist.SetDirectory(0)
    hist.SetLineColor(kBlack)
    hist.SetFillStyle(1001) # assume fill color is already correct
    if process == 'data_obs':
      datahist = hist
    else:
      mchists.append(hist)
  for group in groups: # merge related expectation histograms
    grouphists(mchists, *group, replace=True, regex=True, verb=0)
  stack = Stack(variable, datahist, mchists)
  stack.draw()
  stack.drawlegend(ncols=2, twidth=0.9)
  if text:
    stack.drawtext(text)
  if pname:
    stack.saveas(repkey(pname, TAG=tag), ext=['png'])
  if wname:
    stack.canvas.Write(repkey(wname, TAG=tag), TH1.kOverwrite)
  stack.close()
def comparevars(file, variable, processes, systag, **kwargs):
  """Compare up/down variations of input histograms from a ROOT file.
    file:      TFile or TDirectory object
    variable:  Variable object
    processes: list of strings (names of processes)
    systag:    string of systematic (file must contain up/down variations)
  e.g. comparevars(file,variable,['ZTT','TTT'],'TES')
  """
  text    = kwargs.get('text',  None)
  tag     = kwargs.get('tag',   "")
  dname   = kwargs.get('dname', None)                      # directory ('bin') name
  pname   = kwargs.get('save',  "plot_$PROC$SYST$TAG.png") # save as image file
  wname   = kwargs.get('write', "plot_$PROC$SYST$TAG")     # write to file
  processes = ensurelist(processes)
  uptag   = systag + "Up"
  downtag = systag + "Down"
  tdir    = ensureTDirectory(file, dname, cd=True) if dname else file
  for process in processes:
    # collect [up, nominal, down]; skip the process if any shape is missing
    hists = []
    skip  = False
    for var in [uptag, "", downtag]:
      hname = process + var
      hist  = gethist(tdir, hname, fatal=False, warn=False)
      if not hist:
        skip = True
        break
      hists.append(hist)
    if skip:
      LOG.warning(
          "comparevars: Could not find %r in %s. Skipping shape comparison..."
          % (hname, tdir.GetPath()))
      continue
    plot = Plot(variable, hists)
    plot.draw(ratio=2, lstyle=1) # ratio w.r.t. the nominal (second) histogram
    plot.drawlegend()
    if text:
      plot.drawtext(text)
    if pname:
      # NOTE(review): $SYST is not substituted here — confirm repkey leaves or strips it
      pname_ = repkey(pname, PROC=process, TAG=tag)
      plot.saveas(pname_, ext=['png'])
    if wname:
      wname_ = repkey(wname, PROC=process, TAG=tag)
      plot.canvas.Write(wname_, TH1.kOverwrite)
    plot.close()
def __init__(self, trigger, wp='Medium', id='DeepTau2017v2p1', year=2016): """Load tau trigger histograms from files.""" print "Loading %s trigger SFs for %s WP of %s ID for year %d..." % ( trigger, wp, id, year) # CHECKS dms = [0, 1, 10, 11] triggers = ['ditau', 'mutau', 'etau'] years = [2016, 2017, 2018] ids = ['DeepTau2017v2p1'] wps = [ 'VVVLoose', 'VVLoose', 'VLoose', 'Loose', 'Medium', 'Tight', 'VTight', 'VVTight' ] trigger = trigger.replace('tautau', 'ditau').replace('eletau', 'etau') assert trigger in triggers, "Did not recognize '%s' trigger! Choose from: '%s' triggers." % ( trigger, "', '".join(triggers)) assert wp in wps, "Did not recognize '%s' WP! Choose from: '%s'" % ( wp, "', '".join(wps)) assert id in ids, "Did not recognize '%s' ID! Choose from: '%s'." % ( id, "', '".join(ids)) assert year in years, "Did not recognize '%s' year! Choose from: %s." % ( year, "', '".join(str(y) for y in years)) # GET DATA file = ensureTFile('%s/%d_tauTriggerEff_%s.root' % (datadir, year, id), 'r') hists_data, hists_mc, hists_sf = {}, {}, {} for dm in dms: for histtype, histdict in [('data', hists_data), ('mc', hists_mc), ('sf', hists_sf)]: histname = "%s_%s_%s_dm%d_fitted" % (histtype, trigger, wp, dm) histdict[dm] = gethist(file, histname) file.Close() self.hists_data = hists_data self.hists_mc = hists_mc self.hists_sf = hists_sf self.trigger = trigger self.year = year self.wp = wp self.id = id self.dms = dms
def main(): eras = args.eras periods = cleanEras(args.periods) channel = args.channel types = args.types verbosity = args.verbosity minbiases = [69.2] if periods else [69.2, 80.0, 69.2 * 1.046, 69.2 * 0.954] for era in args.eras: year = getyear(era) mcfilename = "MC_PileUp_%s.root" % (era) jsondir = os.path.join(datadir, 'json', str(year)) pileup = os.path.join(jsondir, "pileup_latest.txt") CMSStyle.setCMSEra(year) if era == '2016': # https://twiki.cern.ch/twiki/bin/viewauth/CMS/PdmV2017Analysis # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt" # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/Final/Cert_271036-284044_13TeV_PromptReco_Collisions16_JSON.txt # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/PileUp/pileup_latest.txt JSON = os.path.join( jsondir, "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt" ) datasets = { 'B': (272007, 275376), 'C': (275657, 276283), 'D': (276315, 276811), 'E': (276831, 277420), 'F': (277772, 278808), 'G': (278820, 280385), 'H': (280919, 284044), } campaign = "Moriond17" samples = [ ( 'TT', "TT", ), ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DYJetsToLL_M-50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W3JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "WZ", ), ( 'VV', "ZZ", ), ] elif '2017' in era: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/PdmV2017Analysis # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/Final/Cert_271036-284044_13TeV_PromptReco_Collisions16_JSON.txt # 
/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/13TeV/PileUp/pileup_latest.txt JSON = os.path.join( jsondir, "Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt") datasets = { 'B': (297020, 299329), 'C': (299337, 302029), 'D': (302030, 303434), 'E': (303435, 304826), 'F': (304911, 306462), } samples_bug = [] samples_fix = [] if 'UL' in era: campaign = "Summer19" samples_fix = [ #( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DYJetsToLL_M-50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'DY', "DY4JetsToLL_M-50", ), #( 'TT', "TTTo2L2Nu", ), ( 'TT', "TTToHadronic", ), #( 'TT', "TTToSemiLeptonic", ), ( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W3JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), #( 'VV', "WW", ), #( 'VV', "WZ", ), #( 'VV', "ZZ", ), ] else: campaign = "Winter17_V2" samples_bug = [ ( 'DY', "DYJetsToLL_M-50", ), ( 'WJ', "W3JetsToLNu", ), ( 'VV', "WZ", ), ] samples_fix = [ ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'DY', "DY4JetsToLL_M-50", ), ( 'TT', "TTTo2L2Nu", ), ( 'TT', "TTToHadronic", ), ( 'TT', "TTToSemiLeptonic", ), ( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "ZZ", ), ] samples = samples_bug + samples_fix else: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/PdmV2018Analysis # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/PromptReco # /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/PileUp/pileup_latest.txt JSON = os.path.join( jsondir, 
"Cert_314472-325175_13TeV_PromptReco_Collisions18_JSON.txt") datasets = { 'A': (315252, 316995), 'B': (317080, 319310), 'C': (319337, 320065), 'D': (320673, 325175), } campaign = "Autumn18" samples = [ ( 'TT', "TTTo2L2Nu", ), ( 'TT', "TTToHadronic", ), ( 'TT', "TTToSemiLeptonic", ), ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DYJetsToLL_M-50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'DY', "DY4JetsToLL_M-50", ), #( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W3JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "WZ", ), ( 'VV', "ZZ", ), ] # SAMPLES FILENAMES fname = "$PICODIR/$SAMPLE_$CHANNEL.root" if '$PICODIR' in fname: import TauFW.PicoProducer.tools.config as GLOB CONFIG = GLOB.getconfig(verb=verbosity) fname = repkey(fname, PICODIR=CONFIG['picodir']) for i, (group, sample) in enumerate(samples): fname = repkey(fname, ERA=era, GROUP=group, SAMPLE=sample, CHANNEL=channel) samples[i] = (sample, fname) if verbosity >= 1: print ">>> samples = %r" % (samples) # JSON jsons = {} if periods: outdir = ensuredir("json") for period in periods: start, end = getPeriodRunNumbers(period, datasets) erarun = "Run%d%s" % (era, period) jsonout = "json/" + re.sub(r"\d{6}-\d{6}", erarun, JSON.split('/')[-1]) filterJSONByRunNumberRange(JSON, jsonout, start, end, verb=verbosity) jsons[erarun] = jsonout else: jsons[era] = JSON # DATA datahists = {period: [] for period in jsons} if 'data' in types: for period, json in jsons.iteritems(): for minbias in minbiases: filename = "Data_PileUp_%s_%s.root" % ( period, str(minbias).replace('.', 'p')) datahist = getDataProfile(filename, json, pileup, 100, era, minbias) datahists[period].append((minbias, datahist)) elif args.plot: for era in jsons: for minbias in minbiases: filename = 
"Data_PileUp_%s_%s.root" % ( era, str(minbias).replace('.', 'p')) file, hist = gethist(filename, 'pileup', retfile=True) if not file or not hist: continue hist.SetDirectory(0) file.Close() datahists[era].append((minbias, hist)) # MC if 'mc' in types: mcfilename = "MC_PileUp_%s.root" % (era) #mcfilename = "MC_PileUp_%s_%s.root"%(era,campaign) getMCProfile(mcfilename, samples, channel, era) if args.plot: mchist = compareMCProfiles(samples, channel, era) for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist, mchist, era, minbias) deletehist(mchist) # clean memory if era == '2017': # also check new/old pmx separately mcfilename_bug = mcfilename.replace(".root", "_old_pmx.root") mcfilename_fix = mcfilename.replace(".root", "_new_pmx.root") getMCProfile(mcfilename_bug, samples_bug, channel, era) getMCProfile(mcfilename_fix, samples_fix, channel, era) if args.plot: mchist_bug = compareMCProfiles(samples_bug, channel, era, tag="old_pmx") mchist_fix = compareMCProfiles(samples_fix, channel, era, tag="new_pmx") for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist, mchist_bug, era, minbias, tag="old_pmx") compareDataMCProfiles(datahist, mchist_fix, era, minbias, tag="new_pmx") # FLAT if 'flat' in types: filename = "MC_PileUp_%d_FlatPU0to75.root" % era hist_flat = getFlatProfile(filename, 75) for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist, hist_flat, era, minbias, tag="FlatPU0to75", rmin=0.0, rmax=3.1)
def main(): eras = args.eras periods = cleanPeriods(args.periods) channel = args.channel types = args.types verbosity = args.verbosity minbiases = [ 69.2 ] if periods else [ 69.2, 69.2*1.046, 69.2*0.954, 80.0 ] fname_ = "$PICODIR/$SAMPLE_$CHANNEL.root" # sample file name if 'mc' in types and '$PICODIR' in fname_: import TauFW.PicoProducer.tools.config as GLOB CONFIG = GLOB.getconfig(verb=verbosity) fname_ = repkey(fname_,PICODIR=CONFIG['picodir']) for era in args.eras: year = getyear(era) mcfilename = "MC_PileUp_%s.root"%(era) jsondir = os.path.join(datadir,'json',str(year)) pileup = os.path.join(jsondir,"pileup_latest.txt") jname = getJSON(era) CMSStyle.setCMSEra(era) samples_bug = [ ] # buggy samples in (pre-UL) 2017 with "old pmx" library samples_fix = [ ] # fixed samples in (pre-UL) 2017 with "new pmx" library samples = [ # default set of samples ( 'DY', "DYJetsToMuTauh_M-50" ), ( 'DY', "DYJetsToLL_M-50" ), ( 'DY', "DY4JetsToLL_M-50" ), ( 'DY', "DY3JetsToLL_M-50" ), ( 'DY', "DY2JetsToLL_M-50" ), ( 'DY', "DY1JetsToLL_M-50" ), ( 'WJ', "WJetsToLNu" ), ( 'WJ', "W4JetsToLNu" ), ( 'WJ', "W3JetsToLNu" ), ( 'WJ', "W2JetsToLNu" ), ( 'WJ', "W1JetsToLNu" ), ( 'TT', "TTToHadronic" ), ( 'TT', "TTTo2L2Nu" ), ( 'TT', "TTToSemiLeptonic" ), ( 'ST', "ST_tW_top" ), ( 'ST', "ST_tW_antitop" ), ( 'ST', "ST_t-channel_top" ), ( 'ST', "ST_t-channel_antitop" ), ( 'VV', "WW" ), ( 'VV', "WZ" ), ( 'VV', "ZZ" ), ] if era=='2016': campaign = "Moriond17" if 'UL' in era and 'preVFP' in era: campaign = "Summer19" elif 'UL' in era: campaign = "Summer19" else: samples = [ ( 'TT', "TT", ), ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DYJetsToLL_M-50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W3JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', 
"ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "WZ", ), ( 'VV', "ZZ", ), ] elif '2017' in era: if 'UL' in era: campaign = "Summer19" else: campaign = "Winter17_V2" samples_bug = [ # buggy samples in (pre-UL) 2017 ( 'DY', "DYJetsToLL_M-50", ), ( 'WJ', "W3JetsToLNu", ), ( 'VV', "WZ", ), ] samples_fix = [ # fixed samples in (pre-UL) 2017 ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'DY', "DY4JetsToLL_M-50", ), ( 'TT', "TTTo2L2Nu", ), ( 'TT', "TTToHadronic", ), ( 'TT', "TTToSemiLeptonic", ), ( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "ZZ", ), ] samples = samples_bug + samples_fix else: if 'UL' in era: campaign = "Summer19" else: campaign = "Autumn18" samples = [ ( 'TT', "TTTo2L2Nu", ), ( 'TT', "TTToHadronic", ), ( 'TT', "TTToSemiLeptonic", ), ( 'DY', "DYJetsToLL_M-10to50", ), ( 'DY', "DYJetsToLL_M-50", ), ( 'DY', "DY1JetsToLL_M-50", ), ( 'DY', "DY2JetsToLL_M-50", ), ( 'DY', "DY3JetsToLL_M-50", ), ( 'DY', "DY4JetsToLL_M-50", ), #( 'WJ', "WJetsToLNu", ), ( 'WJ', "W1JetsToLNu", ), ( 'WJ', "W2JetsToLNu", ), ( 'WJ', "W3JetsToLNu", ), ( 'WJ', "W4JetsToLNu", ), ( 'ST', "ST_tW_top", ), ( 'ST', "ST_tW_antitop", ), ( 'ST', "ST_t-channel_top", ), ( 'ST', "ST_t-channel_antitop", ), #( 'ST', "ST_s-channel", ), ( 'VV', "WW", ), ( 'VV', "WZ", ), ( 'VV', "ZZ", ), ] # SAMPLES FILENAMES samples_ = [ ] suberas = [era+"_preVFP",era+"_postVFP"] if era=='UL2016' else [era] for subera in suberas: for i, (group,sample) in enumerate(samples): fname = repkey(fname_,ERA=subera,GROUP=group,SAMPLE=sample,CHANNEL=channel) samples_.append((sample,fname)) samples = samples_ # replace sample list if verbosity>=1: print ">>> samples = %r"%(samples) # JSON jsons = { } if periods: for period in 
periods: jsonout = filterJSONByRunNumberRange(jname,era,period=period,outdir='json',verb=verbosity) jsons[erarun] = jsonout else: jsons[era] = jname # DATA datahists = { period: [ ] for period in jsons } if 'data' in types: for period, json in jsons.iteritems(): for minbias in minbiases: filename = "Data_PileUp_%s_%s.root"%(period,str(minbias).replace('.','p')) datahist = getDataProfile(filename,json,pileup,100,era,minbias) datahists[period].append((minbias,datahist)) elif args.plot: # do not create new data profiles, but just load them for era in jsons: for minbias in minbiases: filename = "Data_PileUp_%s_%s.root"%(era,str(minbias).replace('.','p')) file, hist = gethist(filename,'pileup',retfile=True) if not file or not hist: continue hist.SetDirectory(0) file.Close() datahists[era].append((minbias,hist)) # MC if 'mc' in types: assert samples, "compareMCProfiles: Did not find any samples for %r..."%(era) mcfilename = "MC_PileUp_%s.root"%(era) #mcfilename = "MC_PileUp_%s_%s.root"%(era,campaign) getMCProfile(mcfilename,samples,channel,era) if args.plot: mchist = compareMCProfiles(samples,channel,era) for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist,mchist,era,minbias) compareDataMCProfiles(datahists[era],mchist,era,rmin=0.4,rmax=1.5,delete=True) deletehist(mchist) # clean memory if era=='2017': #and 'UL' not in era # buggy (pre-UL) 2017: also check new/old pmx separately mcfilename_bug = mcfilename.replace(".root","_old_pmx.root") mcfilename_fix = mcfilename.replace(".root","_new_pmx.root") getMCProfile(mcfilename_bug,samples_bug,channel,era) getMCProfile(mcfilename_fix,samples_fix,channel,era) if args.plot: mchist_bug = compareMCProfiles(samples_bug,channel,era,tag="old_pmx") mchist_fix = compareMCProfiles(samples_fix,channel,era,tag="new_pmx") for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist,mchist_bug,era,minbias,tag="old_pmx") 
compareDataMCProfiles(datahist,mchist_fix,era,minbias,tag="new_pmx") # FLAT if 'flat' in types: filename = "MC_PileUp_%d_FlatPU0to75.root"%era hist_flat = getFlatProfile(filename,75) for era in jsons: for minbias, datahist in datahists[era]: compareDataMCProfiles(datahist,hist_flat,era,minbias,tag="FlatPU0to75",rmin=0.0,rmax=3.1)