def main(opts):
    """
    Plot all "_SR" histograms found in opts.folder for every optimisation
    mode of the multicrab directory opts.mcrab.

    Reads from opts: mcrab, optMode, verbose, intLumi, dataset, folder,
    saveDir, url. Mutates opts.optMode (loop variable) and opts.intLumi
    (filled from the "Data" dataset when negative). Returns None; output is
    produced via PlotHistograms() and saved under opts.saveDir.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(False)
    style.setGridX(True)   # opts.gridX)
    style.setGridY(True)   # opts.gridY)
    style.setLogX(False)   # opts.logX)
    style.setLogY(False)   # opts.logY)

    # Obtain dsetMgrCreator and register it to module selector
    dsetMgrCreator = dataset.readFromMulticrabCfg(directory=opts.mcrab)

    # Get list of eras, modes, and optimisation modes
    erasList = dsetMgrCreator.getDataEras()
    modesList = dsetMgrCreator.getSearchModes()
    optList = dsetMgrCreator.getOptimizationModes()
    sysVarList = dsetMgrCreator.getSystematicVariations()
    sysVarSrcList = dsetMgrCreator.getSystematicVariationSources()

    # If user does not define optimisation mode do all of them
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")  # ensure at least the nominal (empty) mode
        else:
            pass
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if 0:
            datasetsMgr.printSelections()

        # Set/Overwrite cross-sections (signal normalised to 1 pb)
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Custom Filtering of datasets
        if 0:
            datasetsMgr.remove(
                filter(lambda name: "Charged" in name and not "M_500" in name,
                       datasetsMgr.getAllDatasetNames()))

        # ZJets and DYJets overlap!
        if "ZJetsToQQ_HT600toInf" in datasetsMgr.getAllDatasetNames(
        ) and "DYJetsToQQ_HT180" in datasetsMgr.getAllDatasetNames():
            Print(
                "Cannot use both ZJetsToQQ and DYJetsToQQ due to duplicate events? Investigate. Removing ZJetsToQQ datasets for now ..",
                True)
            datasetsMgr.remove(
                filter(lambda name: "ZJetsToQQ" in name,
                       datasetsMgr.getAllDatasetNames()))

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)
        datasetsMgr.PrintInfo()

        # Get Luminosity (fall back to 1.0 when no data dataset exists)
        if opts.intLumi < 0.0:
            if "Data" in datasetsMgr.getAllDatasetNames():
                opts.intLumi = datasetsMgr.getDataset("Data").getLuminosity()
            else:
                opts.intLumi = 1.0

        # Merge EWK samples
        if opts.dataset == "EWK":
            datasetsMgr.merge("EWK", aux.GetListOfEwkDatasets())
            plots._plotStyles["EWK"] = styles.getAltEWKStyle()

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Get all histogram names in the given ROOT folder,
        # keeping only the signal-region ("_SR") ones
        histoNames = datasetsMgr.getAllDatasets()[0].getDirectoryContent(
            opts.folder)
        histoList = [
            os.path.join(opts.folder, h) for h in histoNames if "_SR" in h
        ]

        # For-loop: All histos in SR
        nHistos = len(histoList)
        for i, h in enumerate(histoList, 1):
            msg = "{:<9} {:>3} {:<1} {:<3} {:<50}".format(
                "Histogram", "%i" % i, "/", "%s:" % (nHistos), h)
            Print(ShellStyles.SuccessStyle() + msg + ShellStyles.NormalStyle(),
                  i == 1)
            PlotHistograms(datasetsMgr, h)

        # Inform user where the plots where saved
        Print(
            "All plots saved under directory %s" %
            (ShellStyles.NoteStyle() + aux.convertToURL(opts.saveDir, opts.url)
             + ShellStyles.NormalStyle()), True)
    return
def main():
    """
    Produce MET-leg trigger efficiency plots (Data vs MC) for the
    "METLeg_2015D_MET80" analysis from a multicrab directory given as
    sys.argv[1], then a MC trigger-bit vs CaloMET comparison and a
    pile-up (nVtx) dependence plot. Results are saved under plotDir and
    the efficiencies are also written to a JSON via pythonWriter.
    """
    if len(sys.argv) < 2:
        usage()

    paths = [sys.argv[1]]

    analysis = "METLeg_2015D_MET80"
    #    datasets = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysis)
    #    datasets = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysis,includeOnlyTasks="Tau\S+25ns$|TTJets$")
    datasets = dataset.getDatasetsFromMulticrabDirs(
        paths,
        analysisName=analysis,
        excludeTasks="Tau_Run2015C|Tau\S+25ns_Silver$|DYJetsToLL|WJetsToLNu$")
    #    datasets = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysis,includeOnlyTasks="Tau_Run2015D_PromptReco_v4_246908_260426_25ns$|DYJetsToLL_M_50$")

    for d in datasets.getAllDatasets():
        print d.getName()

    style = tdrstyle.TDRStyle()

    dataset1 = datasets.getDataDatasets()
    dataset2 = datasets.getMCDatasets()

    # Efficiency graphs for data and MC
    eff1_MET80 = getEfficiency(dataset1)
    eff2_MET80 = getEfficiency(dataset2)

    styles.dataStyle.apply(eff1_MET80)
    styles.mcStyle.apply(eff2_MET80)
    eff1_MET80.SetMarkerSize(1)
    eff2_MET80.SetMarkerSize(1.5)

    p = plots.ComparisonPlot(
        histograms.HistoGraph(eff1_MET80, "eff1_MET80", "p", "P"),
        histograms.HistoGraph(eff2_MET80, "eff2_MET80", "p", "P"))

    opts = {"ymin": 0, "ymax": 1.1}
    opts2 = {"ymin": 0.5, "ymax": 1.5}  # ratio-pad range
    moveLegend = {"dx": -0.55, "dy": -0.15}

    name = "TauMET_" + analysis + "_DataVsMC_PFMET"

    legend1 = "Data"
    #    legend2 = "MC (TTJets)"
    legend2 = "MC"
    p.histoMgr.setHistoLegendLabelMany({
        "eff1_MET80": legend1,
        "eff2_MET80": legend2
    })

    p.createFrame(os.path.join(plotDir, name),
                  createRatio=True,
                  opts=opts,
                  opts2=opts2)
    p.setLegend(
        histograms.moveLegend(histograms.createLegend(y1=0.8), **moveLegend))
    p.getFrame().GetYaxis().SetTitle("L1+HLT MET efficiency")
    p.getFrame().GetXaxis().SetTitle("MET Type 1 (GeV)")
    p.getFrame2().GetYaxis().SetTitle("Ratio")
    p.getFrame2().GetYaxis().SetTitleOffset(1.6)

    histograms.addText(0.2, 0.6, "LooseIsoPFTau50_Trk30_eta2p1_MET80", 17)
    #    histograms.addText(0.2, 0.53, analysis.split("_")[len(analysis.split("_")) -1], 17)
    label = analysis.split("_")[1]  # e.g. the "2015D" part of the analysis name
    histograms.addText(0.2, 0.53, label, 17)
    runRange = datasets.loadRunRange()
    histograms.addText(0.2, 0.46, "Runs " + runRange, 17)

    p.draw()

    # Sum the luminosity of all data datasets for the standard CMS labels
    lumi = 0.0
    for d in datasets.getDataDatasets():
        print "luminosity", d.getName(), d.getLuminosity()
        lumi += d.getLuminosity()
    print "luminosity, sum", lumi
    histograms.addStandardTexts(lumi=lumi)

    if not os.path.exists(plotDir):
        os.mkdir(plotDir)
    p.save(formats)

    # Persist the efficiencies to JSON
    pythonWriter.addParameters(plotDir, label, runRange, lumi, eff1_MET80)
    pythonWriter.addMCParameters(label, eff2_MET80)
    pythonWriter.writeJSON(
        os.path.join(plotDir, "metLegTriggerEfficiency2015.json"))

    # NOTE(review): the block below is disabled via a triple-quoted string
    """
    #### MET120
    analysis = "METLeg_2015CD_MET120"
    datasets = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysis)

    for d in datasets.getAllDatasets():
        print d.getName()

    style = tdrstyle.TDRStyle()

    dataset1 = datasets.getDataDatasets()
    dataset2 = datasets.getMCDatasets()

    eff1_MET120 = getEfficiency(dataset1)
    eff2_MET120 = getEfficiency(dataset2)

    styles.dataStyle.apply(eff1_MET120)
    styles.mcStyle.apply(eff2_MET120)
    eff1_MET120.SetMarkerSize(1)
    eff2_MET120.SetMarkerSize(1.5)

    p = plots.ComparisonPlot(histograms.HistoGraph(eff1_MET120, "eff1_MET120", "p", "P"),
                             histograms.HistoGraph(eff2_MET120, "eff2_MET120", "p", "P"))

    opts = {"ymin": 0, "ymax": 1.1}
    opts2 = {"ymin": 0.5, "ymax": 1.5}
    moveLegend = {"dx": -0.55, "dy": -0.15}

    name = "DataVsMC_L1HLTMET_PFMET_MET120"

    legend1 = "Data"
    legend2 = "MC"
    p.histoMgr.setHistoLegendLabelMany({"eff1_MET120": legend1, "eff2_MET120": legend2})

    p.createFrame(os.path.join(plotDir, name), createRatio=True, opts=opts, opts2=opts2)
    p.setLegend(histograms.moveLegend(histograms.createLegend(y1=0.8), **moveLegend))

    p.getFrame().GetYaxis().SetTitle("L1+HLT MET efficiency")
    p.getFrame().GetXaxis().SetTitle("MET Type 1 (GeV)")
    p.getFrame2().GetYaxis().SetTitle("Ratio")
    p.getFrame2().GetYaxis().SetTitleOffset(1.6)

    p.draw()
    lumi = 0.0
    histograms.addStandardTexts(lumi=lumi)
    if not os.path.exists(plotDir):
        os.mkdir(plotDir)
    p.save(formats)
    """

    # CaloMET
    #### MET80
    analysisc = "METLeg_2015D_CaloMET_MET80"
    # NOTE(review): the first call below is immediately overwritten by the
    # second (same paths/analysis, different task filter) — presumably a
    # leftover; confirm before removing.
    datasetsc = dataset.getDatasetsFromMulticrabDirs(paths,
                                                     analysisName=analysisc)
    datasetsc = dataset.getDatasetsFromMulticrabDirs(
        paths, analysisName=analysisc, excludeTasks="Tau\S+25ns_Silver$")
    #    datasetsc = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysisc,includeOnlyTasks="Tau\S+25ns$|TTJets$")

    style = tdrstyle.TDRStyle()

    dataset1c = datasetsc.getDataDatasets()
    dataset2c = datasetsc.getMCDatasets()

    # Only the MC efficiency is used in this comparison (data lines disabled)
    #    eff1c_MET80 = getEfficiency(dataset1c)
    eff2c_MET80 = getEfficiency(dataset2c)

    #    styles.dataStyle.apply(eff1c_MET80)
    styles.mcStyle.apply(eff2c_MET80)
    #    eff1c_MET80.SetMarkerSize(1)
    eff2c_MET80.SetMarkerSize(1.5)
    eff2c_MET80.SetMarkerColor(4)

    p = plots.ComparisonPlot(
        histograms.HistoGraph(eff2_MET80, "eff2_MET80", "p", "P"),
        histograms.HistoGraph(eff2c_MET80, "eff2c_MET80", "p", "P"))

    namec = "TauMET_" + analysis + "_MC_TrgBitVsCaloMET80_PFMET"

    legend1c = "MC, trigger bit"
    legend2c = "MC, CaloMET > 80"
    p.histoMgr.setHistoLegendLabelMany({
        "eff2_MET80": legend1c,
        "eff2c_MET80": legend2c
    })

    p.createFrame(os.path.join(plotDir, namec),
                  createRatio=True,
                  opts=opts,
                  opts2=opts2)
    p.setLegend(
        histograms.moveLegend(histograms.createLegend(y1=0.8), **moveLegend))
    p.getFrame().GetYaxis().SetTitle("L1+HLT MET efficiency")
    p.getFrame().GetXaxis().SetTitle("MET Type 1 (GeV)")
    p.getFrame2().GetYaxis().SetTitle("Ratio")
    p.getFrame2().GetYaxis().SetTitleOffset(1.6)

    p.draw()
    lumi = 0.0
    for d in datasets.getDataDatasets():
        print "luminosity", d.getName(), d.getLuminosity()
        lumi += d.getLuminosity()
    print "luminosity, sum", lumi
    histograms.addStandardTexts(lumi=lumi)
    histograms.addText(0.2, 0.6, "LooseIsoPFTau50_Trk30_eta2p1_MET80", 17)

    if not os.path.exists(plotDir):
        os.mkdir(plotDir)
    p.save(formats)

    # NOTE(review): disabled block (triple-quoted string)
    """
    #### MET120
    analysisc = "METLeg_2015A_CaloMET_MET120"
    datasetsc = dataset.getDatasetsFromMulticrabDirs(paths,analysisName=analysisc)

    style = tdrstyle.TDRStyle()

    dataset1c = datasetsc.getDataDatasets()
    dataset2c = datasetsc.getMCDatasets()

    eff1c_MET120 = getEfficiency(dataset1c)
    eff2c_MET120 = getEfficiency(dataset2c)

    styles.dataStyle.apply(eff1c_MET120)
    styles.mcStyle.apply(eff1c_MET120)
    eff1c_MET120.SetMarkerSize(1)
    eff2c_MET120.SetMarkerSize(1.5)

    p = plots.ComparisonPlot(histograms.HistoGraph(eff2_MET120, "eff2_MET120", "p", "P"),
                             histograms.HistoGraph(eff2c_MET120, "eff2c_MET120", "p", "P"))

    namec = "MC_TrgBitVsCaloMET120_L1HLTMET_PFMET"

    legend1c = "MC, trigger bit"
    legend2c = "MC, CaloMET > 120"
    p.histoMgr.setHistoLegendLabelMany({"eff2_MET120": legend1c, "eff2c_MET120": legend2c})

    p.createFrame(os.path.join(plotDir, namec), createRatio=True, opts=opts, opts2=opts2)
    p.setLegend(histograms.moveLegend(histograms.createLegend(y1=0.8), **moveLegend))

    p.getFrame().GetYaxis().SetTitle("L1+HLT MET efficiency")
    p.getFrame().GetXaxis().SetTitle("MET Type 1 (GeV)")
    p.getFrame2().GetYaxis().SetTitle("Ratio")
    p.getFrame2().GetYaxis().SetTitleOffset(1.6)

    p.draw()
    lumi = 0.0
    histograms.addStandardTexts(lumi=lumi)
    if not os.path.exists(plotDir):
        os.mkdir(plotDir)
    p.save(formats)
    """

    #########################################################################
    # Pile-up dependence: efficiency vs number of reconstructed vertices
    namePU = "TauMET_" + analysis + "_DataVsMC_nVtx"

    eff1PU = getEfficiency(dataset1, "NumeratorPU", "DenominatorPU")
    eff2PU = getEfficiency(dataset2, "NumeratorPU", "DenominatorPU")

    styles.dataStyle.apply(eff1PU)
    styles.mcStyle.apply(eff2PU)
    eff1PU.SetMarkerSize(1)
    eff2PU.SetMarkerSize(1.5)

    pPU = plots.ComparisonManyPlot(
        histograms.HistoGraph(eff1PU, "eff1", "p", "P"),
        [histograms.HistoGraph(eff2PU, "eff2", "p", "P")])
    pPU.histoMgr.setHistoLegendLabelMany({"eff1": legend1, "eff2": legend2})

    # NOTE: rebinds 'opts' used above; y-range chosen for the log-scale pad
    opts = {"ymin": 0.001, "ymax": 0.1}

    pPU.createFrame(os.path.join(plotDir, namePU),
                    createRatio=True,
                    opts=opts,
                    opts2=opts2)
    pPU.setLegend(
        histograms.moveLegend(histograms.createLegend(), **moveLegend))
    pPU.getPad1().SetLogy(True)

    pPU.getFrame().GetYaxis().SetTitle("L1+HLT MET efficiency")
    pPU.getFrame().GetXaxis().SetTitle("Number of reco vertices")
    pPU.getFrame2().GetYaxis().SetTitle("Ratio")
    pPU.getFrame2().GetYaxis().SetTitleOffset(1.6)

    histograms.addText(0.4, 0.25, "LooseIsoPFTau50_Trk30_eta2p1_MET80", 17)
    histograms.addText(0.4, 0.18,
                       analysis.split("_")[len(analysis.split("_")) - 1], 17)
    histograms.addText(0.4, 0.11, "Runs " + datasets.loadRunRange(), 17)

    pPU.draw()
    histograms.addStandardTexts(lumi=lumi)
    pPU.save(formats)

    print "Output written in", plotDir
def main():
    """
    Interpret light-H+ BR limits in the MSSM (mHp, tanb) plane.

    Command line: a .root file (BRXSDB interface) and optionally a .json
    limits file are picked out of sys.argv by regex; note that jsonfile is
    unconditionally overridden below. Writes MSSMLimitLight_<scenario>.json
    and produces the tanb limit plot, then calls sys.exit() — the (mA, tanb)
    replotting section after it is currently dead code.
    """
    if len(sys.argv) == 1:
        usage()

    rootfile = ""
    jsonfile = "limits.json"

    # Pick the first .root / .json looking argument from the command line
    root_re = re.compile("(?P<rootfile>(\S*\.root))")
    json_re = re.compile("(?P<jsonfile>(\S*\.json))")
    for argv in sys.argv:
        match = root_re.search(argv)
        if match:
            rootfile = match.group(0)
        match = json_re.search(argv)
        if match:
            jsonfile = match.group(0)

    #    jsonfile = "limits_light2016.json"
    # NOTE(review): this hard-coded path overrides any .json given on the
    # command line — confirm this is intentional.
    jsonfile = "limits2016/limitsForMSSMplots_ICHEP_v2_light.json"
    #    limits = limit.BRLimits(limitsfile=jsonfile,configfile="limitdata/lightHplus_configuration.json")
    limits = limit.BRLimits(
        limitsfile=jsonfile,
        configfile="limits2016/lightHplus_configuration.json")

    # Enable OpenGL
    ROOT.gEnv.SetValue("OpenGL.CanvasPreferGL", 1)

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    #    if limit.forPaper:
    #        histograms.cmsTextMode = histograms.CMSMode.PAPER
    histograms.cmsTextMode = histograms.CMSMode.PRELIMINARY
    #    histograms.cmsTextMode = histograms.CMSMode.PAPER # tmp
    #histograms.cmsTextMode = histograms.CMSMode.UNPUBLISHED # tmp
    limit.forPaper = True  # to get GeV without c^2

    # Get BR limits
    masses = limits.mass
    brs = limits.observed

    print "Observed masses and BR's"
    for i in range(len(masses)):
        print " ", masses[i], brs[i]

    global db
    db = BRXSDB.BRXSDatabaseInterface(rootfile)
    for i, m in enumerate(masses):
        db.addExperimentalBRLimit(m, brs[i])

    graphs = {}
    obs = limits.observedGraph()

    # Remove blinded obs points (iterate in reverse so RemovePoint
    # does not shift the indices still to be visited)
    for i in reversed(range(0, obs.GetN())):
        if obs.GetY()[i] < 0.00000001:
            print " REMOVING POINT", obs.GetY(
            )[i], " corresponding mass=", obs.GetX()[i]
            obs.RemovePoint(i)
    print

    graphs["exp"] = limits.expectedGraph()
    graphs["exp1"] = limits.expectedBandGraph(sigma=1)
    graphs["exp2"] = limits.expectedBandGraph(sigma=2)

    if obs.GetN() > 0:
        graphs["obs"] = obs
        # Get theory uncertainties on observed
        #        obs_th_plus = limit.getObservedPlus(obs,0.21)
        #        obs_th_minus = limit.getObservedMinus(obs,0.21)
        #        for gr in [obs_th_plus, obs_th_minus]:
        #            gr.SetLineWidth(2)
        #            gr.SetLineStyle(9)
        #        graphs["obs_th_plus"] = obs_th_plus
        #        graphs["obs_th_minus"] = obs_th_minus

    # Remove m=80
    for gr in graphs.values():
        limit.cleanGraph(gr, 80)

    print "Plotting graphs"
    for key in graphs.keys():
        for i in range(graphs[key].GetN()):
            xs = graphs[key].GetX()
            ys = graphs[key].GetY()
            print " ", key, xs[i], ys[i]
        print

    # Interpret in MSSM
    xVariable = "mHp"
    selection = "mu==200"
    #    selection = "mHp > 0"
    #    scenario = "MSSM m_{h}^{max}"
    # Scenario name taken from the ROOT file's basename
    scenario = os.path.split(rootfile)[-1].replace(".root", "")

    from JsonWriter import JsonWriter
    jsonWriter = JsonWriter()
    # Convert each BR-limit graph into the (mHp, tanb) plane
    for key in graphs.keys():
        print "Graph--------------------------------", key
        graphs[key] = db.graphToTanBetaCombined(graphs[key], xVariable,
                                                selection)
        #if key == "obs":
        #obsplus = db.getTheorUncert(graphs[key],xVariable,selection,"+")
        #graphs["obs_th_plus"] = db.graphToTanBetaCombined(obsplus,xVariable,selection)
        #obsminus = db.getTheorUncert(graphs[key],xVariable,selection,"-")
        #graphs["obs_th_minus"] = db.graphToTanBetaCombined(obsminus,xVariable,selection)
        print key, "done"
        jsonWriter.addGraph(key, graphs[key])

    graphs["mintanb"] = db.minimumTanbGraph("mHp", selection)
    # The allowed region from the mh (or mH for lowMH) = 125+-3 GeV constraint
    if scenario == "lowMH-LHCHXSWG":
        graphs["Allowed"] = db.mhLimit("mH", "mHp", selection, "125.0+-3.0")
    else:
        graphs["Allowed"] = db.mhLimit("mh", "mHp", selection, "125.0+-3.0")
    #    graphs["isomass"] = None

    jsonWriter.addGraph("Allowed", graphs["Allowed"])
    jsonWriter.addGraph("mintanb", graphs["mintanb"])
    jsonWriter.addParameter("name", "limitsTanb_light_" + scenario)
    jsonWriter.addParameter("scenario", scenario)
    jsonWriter.addParameter("luminosity", limits.getLuminosity())
    jsonWriter.addParameter("finalStateText", limits.getFinalstateText())
    jsonWriter.addParameter("mHplus", limit.mHplus())
    jsonWriter.addParameter("selection", selection)
    jsonWriter.write("MSSMLimitLight_" + scenario + ".json")

    limit.doTanBetaPlotLight("limitsTanb_light_" + scenario, graphs,
                             limits.getLuminosity(),
                             limits.getFinalstateText(), limit.mHplus(),
                             scenario)
    # NOTE(review): sys.exit() here makes everything below unreachable —
    # presumably a deliberate temporary switch; confirm.
    sys.exit()

    # mH+ -> mA
    print "Replotting the graphs for (mA,tanb)"
    for key in graphs.keys():
        print key
        #db.PrintGraph(graphs[key])
        #print "check loop db.graphToMa"
        db.graphToMa(graphs[key])

    graphs["isomass"] = db.getIsoMass(160)

    limit.doTanBetaPlotLight("limitsTanb_mA_light_" + scenario, graphs,
                             limits.getLuminosity(),
                             limits.getFinalstateText(), limit.mA(), scenario)
def main(argv, dsetMgr, moduleInfoString):
    """
    Measure the QCD normalization factors for the inverted-tau method.

    Builds baseline/inverted MET templates for data, EWK genuine-tau and
    EWK fake-tau components, fits them per tau-pT bin, and writes the
    resulting scale factors to QCDNormalizationFactors_*.py under argv[1].

    argv             -- command-line arguments; argv[1] is the multicrab dir
    dsetMgr          -- pre-built dataset manager
    moduleInfoString -- era/searchMode/optMode identifier for output naming

    Relies on module-level selectOnlyBins and _rebinFactor. Raises Exception
    when a required histogram or merge component is missing.
    """
    COMBINEDHISTODIR = "ForQCDNormalization"
    FAKEHISTODIR = "ForQCDNormalizationEWKFakeTaus"
    GENUINEHISTODIR = "ForQCDNormalizationEWKGenuineTaus"
    comparisonList = ["AfterStdSelections"]

    dirs = []
    dirs.append(sys.argv[1])

    # Check multicrab consistency
    consistencyCheck.checkConsistencyStandalone(dirs[0],
                                                dsetMgr,
                                                name="QCD inverted")

    # As we use weighted counters for MC normalisation, we have to
    # update the all event count to a separately defined value because
    # the analysis job uses skimmed pattuple as an input
    dsetMgr.updateNAllEventsToPUWeighted()

    # Read integrated luminosities of data dsetMgr from lumi.json
    dsetMgr.loadLuminosities()

    print "Datasets list (initial):"
    print dsetMgr.getMCDatasetNames()
    print "\n"

    # Include only 120 mass bin of HW and HH dsetMgr
    dsetMgr.remove(
        filter(lambda name: "TTToHplus" in name and not "M120" in name,
               dsetMgr.getAllDatasetNames()))
    dsetMgr.remove(
        filter(lambda name: "HplusTB" in name, dsetMgr.getAllDatasetNames()))
    dsetMgr.remove(
        filter(lambda name: "DY2JetsToLL" in name,
               dsetMgr.getAllDatasetNames()))
    dsetMgr.remove(
        filter(lambda name: "DY3JetsToLL" in name,
               dsetMgr.getAllDatasetNames()))
    dsetMgr.remove(
        filter(lambda name: "DY4JetsToLL" in name,
               dsetMgr.getAllDatasetNames()))
    dsetMgr.remove(
        filter(lambda name: "WJetsToLNu_HT" in name,
               dsetMgr.getAllDatasetNames()))
    # DEBUG TEST: remove one dataset at a time
    #    dsetMgr.remove(filter(lambda name: "DYJetsToQQ" in name, dsetMgr.getAllDatasetNames()))
    #    dsetMgr.remove(filter(lambda name: "DYJetsToLL" in name, dsetMgr.getAllDatasetNames()))
    #    dsetMgr.remove(filter(lambda name: "WZ" in name, dsetMgr.getAllDatasetNames()))
    #    dsetMgr.remove(filter(lambda name: "ST" in name, dsetMgr.getAllDatasetNames()))

    print "Datasets after filter removals:"
    print dsetMgr.getMCDatasetNames()
    print "\n"

    # Default merging nad ordering of data and MC dsetMgr
    # All data dsetMgr to "Data"
    # All QCD dsetMgr to "QCD"
    # All single top dsetMgr to "SingleTop"
    # WW, WZ, ZZ to "Diboson"
    plots.mergeRenameReorderForDataMC(dsetMgr)

    print "Datasets after mergeRenameReorderForDataMC:"
    print dsetMgr.getMCDatasetNames()
    print "\n"

    # Set BR(t->H) to 0.05, keep BR(H->tau) in 1
    xsect.setHplusCrossSectionsToBR(dsetMgr, br_tH=0.05, br_Htaunu=1)

    # Merge WH and HH dsetMgr to one (for each mass bin)
    plots.mergeWHandHH(dsetMgr)

    # Merge MC EWK samples as one EWK sample
    myMergeList = []

    # Always use TT (or TTJets) as a part of the EWK background
    if "TT" in dsetMgr.getMCDatasetNames():
        myMergeList.append("TT")  # Powheg, no neg. weights -> large stats.
    else:
        myMergeList.append("TTJets")  # Madgraph with negative weights
        print "Warning: using TTJets as input, but this is suboptimal. Please switch to the TT sample (much more stats.)."

    # Always use WJets as a part of the EWK background
    myMergeList.append("WJets")

    # For SY, single top and diboson, use only if available:
    if "DYJetsToQQHT" in dsetMgr.getMCDatasetNames():
        myMergeList.append("DYJetsToQQHT")

    if "DYJetsToLL" in dsetMgr.getMCDatasetNames():
        myMergeList.append("DYJetsToLL")
    else:
        print "Warning: ignoring DYJetsToLL sample (since merged sample does not exist) ..."

    if "SingleTop" in dsetMgr.getMCDatasetNames():
        myMergeList.append("SingleTop")
    else:
        print "Warning: ignoring single top sample (since merged sample does not exist) ..."

    if "Diboson" in dsetMgr.getMCDatasetNames():
        myMergeList.append("Diboson")
    else:
        print "Warning: ignoring diboson sample (since merged sample does not exist) ..."

    for item in myMergeList:
        if not item in dsetMgr.getMCDatasetNames():
            raise Exception(
                "Error: tried to use dataset '%s' as part of the merged EWK dataset, but the dataset '%s' does not exist!"
                % (item, item))
    dsetMgr.merge("EWK", myMergeList)

    print "\nFinal dataset list:\n"
    print dsetMgr.getMCDatasetNames()
    print "\n"

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)

    for HISTONAME in comparisonList:
        BASELINETAUHISTONAME = "NormalizationMETBaselineTau" + HISTONAME + "/NormalizationMETBaselineTau" + HISTONAME
        INVERTEDTAUHISTONAME = "NormalizationMETInvertedTau" + HISTONAME + "/NormalizationMETInvertedTau" + HISTONAME
        FITMIN = None
        FITMAX = None

        #===== Infer binning information and labels
        histonames = dsetMgr.getDataset("Data").getDirectoryContent(
            COMBINEDHISTODIR + "/NormalizationMETBaselineTau" + HISTONAME)
        bins = []
        binLabels = []
        if histonames == None:
            # Assume that only inclusive bin exists
            name = COMBINEDHISTODIR + "/NormalizationMETBaselineTau" + HISTONAME
            if not dsetMgr.getDataset("Data").hasRootHisto(name):
                raise Exception(
                    "Error: Cannot find histogram or directory of name '%s'!"
                    % name)
            BASELINETAUHISTONAME = "NormalizationMETBaselineTau" + HISTONAME
            INVERTEDTAUHISTONAME = "NormalizationMETInvertedTau" + HISTONAME
            bins = [""]
            binLabels = ["Inclusive"]
        else:
            for hname in histonames:
                # Bin suffix = histogram name minus the common prefix
                binIndex = hname.replace(
                    "NormalizationMETBaselineTau" + HISTONAME, "")
                # print "DEBUG: We are looking for hisrogram "+COMBINEDHISTODIR+"/"+BASELINETAUHISTONAME+binIndex
                hDummy = dsetMgr.getDataset("Data").getDatasetRootHisto(
                    COMBINEDHISTODIR + "/" + BASELINETAUHISTONAME +
                    binIndex).getHistogram()
                title = hDummy.GetTitle()
                title = title.replace("METBaseline" + HISTONAME, "")
                if hDummy.Integral() > 0.0:
                    bins.append(binIndex)
                    if binIndex == "Inclusive":
                        binLabels.append(binIndex)
                    else:
                        binLabels.append(
                            QCDNormalization.getModifiedBinLabelString(title))
                    # Fit range taken from the first non-empty histogram
                    if FITMIN == None:
                        FITMIN = hDummy.GetXaxis().GetXmin()
                        FITMAX = hDummy.GetXaxis().GetXmax()
                    hDummy.Delete()
                else:
                    print "Skipping bin '%s' (%s) because it has no entries" % (
                        binIndex,
                        QCDNormalization.getModifiedBinLabelString(title))
        print "\nHistogram bins available", bins

        # Select bins by filter
        if len(selectOnlyBins) > 0:
            oldBinLabels = binLabels[:]
            oldBins = bins[:]
            binLabels = []
            bins = []
            for k in selectOnlyBins:
                for i in range(len(oldBinLabels)):
                    if k == oldBinLabels[i] or k == oldBins[i]:
                        binLabels.append(oldBinLabels[i])
                        bins.append(oldBins[i])
        print "Using bins ", bins
        print "\nBin labels"
        for i in range(len(binLabels)):
            line = bins[i]
            while len(line) < 10:
                line += " "
            line += ": " + binLabels[i]
            print line
        print

        #===== Initialize normalization calculator
        #manager = QCDNormalization.QCDNormalizationManagerExperimental1(binLabels)
        manager = QCDNormalization.QCDNormalizationManagerDefault(
            binLabels, dirs[0], moduleInfoString)

        #===== Create templates (EWK fakes, EWK genuine, QCD; data template is created by manager)
        template_EWKFakeTaus_Baseline = manager.createTemplate(
            "EWKFakeTaus_Baseline")
        template_EWKFakeTaus_Inverted = manager.createTemplate(
            "EWKFakeTaus_Inverted")
        template_EWKGenuineTaus_Baseline = manager.createTemplate(
            "EWKGenuineTaus_Baseline")
        template_EWKGenuineTaus_Inverted = manager.createTemplate(
            "EWKGenuineTaus_Inverted")
        template_EWKInclusive_Baseline = manager.createTemplate(
            "EWKInclusive_Baseline")
        template_EWKInclusive_Inverted = manager.createTemplate(
            "EWKInclusive_Inverted")
        template_QCD_Baseline = manager.createTemplate("QCD_Baseline")
        template_QCD_Inverted = manager.createTemplate("QCD_Inverted")

        #===== Define fit functions and fit parameters
        # The available functions are defined in the FitFunction class in the QCDMeasurement/python/QCDNormalization.py file
        # commented out fitter for EWK fake taus, since only the fit on inclusive EWK is used to obtain w_QCD
        #boundary = 100
        #template_EWKFakeTaus_Baseline.setFitter(QCDNormalization.FitFunction("EWKFunctionInv", boundary=boundary, norm=1, rejectPoints=1),
        #                                        FITMIN, FITMAX)
        #template_EWKFakeTaus_Baseline.setDefaultFitParam(defaultInitialValue=[10.0, 100, 45,  0.02],
        #                                                 defaultLowerLimit=  [ 0.1,  70, 10, 0.001],
        #                                                 defaultUpperLimit=  [  30, 300, 100,  0.1])
        # commented out fitter for EWK genuine taus, since only the fit on inclusive EWK is used to obtain w_QCD
        #boundary = 150
        #template_EWKGenuineTaus_Baseline.setFitter(QCDNormalization.FitFunction("EWKFunction", boundary=boundary, norm=1, rejectPoints=1),
        #                                           FITMIN, FITMAX)
        #template_EWKGenuineTaus_Baseline.setDefaultFitParam(defaultLowerLimit=[0.5, 90, 30, 0.0001],
        #                                                    defaultUpperLimit=[ 20, 150, 50,    1.0])

        # Inclusive EWK
        boundary = 150
        template_EWKInclusive_Baseline.setFitter(
            QCDNormalization.FitFunction("EWKFunction",
                                         boundary=boundary,
                                         norm=1,
                                         rejectPoints=1), FITMIN, FITMAX)
        template_EWKInclusive_Baseline.setDefaultFitParam(
            defaultLowerLimit=[0.5, 90, 30, 0.0001],
            defaultUpperLimit=[20, 150, 60, 1.0])

        # Note that the same function is used for QCD only and QCD+EWK fakes
        template_QCD_Inverted.setFitter(
            QCDNormalization.FitFunction("QCDFunction", norm=1), FITMIN,
            FITMAX)
        #        template_QCD_Inverted.setDefaultFitParam(defaultLowerLimit=[0.0001, 0.001, 0.1, 0.0,  10, 0.0001, 0.001],
        #                                                 defaultUpperLimit=[   200,    10,  10, 150, 100,      1,  0.05])
        template_QCD_Inverted.setDefaultFitParam(
            defaultLowerLimit=[
                30, 0.1, 0.1, 10, 10, 0.00001, 0.001
            ],  # new default limits to make fits more stable,
            defaultUpperLimit=[
                130, 20, 20, 200, 200, 0.01, 0.1
            ])  # corresponding to the 7 free param. of the fit function

        #===== Loop over tau pT bins
        for i, binStr in enumerate(bins):
            print "\n********************************"
            print "*** Fitting bin %s" % binLabels[i]
            print "********************************\n"

            #===== Reset bin results
            manager.resetBinResults()

            #===== Obtain histograms for normalization
            # Data
            histoName = COMBINEDHISTODIR + "/" + BASELINETAUHISTONAME + binStr
            hmetBase_data = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "Data").getRootHisto().Clone(histoName)
            histoName = COMBINEDHISTODIR + "/" + INVERTEDTAUHISTONAME + binStr
            hmetInverted_data = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "Data").getRootHisto().Clone(histoName)
            # EWK genuine taus
            histoName = GENUINEHISTODIR + "/" + BASELINETAUHISTONAME + binStr
            hmetBase_EWK_GenuineTaus = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)
            histoName = GENUINEHISTODIR + "/" + INVERTEDTAUHISTONAME + binStr
            hmetInverted_EWK_GenuineTaus = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)
            # EWK fake taus
            histoName = FAKEHISTODIR + "/" + BASELINETAUHISTONAME + binStr
            hmetBase_EWK_FakeTaus = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)
            histoName = FAKEHISTODIR + "/" + INVERTEDTAUHISTONAME + binStr
            hmetInverted_EWK_FakeTaus = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)

            # Finalize histograms by rebinning
            for histogram in [
                    hmetBase_data, hmetInverted_data, hmetBase_EWK_GenuineTaus,
                    hmetInverted_EWK_GenuineTaus, hmetBase_EWK_FakeTaus,
                    hmetInverted_EWK_FakeTaus
            ]:
                histogram.Rebin(_rebinFactor)

            #===== Obtain inclusive EWK histograms (genuine + fake)
            hmetBase_EWKinclusive = hmetBase_EWK_GenuineTaus.Clone(
                "EWKinclusiveBase")
            hmetBase_EWKinclusive.Add(hmetBase_EWK_FakeTaus, 1.0)

            hmetInverted_EWKinclusive = hmetInverted_EWK_GenuineTaus.Clone(
                "EWKinclusiveInv")
            hmetInverted_EWKinclusive.Add(hmetInverted_EWK_FakeTaus, 1.0)

            #===== Obtain histograms for QCD (subtract MC EWK events from data)
            # QCD from baseline is usable only as a cross check
            hmetBase_QCD = hmetBase_data.Clone("QCDbase")
            hmetBase_QCD.Add(hmetBase_EWKinclusive, -1)

            hmetInverted_QCD = hmetInverted_data.Clone("QCDinv")
            hmetInverted_QCD.Add(hmetInverted_EWKinclusive, -1)

            #===== Set histograms to the templates
            template_EWKFakeTaus_Inverted.setHistogram(
                hmetInverted_EWK_FakeTaus, binLabels[i])
            template_EWKGenuineTaus_Inverted.setHistogram(
                hmetInverted_EWK_GenuineTaus, binLabels[i])
            template_EWKInclusive_Inverted.setHistogram(
                hmetInverted_EWKinclusive, binLabels[i])
            template_QCD_Inverted.setHistogram(hmetInverted_QCD, binLabels[i])

            template_EWKFakeTaus_Baseline.setHistogram(hmetBase_EWK_FakeTaus,
                                                       binLabels[i])
            template_EWKGenuineTaus_Baseline.setHistogram(
                hmetBase_EWK_GenuineTaus, binLabels[i])
            template_EWKInclusive_Baseline.setHistogram(
                hmetBase_EWKinclusive, binLabels[i])
            template_QCD_Baseline.setHistogram(hmetBase_QCD, binLabels[i])

            #===== Make plots of templates
            manager.plotTemplates()

            #===== Fit individual templates to data
            fitOptions = "R BLW"  # RBLW
            manager.calculateNormalizationCoefficients(hmetBase_data,
                                                       fitOptions, FITMIN,
                                                       FITMAX)

            #===== Calculate combined normalisation coefficient (f_fakes = w*f_QCD + (1-w)*f_EWKfakes)
            # Obtain histograms
            histoName = "ForDataDrivenCtrlPlots/shapeTransverseMass/shapeTransverseMass" + binStr
            dataMt = plots.DataMCPlot(dsetMgr, histoName).histoMgr.getHisto(
                "Data").getRootHisto().Clone(histoName)
            treatNegativeBins(dataMt, "Data_inverted mT")
            histoName = "ForDataDrivenCtrlPlotsEWKFakeTaus/shapeTransverseMass/shapeTransverseMass" + binStr
            ewkFakeTausMt = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)
            treatNegativeBins(ewkFakeTausMt, "ewkFakeTaus_inverted mT")
            histoName = "ForDataDrivenCtrlPlotsEWKGenuineTaus/shapeTransverseMass/shapeTransverseMass" + binStr
            ewkGenuineTausMt = plots.DataMCPlot(
                dsetMgr, histoName).histoMgr.getHisto(
                    "EWK").getRootHisto().Clone(histoName)
            treatNegativeBins(ewkGenuineTausMt, "ewkGenuineTaus_inverted mT")
            qcdMt = dataMt.Clone("QCD")
            qcdMt.Add(ewkFakeTausMt, -1)
            qcdMt.Add(ewkGenuineTausMt, -1)
            treatNegativeBins(qcdMt, "QCD_inverted mT")
            # Do calculation
            manager.calculateCombinedNormalizationCoefficient(
                qcdMt, ewkFakeTausMt)

        #===== Save normalization
        outFileName = "QCDNormalizationFactors_%s_%s.py" % (HISTONAME,
                                                            moduleInfoString)
        print argv[1], outFileName
        outFileFullName = os.path.join(argv[1], outFileName)
        manager.writeScaleFactorFile(outFileFullName, moduleInfoString)
def main(opts):
    """
    Plot top-mass-reconstruction template comparisons ("Baseline" histograms
    from the ForFakeBMeasurement folder) for each chi-square optimisation
    mode.

    Reads from opts: optMode, verbose, mergeEWK, histoLevel; mutates
    opts.optMode while looping. Requires --mergeEWK: without it the merged
    "EWK" dataset does not exist and nothing can be plotted. Returns None.
    """
    optModes = [
        "", "OptChiSqrCutValue50p0", "OptChiSqrCutValue100p0",
        "OptChiSqrCutValue200p0"
    ]
    #    optModes = ["", "OptChiSqrCutValue40", "OptChiSqrCutValue60", "OptChiSqrCutValue80", "OptChiSqrCutValue100", "OptChiSqrCutValue120", "OptChiSqrCutValue140"]
    #    optModes = ["OptChiSqrCutValue250", "OptChiSqrCutValue150", "OptChiSqrCutValue200", "OptChiSqrCutValue180", "OptChiSqrCutValue300"]

    if opts.optMode != None:
        optModes = [opts.optMode]

    # For-loop: All opt Mode
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        if 0:
            datasetsMgr.remove(
                filter(lambda name: "ST" in name,
                       datasetsMgr.getAllDatasetNames()))

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Get Integrated Luminosity
        if 0:
            datasetsMgr.remove(
                filter(lambda name: "Data" in name,
                       datasetsMgr.getAllDatasetNames()))

        # Re-order datasets (different for inverted than default=baseline)
        newOrder = ["Data"]
        newOrder.extend(GetListOfEwkDatasets())
        datasetsMgr.selectAndReorder(newOrder)

        # Set/Overwrite cross-sections (signal normalised to 1 pb)
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

        # Merge EWK samples
        if opts.mergeEWK:
            datasetsMgr.merge("EWK", GetListOfEwkDatasets())
            plots._plotStyles["EWK"] = styles.getAltEWKStyle()
        else:
            Print(
                "Cannot draw the histograms without the option --mergeEWK. Exit",
                True)
            # FIX: the message promises an exit, but the code previously fell
            # through and would fail later when the merged "EWK" dataset was
            # missing. Bail out as announced.
            return

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Apply TDR style
        style = tdrstyle.TDRStyle()
        style.setOptStat(True)

        # Do the template comparisons
        #for hName in getTopSelectionHistos(opts.histoLevel, "Baseline"):
        for hName in getTopMassRecoHistos(opts.histoLevel, "Baseline",
                                          "ForFakeBMeasurement"):
            PlotTemplates(datasetsMgr, hName)
            #PlotTemplates(datasetsMgr, hName.split("/")[-1])
    return
def main(opts):
    """
    Entry point: for each optimisation mode, prepare the dataset manager,
    collect the TetrajetBJetEta histograms of the control/validation/signal
    regions, and run PlotHistosAndCalculateTF() to derive transfer factors.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(True)
    style.setGridY(True)

    # Obtain dsetMgrCreator and register it to module selector
    dsetMgrCreator = dataset.readFromMulticrabCfg(directory=opts.mcrab)

    # Get list of eras, modes, and optimisation modes
    erasList = dsetMgrCreator.getDataEras()
    modesList = dsetMgrCreator.getSearchModes()
    optList = dsetMgrCreator.getOptimizationModes()
    sysVarList = dsetMgrCreator.getSystematicVariations()
    sysVarSrcList = dsetMgrCreator.getSystematicVariationSources()

    # If user does not define optimisation mode do all of them
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All opt Mode
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Get the PSets:
        if 0:
            datasetsMgr.printSelections()
            #PrintPSet("BJetSelection", datasetsMgr, depth=150)

        # ZJets and DYJets overlap!
        if "ZJetsToQQ_HT600toInf" in datasetsMgr.getAllDatasetNames(
        ) and "DYJetsToQQ_HT180" in datasetsMgr.getAllDatasetNames():
            Print(
                "Cannot use both ZJetsToQQ and DYJetsToQQ due to duplicate events? Investigate. \nRemoving ZJetsToQQ datasets for now ..",
                True)
            datasetsMgr.remove(
                filter(lambda name: "ZJetsToQQ" in name,
                       datasetsMgr.getAllDatasetNames()))

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Get luminosity if a value is not specified
        if opts.intLumi < 0:
            opts.intLumi = datasetsMgr.getDataset("Data").getLuminosity()

        # Remove datasets
        removeList = ["QCD-b", "Charged"]
        if not opts.useMC:
            removeList.append("QCD")
        for i, d in enumerate(removeList, 0):
            msg = "Removing dataset %s" % d
            Verbose(
                ShellStyles.WarningLabel() + msg + ShellStyles.NormalStyle(),
                i == 0)
            datasetsMgr.remove(
                filter(lambda name: d in name,
                       datasetsMgr.getAllDatasetNames()))

        # Print summary of datasets to be used
        if 0:
            datasetsMgr.PrintInfo()

        # Merge EWK samples
        datasetsMgr.merge("EWK", aux.GetListOfEwkDatasets())

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Do the fit on the histo after ALL selections (incl. topology cuts)
        folderList = datasetsMgr.getDataset(
            datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(
                opts.folder)
        #folderList1 = [h for h in folderList if "TetrajetPt" in h]
        #folderList1 = [h for h in folderList if "TetrajetMass" in h]
        #folderList1 = [h for h in folderList if "MET" in h]
        #folderList1 = [h for h in folderList if "TetrajetBJetPt" in h]
        folderList1 = [h for h in folderList if "TetrajetBJetEta" in h]
        # Keep only the control (CRone/CRtwo), validation (VR) and signal (SR) regions
        folderList2 = [
            h for h in folderList1
            if "CRtwo" in h or "VR" in h or "SR" in h or "CRone" in h
        ]

        # For-loop: All folders
        histoPaths = []
        for f in folderList2:
            folderPath = os.path.join(opts.folder, f)
            histoList = datasetsMgr.getDataset(
                datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(
                    folderPath)
            pathList = [os.path.join(folderPath, h) for h in histoList]
            histoPaths.extend(pathList)

        binLabels = GetBinLabels("CRone", histoPaths)
        PlotHistosAndCalculateTF(datasetsMgr, histoPaths, binLabels, opts)
    return
def main(opts):
    """
    Entry point: locate the datacards (either via GeneralSettings or from the
    user-supplied card/rootfile patterns), plot shape nuisances for every mass
    point with doPlot(), and print a min/max summary table of the contracted
    signal uncertainties collected in signalTable.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setGridX(opts.gridx)
    style.setGridY(opts.gridy)
    style.setLogX(opts.logx)
    style.setLogY(opts.logy)

    # Create legend and set style
    histograms.createLegend.moveDefaults(dx=-0.1, dh=-0.15)
    histograms.uncertaintyMode.set(histograms.Uncertainty.StatOnly)
    styles.ratioLineStyle.append(styles.StyleLine(lineColor=13))

    # Define some variables
    nameList = []
    allShapeNuisances = []
    signalTable = {}  # filled by doPlot(); maps systematic name -> {"min","max"}
    myDatacardPattern = ""
    myRootfilePattern = ""

    # Find out the mass points
    if opts.cardPattern == None:
        mySettings = limitTools.GeneralSettings(".", [])
        myDatacardPattern = mySettings.getDatacardPattern(
            limitTools.LimitProcessType.TAUJETS)
        myRootfilePattern = mySettings.getRootfilePattern(
            limitTools.LimitProcessType.TAUJETS)
    else:
        # Turn the user pattern into a %-style template ("MMM" -> "M%s", "MM" -> "%s")
        myDatacardPattern = opts.cardPattern.replace("MMM", "M%s").replace(
            "MM", "%s")
        myRootfilePattern = opts.rootfilePattern.replace("MMM", "M%s").replace(
            "MM", "%s")

    # Get mass points to consider
    massPoints = DatacardReader.getMassPointsForDatacardPattern(
        ".", myDatacardPattern)
    Print(
        "The following masses will be considered: %s" %
        (ShellStyles.HighlightAltStyle() + ", ".join(massPoints) +
         ShellStyles.NormalStyle()), True)

    # For-loop: All mass points
    for i, m in enumerate(massPoints, 1):
        # Obtain luminosity from the datacard
        myLuminosity = float(
            limitTools.readLuminosityFromDatacard(".", myDatacardPattern % m))
        # Do the plots
        doPlot(opts, int(m), nameList, allShapeNuisances, myLuminosity,
               myDatacardPattern, myRootfilePattern, signalTable)

    # Print signal table
    Print("Max contracted uncertainty for signal:", True)
    table = []
    align = "{:>30} {:>15} {:>15}"
    hLine = "=" * 60
    table.append(hLine)
    table.append(align.format("Systematic", "Minimum", "Maximum"))
    table.append(hLine)
    # For-loop: All signal
    for i, k in enumerate(signalTable.keys(), 1):
        # Print("Key = %s" % (k), False)
        minVal = "%.3f" % (signalTable[k]["min"])
        maxVal = "%.3f" % (signalTable[k]["max"])
        msg = align.format(k, minVal, maxVal)
        table.append(msg)
    table.append(hLine)
    for row in table:
        Print(row, False)

    msg = "All results under directory %s" % (
        ShellStyles.SuccessStyle() + opts.dirName + ShellStyles.NormalStyle())
    Print(msg, True)
    return
def main(opts):
    """
    Entry point: prepare the dataset manager and draw QCD Baseline-vs-Inverted
    comparison plots (mass- and pt-like histograms only). Requires --mergeEWK;
    otherwise only an error message is printed.
    """
    Verbose("main function")

    comparisonList = ["AfterStdSelections"]

    # Setup & configure the dataset manager
    datasetsMgr = GetDatasetsFromDir(opts)
    datasetsMgr.updateNAllEventsToPUWeighted()
    datasetsMgr.loadLuminosities()  # from lumi.json
    if opts.verbose:
        datasetsMgr.PrintCrossSections()
        datasetsMgr.PrintLuminosities()

    # Custom Filtering of datasets
    if 1:
        # Keep only the M_500 HplusTB signal sample
        datasetsMgr.remove(
            filter(lambda name: "HplusTB" in name and not "M_500" in name,
                   datasetsMgr.getAllDatasetNames()))

    # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
    plots.mergeRenameReorderForDataMC(datasetsMgr)
    datasetsMgr.PrintInfo()

    # Get Integrated Luminosity
    if opts.mcOnly:
        # Determine integrated lumi
        if opts.intLumi < 0.0:
            opts.intLumi = GetLumi(datasetsMgr)
        # Remove data datasets
        datasetsMgr.remove(
            filter(lambda name: "Data" in name,
                   datasetsMgr.getAllDatasetNames()))

    # Re-order datasets (different for inverted than default=baseline)
    newOrder = ["Data"]
    newOrder.extend(GetListOfEwkDatasets())
    newOrder.extend(["QCD"])  #GetListOfQcdDatasets())
    if opts.mcOnly:
        newOrder.remove("Data")
    datasetsMgr.selectAndReorder(newOrder)

    # Set/Overwrite cross-sections
    for d in datasetsMgr.getAllDatasets():
        if "ChargedHiggs" in d.getName():
            datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

    # Merge EWK samples
    if opts.mergeEWK:
        datasetsMgr.merge("EWK", GetListOfEwkDatasets())
        plots._plotStyles["EWK"] = styles.getAltEWKStyle()

    # Print dataset information
    datasetsMgr.PrintInfo()

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)

    # Do the Baseline Vs Inverted histograms
    if opts.mergeEWK:
        for hName in getTopSelectionHistos(opts.histoLevel):
            name = hName.split("/")[-1]
            #if hName.split("/")[-1] not in ["LdgTrijetMass_Before", "LdgTrijetMass_After"]:
            #continue
            # Only plot histograms whose name looks mass- or pt-like
            if "mass" in name.lower():
                pass
            elif "pt" in name.lower():
                pass
            else:
                continue
            QCDVsInvertedComparison(datasetsMgr, name)
    else:
        Print(
            "Cannot draw the Baseline Vs Inverted histograms without the option --mergeEWK. \nExit",
            True)
    return
def main(argv):
    """
    Entry point (Python 2 script): read a multicrab directory from argv, build
    EWK-merged jet-balance histograms for each name in the module-level
    histonameList, draw/save them, and finally print the quark-fraction
    ratios plus a gluon-jet scale factor derived from the inverted/baseline
    bin contents.
    """
    dirs = []
    if len(sys.argv) < 2:
        usage()
    dirs.append(sys.argv[1])

    analysis = "signalAnalysisInvertedTau"
    optModes = []
    #optModes.append("")
    #optModes.append("OptQCDTailKillerLoosePlus")
    #optModes.append("OptQCDTailKillerMediumPlus")
    optModes.append("OptQCDTailKillerTightPlus")

    color = 1
    #plot = plots.PlotBase()
    jetRatios = []
    for HISTONAME in histonameList:
        for optMode in optModes:
            plot = plots.PlotBase()
            datasets = dataset.getDatasetsFromMulticrabDirs(
                dirs,
                dataEra=dataEra,
                searchMode=searchMode,
                analysisName=analysis,
                optimizationMode=optMode)
            datasets.updateNAllEventsToPUWeighted()
            datasets.loadLuminosities()
            plots.mergeRenameReorderForDataMC(datasets)
            datasets.merge(
                "EWK",
                ["TTJets", "WJets", "DYJetsToLL", "SingleTop", "Diboson"])
            histonames = datasets.getDataset("EWK").getDirectoryContent(
                HISTONAME)
            mtplot = plots.DataMCPlot(datasets, HISTONAME)
            mt = mtplot.histoMgr.getHisto("EWK").getRootHisto().Clone(
                HISTONAME)
            #legendName = legendName.replace("Plus","")
            mt.SetName("JetBalance")
            mt.SetLineColor(color)
            # Bin 1 = quark-like, bin 3 = gluon-like content (inverted vs
            # baseline depending on the histogram name)
            if HISTONAME == "InvertedAllCutsJetBalance":
                qinv = mt.GetBinContent(1)
                ginv = mt.GetBinContent(3)
            else:
                qbase = mt.GetBinContent(1)
                gbase = mt.GetBinContent(3)
            jetRatios.append(
                mt.GetBinContent(1) /
                (mt.GetBinContent(1) + mt.GetBinContent(3)))
            plot.histoMgr.appendHisto(histograms.Histo(mt, mt.GetName()))
            color = color + 1
            style = tdrstyle.TDRStyle()
            plot.createFrame(HISTONAME)
            #plot.createFrame(HISTONAME.replace("shape","final"))
            #plot.createFrame(optMode.replace("Opt","Mt_DataDrivenVsMC_"))
            #moveLegend={"dx": -0.3,"dy": 0.}
            #plot.setLegend(histograms.moveLegend(histograms.createLegend(), **moveLegend))
            histograms.addCmsPreliminaryText()
            histograms.addEnergyText()
            lumi = datasets.getDataset("Data").getLuminosity()
            histograms.addLuminosityText(x=None, y=None, lumi=lumi)
            plot.draw()
            plot.save()
    # NOTE(review): qbase/gbase and qinv/ginv are each assigned only on one
    # branch of the if above — these prints raise NameError unless
    # histonameList contains both an "InvertedAllCutsJetBalance" entry and a
    # non-inverted entry; confirm histonameList always provides both.
    print "Baseline All Cuts", qbase + gbase
    print "Inverted All Cuts", qinv + ginv
    gsf = qinv * gbase / (ginv * qbase)
    #print "Gluon jet SF:", gsf
    #print "Corrected Inverted Jet Balance:", qinv/(qinv+gsf*ginv), ", Baseline Jet Balance:", qbase/(qbase+gbase)
    for i in range(0, len(jetRatios)):
        print histonameList[i], ":", jetRatios[i]
def main(opts):
    """
    Entry point: for each optimisation mode, prepare the dataset manager,
    merge EWK samples, collect the numerator (AfterAllSelections) and
    denominator (AfterStandardSelections) leading-trijet-pT histogram paths
    from the inclusive/Genuine/Fake folders, and run GetScaleFactors().

    Fixes vs previous revision:
    - the "lowMET" paths were removed with list.remove() while iterating the
      same list, which skips the element following each removal; replaced
      with filtering comprehensions.
    - the histogram-name comprehensions now reuse the numerator_name /
      denominator_name constants instead of repeating the literals.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(False)
    style.setGridY(False)

    # If user does not define optimisation mode do all of them
    # NOTE(review): optList is not defined in this function — it is assumed
    # to be a module-level list (siblings build it from
    # dsetMgrCreator.getOptimizationModes()); confirm it exists at module
    # scope, otherwise the opts.optMode == None path raises NameError.
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        else:
            pass
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Print dataset information before removing anything?
        if 0:
            datasetsMgr.PrintInfo()

        # Remove datasets
        filterKeys = ["TTW"]
        for key in filterKeys:
            datasetsMgr.remove(
                filter(lambda name: key in name,
                       datasetsMgr.getAllDatasetNames()))

        # Re-order datasets
        datasetOrder = []
        haveQCD = False
        for d in datasetsMgr.getAllDatasets():
            if "QCD" in d.getName():
                haveQCD = True
            datasetOrder.append(d.getName())
        datasetsMgr.selectAndReorder(datasetOrder)

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Merge EWK datasets
        EWKlist = ["WJetsHT", "DYJetsToLL", "Diboson"]
        datasetsMgr.merge("EWK", EWKlist)

        # Print dataset information after merging
        datasetsMgr.PrintInfo()

        # Determine integrated Lumi before removing data
        intLumi = datasetsMgr.getDataset('Data').getLuminosity()

        # Define Numerator & Denominator Histograms
        numerator_name = "AfterAllSelections_LeadingTrijet_Pt"
        denominator_name = "AfterStandardSelections_LeadingTrijet_Pt"

        # Do the fit on the histo after ALL selections (incl. topology cuts)
        folderListIncl = datasetsMgr.getDataset(
            datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(
                opts.folder)
        folderList = [h for h in folderListIncl if numerator_name in h]

        # The numerator/denominator histograms live in the inclusive folder
        # plus its "Genuine" and "Fake" variants
        folderPath = os.path.join(opts.folder, "")
        folderPathGen = os.path.join(opts.folder + "Genuine", "")
        folderPathFake = os.path.join(opts.folder + "Fake", "")

        histoList = folderList
        num_pathList = [os.path.join(folderPath, h) for h in histoList]
        num_pathList.extend(
            [os.path.join(folderPathGen, h) for h in histoList])
        num_pathList.extend(
            [os.path.join(folderPathFake, h) for h in histoList])
        # Drop low-MET histograms (comprehension instead of remove-in-loop)
        num_pathList = [h for h in num_pathList if "lowMET" not in h]

        histoList = [h for h in folderListIncl if denominator_name in h]
        den_pathList = [os.path.join(folderPath, h) for h in histoList]
        den_pathList.extend(
            [os.path.join(folderPathGen, h) for h in histoList])
        den_pathList.extend(
            [os.path.join(folderPathFake, h) for h in histoList])
        # Drop low-MET histograms (comprehension instead of remove-in-loop)
        den_pathList = [h for h in den_pathList if "lowMET" not in h]

        # Calculate Scale Factors
        GetScaleFactors(datasetsMgr, num_pathList, den_pathList, intLumi)
    return
default=[], help="MSSM scenarios") parser.add_option("-m", "--mass", dest="massPoints", action="append", default=[], help="mass values (will scan only these)") parser.add_option("--mAtanbeta", dest="mAtanbeta", action="store_true", default=False, help="do mA,tanbeta plot (default=mHp,tanbeta)") (opts, args) = parser.parse_args() # Apply TDR style style = tdrstyle.TDRStyle() # Parse selected models myModelNames = tbtools.findModelNames(".") mySelectedModels = myModelNames[:] if len(opts.scenarios) > 0: mySelectedModels = opts.scenarios[:] # Loop over scenario models myPrintReminderStatus = False for m in mySelectedModels: print "Considering model: %s" % m # result structure: dictionary(key=m_tb, value=Result)) print tbtools._resultsPattern % m if os.path.exists(tbtools._resultsPattern % m): # Results text file exists, read them readTextResults(opts.massPoints, m, opts.mAtanbeta) else:
def main():
    """
    Entry point: build the dataset manager from module-level parseOpts/kwargs,
    then draw and save a ComparisonManyPlot for every histogram name in the
    module-level hNames list.
    """
    style = tdrstyle.TDRStyle()

    # Set ROOT batch mode boolean
    ROOT.gROOT.SetBatch(parseOpts.batchMode)

    # Get all datasets from the mcrab dir
    datasetsMgr = GetDatasetsFromDir(parseOpts.mcrab, kwargs.get("analysis"))

    # Determine Integrated Luminosity (If Data datasets present)
    intLumi = GetLumi(datasetsMgr)

    # Update to PU
    datasetsMgr.updateNAllEventsToPUWeighted()

    # Remove datasets
    datasetsMgr.remove(kwargs.get("rmDataset"))
    # datasetsMgr.remove(filter(lambda name: not "QCD" in name, datasetsMgr.getAllDatasetNames()))
    # datasetsMgr.remove(filter(lambda name: "QCD" in name in name, datasetsMgr.getAllDatasetNames()))

    # Set custom XSections
    # d.getDataset("TT_ext3").setCrossSection(831.76)

    # Default merging & ordering: "Data", "QCD", "SingleTop", "Diboson"
    plots.mergeRenameReorderForDataMC(
        datasetsMgr)  #WARNING: Merged MC histograms must be normalized to something!

    # Remove datasets (for merged names)
    datasetsMgr.remove(kwargs.get("rmDataset"))

    # For-loop: All Histogram names
    for counter, hName in enumerate(hNames):
        # Get the save path and name
        savePath, saveName = GetSavePathAndName(hName, **kwargs)

        # Get Histos for Plotter
        refHisto, otherHistos = GetHistosForPlotter(datasetsMgr, hName,
                                                    **kwargs)

        # Create a comparison plot
        p = plots.ComparisonManyPlot(refHisto, otherHistos)

        # Remove negative contributions
        #RemoveNegativeBins(datasetsMgr, hName, p)

        # Create a frame
        # (local "opts" dict deliberately shadows nothing here: this main()
        # reads its options from the module-level parseOpts/kwargs)
        opts = {"ymin": 0.0, "ymaxfactor": 1.2}
        ratioOpts = {"ymin": 0.0, "ymax": 2.0}
        p.createFrame(saveName,
                      createRatio=kwargs.get("createRatio"),
                      opts=opts,
                      opts2=ratioOpts)

        # Customise Legend
        moveLegend = {"dx": -0.1, "dy": +0.0, "dh": -0.1}
        p.setLegend(
            histograms.moveLegend(histograms.createLegend(), **moveLegend))
        #p.removeLegend()

        # Customise frame
        p.getFrame().GetYaxis().SetTitle(getTitleY(refHisto, **kwargs))
        #p.setEnergy("13")
        if kwargs.get("createRatio"):
            p.getFrame2().GetYaxis().SetTitle("Ratio")
            p.getFrame2().GetYaxis().SetTitleOffset(1.6)

        # SetLog
        SetLogAndGrid(p, **kwargs)

        # Add cut line/box
        _kwargs = {"lessThan": kwargs.get("cutLessThan")}
        p.addCutBoxAndLine(cutValue=kwargs.get("cutValue"),
                           fillColor=kwargs.get("cutFillColour"),
                           box=kwargs.get("cutBox"),
                           line=kwargs.get("cutLine"),
                           **_kwargs)

        # Move the refDataset to first in the draw order (back)
        histoNames = [h.getName() for h in p.histoMgr.getHistos()]
        p.histoMgr.reorder(
            filter(
                lambda n: plots._legendLabels[kwargs.get("refDataset")
                                              ] not in n, histoNames))

        # Draw plots
        p.draw()

        # Customise text
        histograms.addStandardTexts(lumi=intLumi)
        # histograms.addText(0.4, 0.9, "Alexandros Attikis", 17)
        # histograms.addText(0.4, 0.11, "Runs " + datasetsMgr.loadRunRange(), 17)

        # Save canvas under custom dir
        SaveAs(p, savePath, saveName, kwargs.get("saveFormats"))
    return
def main(opts):
    """
    Entry point: for each optimisation mode, prepare the dataset manager,
    merge EWK samples, and draw QCD purity multigraphs (Baseline/Inverted x
    AfterCRSelections/AfterAllSelections, excluding the SR) for a fixed list
    of kinematic variables.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setGridX(False)
    style.setGridY(False)
    style.setOptStat(False)

    # Obtain dsetMgrCreator and register it to module selector
    dsetMgrCreator = dataset.readFromMulticrabCfg(directory=opts.mcrab)

    # Get list of eras, modes, and optimisation modes
    erasList = dsetMgrCreator.getDataEras()
    modesList = dsetMgrCreator.getSearchModes()
    optList = dsetMgrCreator.getOptimizationModes()
    sysVarList = dsetMgrCreator.getSystematicVariations()
    sysVarSrcList = dsetMgrCreator.getSystematicVariationSources()

    # If user does not define optimisation mode do all of them
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        else:
            pass
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if 0:
            datasetsMgr.printSelections()

        # Print PSets used for FakeBMeasurement
        if 0:
            datasetsMgr.printSelections()
            PrintPSet("BJetSelection", datasetsMgr)
            PrintPSet("TopSelectionBDT", datasetsMgr)
            PrintPSet("FakeBMeasurement", datasetsMgr)
            sys.exit()

        # Set/Overwrite cross-sections
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()
            datasetsMgr.PrintInfo()

        # Filter the datasets
        datasetsMgr.remove(
            filter(lambda name: "Charged" in name,
                   datasetsMgr.getAllDatasetNames()))
        # datasetsMgr.remove(filter(lambda name: "Charged" in name and not "M_500" in name, datasetsMgr.getAllDatasetNames()))

        # ZJets and DYJets overlap!
        if "ZJetsToQQ_HT600toInf" in datasetsMgr.getAllDatasetNames(
        ) and "DYJetsToQQ_HT180" in datasetsMgr.getAllDatasetNames():
            Print(
                "Cannot use both ZJetsToQQ and DYJetsToQQ due to duplicate events? Investigate. \nRemoving ZJetsToQQ datasets for now ..",
                True)
            datasetsMgr.remove(
                filter(lambda name: "ZJetsToQQ" in name,
                       datasetsMgr.getAllDatasetNames()))

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Get Luminosity
        if opts.intLumi < 0:
            if "Data" in datasetsMgr.getAllDatasetNames():
                opts.intLumi = datasetsMgr.getDataset("Data").getLuminosity()
            else:
                opts.intLumi = 1.0

        # Re-order datasets (different for inverted than default=baseline)
        if 0:
            newOrder = ["Data"]
            newOrder.extend(aux.GetListOfEwkDatasets())
            datasetsMgr.selectAndReorder(newOrder)

        # Print post-merged data dataset summary
        datasetsMgr.PrintInfo()

        # Merge EWK samples
        datasetsMgr.merge("EWK", aux.GetListOfEwkDatasets())
        plots._plotStyles["EWK"] = styles.getAltEWKStyle()

        # Print post EWK-merge dataset summary
        datasetsMgr.PrintInfo()

        # Get all histograms from the in the selected folder inside the ROOT files
        allHistos = datasetsMgr.getAllDatasets()[0].getDirectoryContent(
            opts.folder)
        hList = [
            h for h in allHistos if "CRSelections" in h and "_Vs" not in h
        ]
        hList.extend(
            [h for h in allHistos if "AllSelections" in h and "_Vs" not in h])
        # hList.extend([h for h in allHistos if "StandardSelections" in h and "_Vs" not in h])

        # Create a list with strings included in the histogram names you want to plot
        myHistos = [
            "LdgTrijetPt", "LdgTrijetMass", "TetrajetBJetPt",
            "TetrajetBJetEta", "LdgTetrajetPt", "LdgTetrajetMass", "MVAmax2",
            "MVAmax1", "HT", "MET"
        ]
        #myHistos = ["LdgTrijetPt", "LdgTrijetMass", "LdgTetrajetMass", "MVAmax2", "MVAmax1", "Njets", "NBjets",
        #            "Bjet3Bdisc", "Bjet2Bdisc", "Bjet1Bdisc", "Bjet3Pt", "Bjet2Pt", "Bjet1Pt"]

        # For-loop: All histos
        for i, h in enumerate(myHistos, 1):
            hGraphList = []
            for b in ["Baseline_", "Inverted_"]:
                #for r in ["_AfterAllSelections", "_AfterCRSelections"]:
                for r in ["_AfterCRSelections", "_AfterAllSelections"]:
                    histoName = b + h + r
                    hgQCD, kwargs = GetPurityHistoGraph(
                        datasetsMgr, opts.folder, histoName)

                    # Do not draw SR in multigraph plot!
                    if GetControlRegionLabel(histoName) != "SR":
                        hGraphList.append(hgQCD)

                    # Plot individual purity graphs?
                    if 0:
                        PlotHistoGraph(hgQCD, kwargs)

            msg = "{:<9} {:>3} {:<1} {:<3} {:<50}".format(
                "Histogram", "%i" % i, "/", "%s:" % (len(myHistos)), h)
            Print(
                ShellStyles.SuccessStyle() + msg + ShellStyles.NormalStyle(),
                i == 1)
            # kwargs here is the one returned by the last GetPurityHistoGraph call
            PlotHistoGraphs(hGraphList, kwargs)

    # Inform user where the plots were saved
    Print(
        "All plots saved under directory %s" %
        (ShellStyles.NoteStyle() + aux.convertToURL(opts.saveDir, opts.url) +
         ShellStyles.NormalStyle()), True)
    return
def main(argv):
    """
    Entry point (Python 2 script): derive the QCD normalization for the
    inverted-tau method. For each comparison point, build baseline and
    inverted MET plots per tau-pT bin, subtract EWK from data to obtain QCD,
    fit EWK/QCD/data shapes with InvertedTauID, and write the resulting
    normalization factors (.py) and LaTeX fit summary (.tex).
    """
    dirs = []
    if len(sys.argv) < 2:
        usage()
    dirs.append(sys.argv[1])

    comparisonList = ["AfterStdSelections"]

    # Create all datasets from a multicrab task
    datasets = dataset.getDatasetsFromMulticrabDirs(dirs,
                                                    dataEra=dataEra,
                                                    searchMode=searchMode,
                                                    analysisName=analysis)
    #print datasets.getDatasetNames()
    #print datasets

    # Check multicrab consistency
    consistencyCheck.checkConsistencyStandalone(dirs[0],
                                                datasets,
                                                name="QCD inverted")

    # As we use weighted counters for MC normalisation, we have to
    # update the all event count to a separately defined value because
    # the analysis job uses skimmed pattuple as an input
    datasets.updateNAllEventsToPUWeighted()

    # Read integrated luminosities of data datasets from lumi.json
    datasets.loadLuminosities()

    # Include only 120 mass bin of HW and HH datasets
    datasets.remove(
        filter(lambda name: "TTToHplus" in name and not "M120" in name,
               datasets.getAllDatasetNames()))
    datasets.remove(
        filter(lambda name: "HplusTB" in name,
               datasets.getAllDatasetNames()))
    datasets.remove(
        filter(lambda name: "Hplus_taunu_t-channel" in name,
               datasets.getAllDatasetNames()))
    datasets.remove(
        filter(lambda name: "Hplus_taunu_tW-channel" in name,
               datasets.getAllDatasetNames()))
    #datasets.remove(filter(lambda name: "TTJets_SemiLept" in name, datasets.getAllDatasetNames()))
    #datasets.remove(filter(lambda name: "TTJets_FullLept" in name, datasets.getAllDatasetNames()))
    #datasets.remove(filter(lambda name: "TTJets_Hadronic" in name, datasets.getAllDatasetNames()))

    # Default merging nad ordering of data and MC datasets
    # All data datasets to "Data"
    # All QCD datasets to "QCD"
    # All single top datasets to "SingleTop"
    # WW, WZ, ZZ to "Diboson"
    plots.mergeRenameReorderForDataMC(datasets)

    # Set BR(t->H) to 0.05, keep BR(H->tau) in 1
    xsect.setHplusCrossSectionsToBR(datasets, br_tH=0.05, br_Htaunu=1)

    # Merge WH and HH datasets to one (for each mass bin)
    # TTToHplusBWB_MXXX and TTToHplusBHminusB_MXXX to "TTToHplus_MXXX"
    plots.mergeWHandHH(datasets)

    datasets.merge(
        "EWK", ["TTJets", "WJets", "DYJetsToLL", "SingleTop", "Diboson"])

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)

    for HISTONAME in comparisonList:
        invertedQCD = InvertedTauID()
        invertedQCD.setLumi(datasets.getDataset("Data").getLuminosity())
        invertedQCD.setInfo([dataEra, searchMode, HISTONAME])

        histonames = datasets.getDataset("Data").getDirectoryContent(
            "ForQCDNormalization/NormalizationMETBaselineTau" + HISTONAME)
        bins = []
        binLabels = []
        for histoname in histonames:
            bins.append(
                histoname.replace("NormalizationMETBaselineTau" + HISTONAME,
                                  ""))
            # Sanitise the ROOT histogram title into a file-name-safe label
            title = datasets.getDataset("Data").getDatasetRootHisto(
                "ForQCDNormalization/NormalizationMETBaselineTau" + HISTONAME
                + "/" + histoname).getHistogram().GetTitle()
            title = title.replace("METBaseline" + HISTONAME, "")
            title = title.replace("#tau p_{T}", "taup_T")
            title = title.replace("#tau eta", "taueta")
            title = title.replace("<", "lt")
            title = title.replace(">", "gt")
            title = title.replace("=", "eq")
            title = title.replace("..", "to")
            title = title.replace(".", "p")
            title = title.replace("/", "_")
            binLabels.append(title)
        binLabels = bins  # for this data set
        print
        print "Histogram bins available", bins
        # bins = ["Inclusive"]
        # bins = ["taup_Tleq50","taup_Teq50to60"]
        print "Using bins ", bins
        print
        print "Bin labels"
        for i in range(len(binLabels)):
            line = bins[i]
            while len(line) < 10:
                line += " "
            line += ": " + binLabels[i]
            print line
        print

        for i, bin in enumerate(bins):
            invertedQCD.setLabel(binLabels[i])
            metBase = plots.DataMCPlot(
                datasets, "ForQCDNormalization/NormalizationMETBaselineTau" +
                HISTONAME + "/NormalizationMETBaselineTau" + HISTONAME + bin)
            metInver = plots.DataMCPlot(
                datasets, "ForQCDNormalization/NormalizationMETInvertedTau" +
                HISTONAME + "/NormalizationMETInvertedTau" + HISTONAME + bin)
            # Rebin before subtracting
            RebinFactor = 10
            metBase.histoMgr.forEachHisto(
                lambda h: h.getRootHisto().Rebin(RebinFactor))
            metInver.histoMgr.forEachHisto(
                lambda h: h.getRootHisto().Rebin(RebinFactor))
            metInverted_data = metInver.histoMgr.getHisto(
                "Data").getRootHisto().Clone(
                    "ForQCDNormalization/NormalizationMETBaselineTau" +
                    HISTONAME + "/NormalizationMETBaselineTau" + HISTONAME +
                    bin)
            metInverted_EWK = metInver.histoMgr.getHisto(
                "EWK").getRootHisto().Clone(
                    "ForQCDNormalization/NormalizationMETBaselineTau" +
                    HISTONAME + "/NormalizationMETBaselineTau" + HISTONAME +
                    bin)
            metBase_data = metBase.histoMgr.getHisto(
                "Data").getRootHisto().Clone(
                    "ForQCDNormalization/NormalizationMETInvertedTau" +
                    HISTONAME + "/NormalizationMETInvertedTau" + HISTONAME +
                    bin)
            metBase_EWK = metBase.histoMgr.getHisto(
                "EWK").getRootHisto().Clone(
                    "ForQCDNormalization/NormalizationMETInvertedTau" +
                    HISTONAME + "/NormalizationMETInvertedTau" + HISTONAME +
                    bin)
            # QCD estimate = data - EWK (bin-by-bin subtraction)
            metBase_QCD = metBase_data.Clone("QCD")
            metBase_QCD.Add(metBase_EWK, -1)
            metInverted_QCD = metInverted_data.Clone("QCD")
            metInverted_QCD.Add(metInverted_EWK, -1)
            metInverted_data = addlabels(metInverted_data)
            metInverted_EWK = addlabels(metInverted_EWK)
            metBase_data = addlabels(metBase_data)
            metBase_EWK = addlabels(metBase_EWK)
            metInverted_QCD = addlabels(metInverted_QCD)
            invertedQCD.plotHisto(metInverted_data, "inverted")
            invertedQCD.plotHisto(metInverted_EWK, "invertedEWK")
            invertedQCD.plotHisto(metBase_data, "baseline")
            invertedQCD.plotHisto(metBase_EWK, "baselineEWK")
            fitOptions = "RB"
            invertedQCD.fitEWK(metInverted_EWK, fitOptions)
            invertedQCD.fitEWK(metBase_EWK, fitOptions)
            invertedQCD.fitQCD(metInverted_QCD, fitOptions)
            invertedQCD.fitData(metBase_data)
            invertedQCD.getNormalization()

        # NOTE(review): Summary/output assumed to run once per HISTONAME,
        # after the bin loop — confirm against the original indentation.
        invertedQCD.Summary()
        normalizationFileName = HISTONAME  #.replace("TauIdAfterCollinearCuts","")
        if HISTONAME == "TauIdAfterCollinearCutsPlusFilteredEWKFakeTaus":
            normalizationFileName = normalizationFileName.replace("Plus", "")
        invertedQCD.WriteNormalizationToFile(
            "QCDInvertedNormalizationFactors" + normalizationFileName + ".py")
        invertedQCD.WriteLatexOutput("fits" + normalizationFileName + ".tex")
def main(opts, signalMass):
    """
    Entry point: for each optimisation mode, prepare a TT-only (plus signal)
    dataset manager and plot top-tagging efficiencies for each active
    numerator/denominator histogram pair via PlotEfficiency().
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(False)
    style.setGridY(False)

    # If user does not define optimisation mode do all of them
    # NOTE(review): optList is not defined in this function — assumed to be a
    # module-level list; if it is not, this path raises NameError when
    # --optMode is not given. Confirm at module scope.
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        else:
            pass
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Set/Overwrite cross-sections
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)
        datasetsMgr.PrintInfo()

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Print dataset information before removing anything?
        if 0:
            datasetsMgr.PrintInfo()

        # Determine integrated Lumi before removing data
        if "Data" in datasetsMgr.getAllDatasetNames():
            intLumi = datasetsMgr.getDataset("Data").getLuminosity()
        else:
            intLumi = 35920  # fallback: 2016 dataset lumi in pb^-1 (hard-coded)

        # Remove datasets
        filterKeys = [
            "Data", "QCD", "TTZToQQ", "TTWJets", "TTTT",
            "ZJetsToQQ_HT600toInf", "DYJetsToQQHT", "SingleTop",
            "WJetsToQQ_HT_600ToInf", "Diboson"
        ]
        for key in filterKeys:
            datasetsMgr.remove(
                filter(lambda name: key in name,
                       datasetsMgr.getAllDatasetNames()))

        # Re-order datasets
        datasetOrder = []
        for d in datasetsMgr.getAllDatasets():
            #if "TT" in d.getName():
            #    continue
            # NOTE(review): "d" is a dataset object while signalMass
            # presumably holds names — "d not in signalMass" may always be
            # True; verify the intended membership test.
            if "M_" in d.getName():
                if d not in signalMass:
                    continue
            datasetOrder.append(d.getName())
        for m in signalMass:
            datasetOrder.insert(0, m)
        #datasetsMgr.selectAndReorder(datasetOrder)

        # Append signal datasets
        #for m in signalMass:
        #    if "_ext1" in signalMass:
        #        continue
        datasetOrder.insert(0, "TT")
        #datasetsMgr.selectAndReorder(datasetOrder)

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Define list with Numerators - Denominators
        '''
        Numerator = ["AllTopQuarkPt_MatchedBDT",
                     "TrijetFakePt_BDT",
                     "AssocTopQuarkPt_MatchedBDT",
                     "HiggsTopQuarkPt_MatchedBDT",
                     "AssocTopQuarkPt_Matched",
                     "HiggsTopQuarkPt_Matched",
                     "AllTopQuarkPt_MatchedBDT",
                     "AllTopQuarkPt_Matched",
                     ]
        Denominator = ["AllTopQuarkPt_Matched",
                       "TrijetFakePt",
                       "AssocTopQuarkPt_Matched",
                       "HiggsTopQuarkPt_Matched",
                       "AssocTopQuarkPt",
                       "HiggsTopQuarkPt",
                       "TopQuarkPt",
                       "TopQuarkPt",
                       ]
        '''
        # Active pairs: fake-rate (TrijetFakePt) and H->tb top efficiency;
        # the commented entries are alternative numerator/denominator choices
        Numerator = [  #"AllTopQuarkPt_MatchedBDT",
            "TrijetFakePt_BDT",
            #"AssocTopQuarkPt_MatchedBDT",
            "HiggsTopQuarkPt_MatchedBDT",
            #"AllTopQuarkPt_MatchedBDT",
            #"AllTopQuarkPt_Matched",
            #"TrijetaPt_LdgOrSldg_Matched",
            ##"TrijetPt_LdgOrSldg_Unmatched",
            #"TrijetPt_LdgOrSldg_MatchedBDT",
            #"TrijetPt_LdgOrSldg_MatchedBDT",
            #"TrijetPt_LdgOrSldg_UnmatchedBDT",
            #"TrijetPt_LdgOrSldg_UnmatchedBDT",
            #"TrijetPt_Ldg_Matched",
            #"TrijetPt_Ldg_MatchedBDT",
            ##"TrijetPt_Ldg_MatchedBDT",
            #"TrijetPt_Ldg_UnmatchedBDT",
            #"TrijetPt_Sldg_Matched",
            #"TrijetPt_Sldg_MatchedBDT",
            #"TrijetPt_Sldg_MatchedBDT",
            #"TrijetPt_Sldg_UnmatchedBDT",
        ]
        Denominator = [  #"AllTopQuarkPt_Matched",
            "TrijetFakePt",
            #"AssocTopQuarkPt_Matched",
            "HiggsTopQuarkPt_Matched",
            #"TopQuarkPt",
            #"TopQuarkPt",
            #"TrijetPt_LdgOrSldg",
            ##"TrijetPt_LdgOrSldg",
            #"TrijetPt_LdgOrSldg",
            #"TrijetPt_LdgOrSldg_Matched",
            #"TrijetPt_LdgOrSldg",
            #"TrijetPt_LdgOrSldg_Unmatched",
            #"TrijetPt_Ldg",
            #"TrijetPt_Ldg",
            ##"TrijetPt_Ldg_Matched",
            #"TrijetPt_Ldg_Unmatched",
            #"TrijetPt_Subldg",
            #"TrijetPt_Subldg",
            #"TrijetPt_Sldg_Matched",
            #"TrijetPt_Sldg_Unmatched",
        ]

        # For-loop: All numerator-denominator pairs
        for i in range(len(Numerator)):
            numerator = os.path.join(opts.folder, Numerator[i])
            denominator = os.path.join(opts.folder, Denominator[i])
            PlotEfficiency(datasetsMgr, numerator, denominator, intLumi)
            #CalcEfficiency(datasetsMgr, numerator, denominator, intLumi)
    return
def main(opts, signalMass):
    """
    Plot MC histograms (TrijetMass/TetrajetMass/TetrajetBjetPt and the
    contents of opts.folder) for the background and signal dataset managers.

    FIX: the signal-manager removals previously filtered against
    datasetsMgr.getAllDatasetNames() *after* that manager had already been
    pruned, so any sample removed from the background manager first (e.g.
    "Data") was never removed from datasetsMgr_signal. Each manager now
    filters against its own name list.
    """

    def _removeByKeys(mgr, keys):
        # Remove every dataset whose name contains any of the given substrings.
        for key in keys:
            mgr.remove(filter(lambda name: key in name, mgr.getAllDatasetNames()))

    optModes = ["OptChiSqrCutValue100"]
    if opts.optMode != None:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset managers (background and signal copies)
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json

        datasetsMgr_signal = GetDatasetsFromDir(opts)
        datasetsMgr_signal.updateNAllEventsToPUWeighted()
        datasetsMgr_signal.loadLuminosities()  # from lumi.json

        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Set/Overwrite cross-sections (signal normalised to 1 pb)
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)
                datasetsMgr_signal.getDataset(d.getName()).setCrossSection(1.0)

        # Determine integrated Lumi before removing data
        # intLumi = datasetsMgr.getDataset("Data").getLuminosity()
        intLumi = 35200

        # Remove unwanted datasets; the signal manager additionally drops
        # QCD and TT so that only the signal samples remain.
        bkgKeys = ["Data", "SingleTop", "DYJetsToQQHT", "TTZToQQ", "TTWJetsToQQ",
                   "WJetsToQQ", "Diboson", "TTTT", "FakeBMeasurementTrijetMass"]
        sigKeys = ["Data", "QCD_b", "QCD_HT", "SingleTop", "DYJetsToQQHT", "TTZToQQ",
                   "TTWJetsToQQ", "WJetsToQQ", "Diboson", "TTTT", "TT",
                   "FakeBMeasurementTrijetMass"]
        _removeByKeys(datasetsMgr, bkgKeys)
        _removeByKeys(datasetsMgr_signal, sigKeys)
        if opts.noQCD:
            _removeByKeys(datasetsMgr, ["QCD_b", "QCD_HT"])

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Merge EWK samples
        if opts.mergeEWK:
            datasetsMgr.merge("EWK", GetListOfEwkDatasets())
            plots._plotStyles["EWK"] = styles.getAltEWKStyle()

        # Re-order datasets: signal masses first, then everything else.
        # NOTE(review): "d not in signalMass" compares a Dataset object to a
        # list of name strings and is therefore always True, so every "M_"
        # dataset is skipped here and re-added by name below. Kept as-is to
        # preserve behaviour.
        datasetOrder = []
        for d in datasetsMgr.getAllDatasets():
            if "M_" in d.getName():
                if d not in signalMass:
                    continue
            datasetOrder.append(d.getName())
        for m in signalMass:
            datasetOrder.insert(0, m)
        datasetsMgr.selectAndReorder(datasetOrder)
        datasetsMgr_signal.selectAndReorder(datasetOrder)

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Apply TDR style
        style = tdrstyle.TDRStyle()
        style.setOptStat(True)
        style.setGridX(False)
        style.setGridY(False)

        # Collect histogram paths: everything in opts.folder (if any) ...
        folder = opts.folder
        histoPaths1 = ["H2"]
        if folder != "":
            histoList = datasetsMgr.getDataset(datasetOrder[0]).getDirectoryContent(folder)
            histoPaths1 = [os.path.join(folder, h) for h in histoList]

        # ... plus selected histograms from the top-level folder
        folder = ""
        histoList = datasetsMgr.getDataset(datasetOrder[0]).getDirectoryContent(folder)
        hList0 = [x for x in histoList if "TrijetMass" in x]
        hList1 = [x for x in histoList if "TetrajetMass" in x]
        hList2 = [x for x in histoList if "TetrajetBjetPt" in x]
        histoPaths2 = [os.path.join(folder, h) for h in hList0 + hList1 + hList2]

        # For-loop: All histograms (skip TH2)
        for h in histoPaths1 + histoPaths2:
            if "vs" in h.lower():  # Skip TH2D
                continue
            PlotMC(datasetsMgr, h, intLumi)
    return
def __init__(self, opts, config, dirname, luminosity, observation, datasetGroups, verbose=False):
    """
    Build all data-driven control plots (one per mass point and control-plot
    definition in the datacard config) plus the per-mass selection-flow plot.

    FIX: the QCD-MC accumulation branch tested the undefined name ``hQCD``
    instead of ``hQCDMC``, raising NameError the first time a QCD-MC column
    was encountered.

    :param opts:          command-line options object
    :param config:        datacard configuration module (validated below)
    :param dirname:       output directory for the plots
    :param luminosity:    integrated luminosity used for normalisation
    :param observation:   observation column providing the data shapes
    :param datasetGroups: list of dataset columns (signal/FakeB/QCD-MC/EWK-MC)
    :param verbose:       enable verbose printouts
    """
    self._validateDatacard(config)
    self._config = config
    self._verbose = verbose
    self._opts = opts
    self._dirname = dirname
    self._luminosity = luminosity
    self._observation = observation
    self._datasetGroups = datasetGroups

    # Define label options
    myStyle = tdrstyle.TDRStyle()
    myStyle.setOptStat(False)
    plots._legendLabels["MCStatError"] = "Bkg. stat."
    plots._legendLabels["MCStatSystError"] = "Bkg. stat.#oplussyst."
    plots._legendLabels["BackgroundStatError"] = "Bkg. stat. unc"
    plots._legendLabels["BackgroundStatSystError"] = "Bkg. stat.#oplussyst. unc."

    # Make control plots
    self.Verbose(ShellStyles.HighlightStyle() + "Generating control plots" + ShellStyles.NormalStyle(), True)

    # Definitions; m = -1 is a pseudo mass point for plotting without signal
    massPoints = []
    massPoints.extend(self._config.MassPoints)
    if self._config.OptionDoWithoutSignal:
        massPoints.append(-1)  # for plotting with no signal
    nMasses = len(massPoints)
    nPlots = len(self._config.ControlPlots)
    counter = 0

    # For-loop: All mass points
    for m in massPoints:
        # Initialize flow plot
        selectionFlow = SelectionFlowPlotMaker(self._opts, self._config, m)

        # For-loop: All control plots
        for i in range(0, nPlots):
            counter += 1

            # Skip if control plot does not exist
            if observation.getControlPlotByIndex(i) == None:
                continue

            # Get the control plot
            myCtrlPlot = self._config.ControlPlots[i]

            # The case m < 0 is for plotting histograms without any signal
            if m > 0:
                saveName = "%s/DataDrivenCtrlPlot_M%d_%02d_%s" % (self._dirname, m, i, myCtrlPlot.title)
                msg = "Control Plot %d/%d (m=%s GeV)" % (counter, nMasses * nPlots, str(m))
            else:
                saveName = "%s/DataDrivenCtrlPlot_%02d_%s" % (self._dirname, i, myCtrlPlot.title)
                msg = "Control Plot %d/%d (no signal)" % (counter, nMasses * nPlots)

            # Inform the user of progress
            self.PrintFlushed(ShellStyles.AltStyle() + msg + ShellStyles.NormalStyle(), counter == 1)
            if counter == len(massPoints) * nPlots:
                print

            # Initialize histograms
            hData = None
            hSignal = None
            hFakeB = None
            hQCDMC = None
            myStackList = []

            # For-loop: All dataset columns (to find histograms)
            for c in self._datasetGroups:
                self.Verbose("Dataset is %s for plot %s" % (myCtrlPlot.title, c.getLabel()), False)

                # Skip plot?
                bDoPlot = (m < 0 or c.isActiveForMass(m, self._config)) and not c.typeIsEmptyColumn() and not c.getControlPlotByIndex(i) == None
                if not bDoPlot:
                    continue

                # Clone histo
                h = c.getControlPlotByIndex(i)["shape"].Clone()

                if c.typeIsSignal():
                    self.Verbose("Scaling histogram labelled \"%s\" with BR=%.2f" % (c.getLabel(), self._config.OptionBr), False)
                    h.Scale(self._config.OptionBr)
                    if hSignal == None:
                        hSignal = h.Clone()
                    else:
                        hSignal.Add(h)
                elif c.typeIsFakeB():
                    if hFakeB == None:
                        hFakeB = h.Clone()
                    else:
                        hFakeB.Add(h)
                elif c.typeIsQCDMC():
                    # FIX: was "if hQCD == None" (undefined name -> NameError)
                    if hQCDMC == None:
                        hQCDMC = h.Clone()
                    else:
                        hQCDMC.Add(h)
                elif c.typeIsEWKMC() or c.typeIsGenuineB():
                    myHisto = histograms.Histo(h, c._datasetMgrColumn)
                    myHisto.setIsDataMC(isData=False, isMC=True)
                    myStackList.append(myHisto)

            # FIXME: what's this exactly?
            if len(myStackList) < 1 or self._config.OptionFakeBMeasurementSource != "DataDriven":
                continue

            # Stack all the histograms (data-driven FakeB preferred over QCD MC)
            if hFakeB != None:
                myHisto = histograms.Histo(hFakeB, "FakeB", legendLabel=_legendLabelFakeB)
                myHisto.setIsDataMC(isData=False, isMC=True)
                myStackList.insert(0, myHisto)
            elif hQCDMC != None:
                myHisto = histograms.Histo(hQCDMC, "QCDMC", legendLabel=_legendLabelQCDMC)
                myHisto.setIsDataMC(isData=False, isMC=True)
                myStackList.insert(0, myHisto)

            hData = observation.getControlPlotByIndex(i)["shape"].Clone()
            hDataUnblinded = hData.Clone()

            # Apply blinding & Get blinding string
            myBlindingString = self._applyBlinding(myCtrlPlot, myStackList, hData, hSignal)

            # Data
            myDataHisto = histograms.Histo(hData, "Data")
            myDataHisto.setIsDataMC(isData=True, isMC=False)
            myStackList.insert(0, myDataHisto)

            # Add signal
            if m > 0:
                mySignalLabel = "HplusTB_M%d" % m
                myHisto = histograms.Histo(hSignal, mySignalLabel)
                myHisto.setIsDataMC(isData=False, isMC=True)
                myStackList.insert(1, myHisto)

            # Add data to selection flow plot
            selectionFlow.addColumn(myCtrlPlot.flowPlotCaption, hDataUnblinded, myStackList[1:])

            # Make plot
            myStackPlot = None
            myParams = myCtrlPlot.details.copy()
            myStackPlot = plots.DataMCPlot2(myStackList)
            myStackPlot.setLuminosity(self._luminosity)
            myStackPlot.setEnergy("%d" % self._config.OptionSqrtS)
            myStackPlot.setDefaultStyles()

            # Tweak parameters
            if not "unit" in myParams.keys():
                myParams["unit"] = ""
            if myParams["unit"] != "":
                myParams["xlabel"] = "%s (%s)" % (myParams["xlabel"], myParams["unit"])

            # Apply various settings to my parameters
            self._setBlingingString(myBlindingString, myParams)
            self._setYlabelWidthSuffix(hData, myParams)
            self._setLegendPosition(myParams)
            self._setRatioLegendPosition(myParams)

            # Remove non-identified keywords
            del myParams["unit"]

            # Ratio axis
            if not "opts2" in myParams.keys():
                myParams["opts2"] = {"ymin": 0.3, "ymax": 1.7}

            # Make sure BR is indicated if anything else but BR=1.0
            if m > 0 and self._config.OptionBr != 1.0:
                myStackPlot.histoMgr.setHistoLegendLabelMany({
                    #mySignalLabel: "H^{+} m_{H^{+}}=%d GeV (x %s)" % (m, self._config.OptionBr)
                    mySignalLabel: "m_{H^{+}}=%d GeV (x %s)" % (m, self._config.OptionBr)
                })

            # Do plotting
            drawPlot(myStackPlot, saveName, **myParams)

        # Do selection flow plot (once per mass point)
        selectionFlow.makePlot(self._dirname, m, len(self._config.ControlPlots), self._luminosity)
    return
def main(opts, signalMass):
    """
    Plot numerator/denominator efficiency histograms (e.g. top-quark BDT
    matching efficiencies) for the given signal mass points.

    FIX: the numerator->denominator mapping was a dict literal with repeated
    keys ("EventTrijetPt2T_MatchedBDT" appeared 3 times,
    "AllTopQuarkPt_MatchedBDT" twice); Python keeps only the last value per
    key, silently dropping the other intended plots. An ordered list of
    (numerator, denominator) pairs preserves them all.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(True)
    style.setGridY(True)

    # If user does not define optimisation mode do all of them
    # NOTE(review): optList is not defined in this function; if opts.optMode
    # is None this relies on a module-level optList being in scope - verify.
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All optimisation modes
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Set/Overwrite cross-sections (signal normalised to 1 pb)
        for d in datasetsMgr.getAllDatasets():
            if "ChargedHiggs" in d.getName():
                datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Print dataset information before removing anything?
        if 0:
            datasetsMgr.PrintInfo()

        # Determine integrated Lumi before removing data
        if "Data" in datasetsMgr.getAllDatasetNames():
            intLumi = datasetsMgr.getDataset("Data").getLuminosity()

        # Remove datasets
        filterKeys = ["Data", "QCD", "TTZToQQ", "TTWJets", "TTTT"]
        for key in filterKeys:
            datasetsMgr.remove(filter(lambda name: key in name, datasetsMgr.getAllDatasetNames()))

        # Re-order datasets: signal masses first
        datasetOrder = []
        for d in datasetsMgr.getAllDatasets():
            if "M_" in d.getName():
                if d not in signalMass:
                    continue
            datasetOrder.append(d.getName())

        # Append signal datasets
        for m in signalMass:
            datasetOrder.insert(0, m)
        datasetsMgr.selectAndReorder(datasetOrder)

        # Print dataset information
        datasetsMgr.PrintInfo()

        # Define the mapping histograms in (numerator, denominator) pairs.
        histoPairs = [
            ("AllTopQuarkPt_MatchedBDT", "AllTopQuarkPt_Matched"),
            ("TrijetFakePt_BDT", "TrijetFakePt"),
            ("AllTopQuarkPt_Matched", "TopQuarkPt"),
            ("EventTrijetPt2T_MatchedBDT", "EventTrijetPt2T_BDT"),
            ("EventTrijetPt2T_MatchedBDT", "EventTrijetPt2T_Matched"),
            ("EventTrijetPt2T_MatchedBDT", "EventTrijetPt2T"),
            ("AllTopQuarkPt_MatchedBDT", "TopQuarkPt"),
            ("SelectedTrijetsPt_BjetPassCSVdisc_afterCuts", "SelectedTrijetsPt_afterCuts"),
            ("TrijetPt_PassBDT_BJetPassCSV", "TrijetPt_PassBDT"),
        ]

        # For-loop: All numerator-denominator pairs
        for num, den in histoPairs:
            numerator = os.path.join(opts.folder, num)
            denominator = os.path.join(opts.folder, den)
            PlotEfficiency(datasetsMgr, numerator, denominator)
    return
def main(opts):
    """
    Plot every 2D ("_Vs_") histogram found in opts.folder for one
    user-selected dataset, looping over the requested optimisation mode(s).
    """
    # Run only the requested optimisation mode, or the default empty one
    optModes = [opts.optMode] if opts.optMode != None else [""]

    # Apply TDR style with the user-requested grid/log settings
    style = tdrstyle.TDRStyle()
    style.setOptStat(False)
    style.setGridX(opts.gridX)
    style.setGridY(opts.gridY)
    style.setLogX(opts.logX)
    style.setLogY(opts.logY)
    style.setLogZ(opts.logZ)
    style.setWide(True, 0.15)
    # style.setPadRightMargin()#0.13)

    # For-loop: All opt Mode
    for optMode in optModes:
        opts.optMode = optMode

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities()  # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()
            datasetsMgr.PrintInfo()

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)
        if 0:
            datasetsMgr.PrintInfo()

        # Determine integrated luminosity unless given on the command line
        if opts.intLumi < 0:
            opts.intLumi = datasetsMgr.getDataset("Data").getLuminosity()

        # Merge EWK samples when the EWK pseudo-dataset is requested
        if opts.dataset == "EWK":
            datasetsMgr.merge("EWK", aux.GetListOfEwkDatasets())
            plots._plotStyles["EWK"] = styles.getAltEWKStyle()

        # Keep only the dataset the user asked for
        newOrder = [d.getName() for d in datasetsMgr.getAllDatasets() if d.getName() == opts.dataset]

        # Sanity check: exactly one dataset must have matched
        nDatasets = len(newOrder)
        if nDatasets < 1:
            msg = "Please select a valid dataset. Dataset \"%s\" does not exist!" % (opts.dataset)
            Print(ShellStyles.ErrorStyle() + msg + ShellStyles.NormalStyle(), True)
            datasetsMgr.PrintInfo()
            sys.exit()
        if nDatasets > 1:
            msg = "Please select only 1 valid dataset. Requested %i datasets for plotting!" % (nDatasets)
            Print(ShellStyles.ErrorStyle() + msg + ShellStyles.NormalStyle(), True)
            datasetsMgr.PrintInfo()
            sys.exit()

        # Select only given dataset
        datasetsMgr.selectAndReorder(newOrder)

        # Print dataset information
        msg = "Plotting for single dataset \"%s\". Integrated luminosity is %.2f 1/fb" % (opts.dataset, opts.intLumi)
        Print(ShellStyles.NoteStyle() + msg + ShellStyles.NormalStyle(), True)
        datasetsMgr.PrintInfo()

        # Collect histogram paths and plot every 2D histogram
        folder = opts.folder
        histoList = datasetsMgr.getDataset(datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(folder)
        for hPath in [os.path.join(folder, h) for h in histoList]:
            if "_Vs_" not in hPath:
                continue
            Plot2dHistograms(datasetsMgr, hPath)

    Print("All plots saved under directory %s" % (ShellStyles.NoteStyle() + aux.convertToURL(opts.saveDir, opts.url) + ShellStyles.NormalStyle()), True)
    return
def main(): if len(sys.argv) == 1: usage() rootfile = "" jsonfile = "limits.json" root_re = re.compile("(?P<rootfile>(\S*\.root))") json_re = re.compile("(?P<jsonfile>(\S*\.json))") for argv in sys.argv: match = root_re.search(argv) if match: rootfile = match.group(0) match = json_re.search(argv) if match: jsonfile = match.group(0) limits = limit.BRLimits( limitsfile=jsonfile, configfile="limitdata/lightHplus_configuration.json") # Enable OpenGL ROOT.gEnv.SetValue("OpenGL.CanvasPreferGL", 1) # Apply TDR style style = tdrstyle.TDRStyle() if limit.forPaper: histograms.cmsTextMode = histograms.CMSMode.PAPER limit.forPaper = True # to get GeV without c^2 # Get BR limits masses = limits.mass brs = limits.observed print "Observed masses and BR's" for i in range(len(masses)): print " ", masses[i], brs[i] global db db = BRXSDB.BRXSDatabaseInterface(rootfile, program="2HDMC", BRvariable="BR_tHpb") for i, m in enumerate(masses): db.addExperimentalBRLimit(m, brs[i]) graphs = {} obs = limits.observedGraph() # Remove blinded obs points for i in reversed(range(0, obs.GetN())): if obs.GetY()[i] < 0.00000001: print " REMOVING POINT", obs.GetY( )[i], " corresponding mass=", obs.GetX()[i] obs.RemovePoint(i) print graphs["exp"] = limits.expectedGraph() graphs["exp1"] = limits.expectedBandGraph(sigma=1) graphs["exp2"] = limits.expectedBandGraph(sigma=2) if obs.GetN() > 0: graphs["obs"] = obs # Get theory uncertainties on observed obs_th_plus = limit.getObservedPlus(obs, 0.21) obs_th_minus = limit.getObservedMinus(obs, 0.21) for gr in [obs_th_plus, obs_th_minus]: gr.SetLineWidth(2) gr.SetLineStyle(9) graphs["obs_th_plus"] = obs_th_plus graphs["obs_th_minus"] = obs_th_minus # Remove m=80 for gr in graphs.values(): limit.cleanGraph(gr, minX=90) print "Plotting graphs" for key in graphs.keys(): for i in range(graphs[key].GetN()): xs = graphs[key].GetX() ys = graphs[key].GetY() print " ", key, xs[i], ys[i] print # Interpret in MSSM xVariable = "mHp" # selection = "mu==200" selection = 
"mHp>0" # scenario = "MSSM m_{h}^{max}" scenario = os.path.split(rootfile)[-1].replace(".root", "") for key in graphs.keys(): print "Graph--------------------------------", key # graphs[key] = db.graphToTanBetaCombined(graphs[key],xVariable,selection) graphs[key] = db.graphToTanBeta(graphs[key], xVariable, selection, False) print key, "done" graphs["mintanb"] = db.minimumTanbGraph("mHp", selection) doPlot("limitsTanb_light_" + scenario, graphs, limits, limit.mHplus(), scenario)
def main(argv):
    # Optimise the weight w used to combine QCD-inverted normalization
    # factors: for each tail-killer optimisation mode and each candidate w,
    # build the data-driven mT shape, compare it to the MC-based one with a
    # weighted squared-difference metric, pick the w that minimises it, and
    # write the corresponding normalization factors to file.
    #
    # argv ignored; reads sys.argv[1] (inverted dir) and sys.argv[2] (signal dir).
    dirs = []
    dirs_signal = []
    if len(sys.argv) < 2:
        usage()
    dirs.append(sys.argv[1])
    #dirs_signal = ["../../SignalAnalysis_140605_143702/"]
    dirs_signal.append(sys.argv[2])
    #dirs_signal = ["/mnt/flustre/epekkari/SignalFakeTauLimits_140808_095404"]

    # Sorted normalization factors: nominal and EWK-fake-tau-filtered variants
    QCDInvertedNormalization = sort(QCDInvertedNormalizationFactors.QCDInvertedNormalization)
    labels, QCDInvertedNormalizationFilteredEWKFakeTaus = getSortedLabelsAndFactors(QCDInvertedNormalizationFactorsFilteredEWKFakeTaus.QCDInvertedNormalization)

    analysis_inverted = "signalAnalysisInvertedTau"
    analysis = "signalAnalysis"

    optModes = []
    #optModes.append("OptQCDTailKillerZeroPlus")
    optModes.append("OptQCDTailKillerLoosePlus")
    #optModes.append("OptQCDTailKillerMediumPlus")
    #optModes.append("OptQCDTailKillerTightPlus")

    # Histogram names: variation (genuine-tau EWK), nominal, and signal fake-tau
    varHistoName = "shapeEWKGenuineTausTransverseMass"
    nomHistoName = "shapeTransverseMass"
    signalHistoName = "shapeEWKFakeTausTransverseMass"

    # Candidate weights scanned for the minimum of the difference metric
    #w_list = [0.66, 0.67, 0.75] # golden old
    #w_list = [0.95, 0.955, 0.96, 0.965, 0.97, 0.975, 0.98, 0.985, 0.99, 0.995, 1]
    #w_list = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    #w_list = [0.74, 0.741, 0.742, 0.743, 0.744, 0.745, 0.746, 0.747]
    w_list = [0.743]

    # Fixed mT binning used to rebin all compared histograms
    #defaultBinning = systematics.getBinningForPlot("shapeTransverseMass")
    defaultBinning = [0,20,40,60,80,100,120,140,160,200,400]
    defaultBinning_array = array.array("d",defaultBinning)

    diff_opt = []  # one list of metric values (per w) per optimisation mode
    for optMode in optModes:
        diff_list = []
        for w in w_list:
            var_values = []
            nom_values = []

            # baseline fake taus: EWK fake-tau mT shape from the signal analysis
            mt_baseline_faketaus_data = getDataSets(dirs_signal, dataEra, searchMode, analysis, optMode)
            mtplot_signalfaketaus = plots.DataMCPlot(mt_baseline_faketaus_data, signalHistoName)
            mt_signalfaketaus = mtplot_signalfaketaus.histoMgr.getHisto("EWK").getRootHisto().Clone(signalHistoName)

            # inverted fake taus: per-bin shapes from the inverted-tau analysis
            mt_inverted_faketaus_data = getDataSets(dirs, dataEra, searchMode, analysis_inverted, optMode)
            histonames_var = mt_inverted_faketaus_data.getDataset("Data").getDirectoryContent(varHistoName)
            histonames_nom = mt_inverted_faketaus_data.getDataset("Data").getDirectoryContent(nomHistoName)
            bins_var = getBins(histonames_var, varHistoName)
            bins_nom = getBins(histonames_nom, nomHistoName)

            # Normalizations: q/g-corrected variation, plain variation, nominal
            normalization_var_qg = getNormalization(bins_var,w,QCDInvertedNormalizationFilteredEWKFakeTaus,True,True)
            normalization_var = getNormalization(bins_var,w,QCDInvertedNormalizationFilteredEWKFakeTaus,True,False)
            normalization_nom = getNormalization(bins_nom,w,QCDInvertedNormalization,False,False)

            # Build the normalized mT shapes
            mt_var_qg = getMt(mt_inverted_faketaus_data,bins_var,varHistoName,normalization_var_qg)
            mt_var = getMt(mt_inverted_faketaus_data,bins_var,varHistoName,normalization_var)
            mt_nom = getMt(mt_inverted_faketaus_data,bins_nom,nomHistoName,normalization_nom)
            # Nominal shape gets the MC-based fake-tau contribution added
            mt_nom.Add(mt_signalfaketaus)

            mt_var_qg.SetName("QCD(Data)+EWK+t#bar{t}(Data, mis-ID. #tau), corr.")
            mt_var.SetName("QCD(Data)+EWK+t#bar{t}(Data, mis-ID. #tau)")
            mt_nom.SetName("QCD(Data)+EWK+t#bar{t}(MC, mis-ID. #tau)")
            mt_var_qg.SetLineWidth(4)
            mt_var.SetLineColor(14)
            mt_nom.SetLineColor(2)

            # Rebin everything to the common fixed binning (Rebin returns a new histo)
            mt_var_qg = mt_var_qg.Rebin(len(defaultBinning)-1,"",defaultBinning_array)
            mt_var = mt_var.Rebin(len(defaultBinning)-1,"",defaultBinning_array)
            mt_nom = mt_nom.Rebin(len(defaultBinning)-1,"",defaultBinning_array)

            # Collect bin contents for the difference metric below
            for i in range(0,mt_nom.GetSize()):
                var_values.append(mt_var_qg.GetBinContent(i))
                nom_values.append(mt_nom.GetBinContent(i))

            # Draw the data-driven vs MC comparison plot for this (optMode, w)
            style = tdrstyle.TDRStyle()
            varPlots = [mt_var, mt_var_qg]
            plot = plots.ComparisonManyPlot(mt_nom,varPlots)
            plot.createFrame(optMode.replace("Opt","Mt_DataDrivenVsMC_"+"w="+str(w)+"_"), createRatio=True)
            moveLegend={"dx": -0.325,"dy": 0.02,"dw":-0.14,"dh":-0.12}
            plot.setLegend(histograms.moveLegend(histograms.createLegend(), **moveLegend))
            #plot.setLegend(histograms.createLegend(x1=0.5, y1=0.7, x2=0.7, y2=0.95, textSize=0.1))
            histograms.addText(0.65, 0.3, optMode.replace("OptQCDTailKiller","R_{BB} ").replace("Plus",""), 25)
            histograms.addCmsPreliminaryText()
            histograms.addEnergyText()
            lumi=mt_inverted_faketaus_data.getDataset("Data").getLuminosity()
            histograms.addLuminosityText(x=None, y=None, lumi=lumi)
            plot.draw()
            plot.save()

            # Explicit ROOT cleanup so the next iteration starts from a clean state
            mt_var_qg.Delete()
            mt_var.Delete()
            mt_nom.Delete()
            mt_baseline_faketaus_data.close()
            mt_inverted_faketaus_data.close()
            ROOT.gROOT.CloseFiles()
            ROOT.gROOT.GetListOfCanvases().Delete()
            ROOT.gDirectory.GetList().Delete()

            # difference metrics: sum_i v_i*(v_i-n_i)^2 / sum_i v_i
            # (weighted squared difference between data-driven and MC shapes)
            num = 0
            denom = 0
            for i in range(0,len(nom_values)):
                num += var_values[i]*(var_values[i]-nom_values[i])**2
                denom += var_values[i]
            diff = num/denom
            diff_list.append(diff)
        diff_opt.append(diff_list)

    #os.system("rm MtOptimal/*")
    #os.system("mkdir -p MtOptimal")
    print "\nWeights:\t",w_list,'\n'

    # Pick, per optimisation mode, the weight minimising the metric
    optimalWeights = {}
    for i in range(0,len(diff_opt)):
        print optModes[i]
        print "Differences:\t",diff_opt[i],"- Optimal: w =",w_list[diff_opt[i].index(min(diff_opt[i]))]
        optimalWeights[optModes[i]] = w_list[diff_opt[i].index(min(diff_opt[i]))]
        #command = "cp *" + str(w_list[diff_opt[i].index(min(diff_opt[i]))])+"*"+optModes[i].replace("Opt","") + ".eps MtOptimal"
        #os.system(command)
    print optimalWeights

    # Recompute normalization with the optimal weight and persist it.
    # NOTE(review): bins_var here is the value left over from the last loop
    # iteration - verify this is intended.
    optimalNormalization = getNormalization(bins_var,optimalWeights["OptQCDTailKillerLoosePlus"],QCDInvertedNormalizationFilteredEWKFakeTaus,True,True)
    writeNormalizationToFile("QCDPlusEWKFakeTauNormalizationFactors.py",optimalNormalization,labels)
def __init__(self, h, label, fitFuncName, fitmin, fitmax, applyFitFrom, doPlots=False, luminosity=None):
    # Fit the tail of the rate histogram h with the named fit function over
    # [fitmin, fitmax], derive the fit-parameter eigenvectors/eigenvalues for
    # uncertainty propagation, and optionally produce variation plots.
    #
    # Parameters:
    #   h            : rate histogram to be fitted (cloned into self._hRate)
    #   label        : label identifying this fit (stored for later use)
    #   fitFuncName  : name of the fit function resolved via _findFitFunction
    #   fitmin/fitmax: fit range boundaries
    #   applyFitFrom : x-value above which the fit replaces the histogram
    #   doPlots      : if True, make variation plots below
    #   luminosity   : optional luminosity (stored; used by plotting code)
    self._label = label
    self._fittedRate = None
    self._centralParams = None
    self._eigenVectors = None
    self._eigenValues = None
    self._fitmin = fitmin
    self._hRate = aux.Clone(h)
    self._luminosity = luminosity
    self._datasetNames = {}

    # Initialize style (show fit statistics on the canvas)
    myStyle = tdrstyle.TDRStyle()
    myStyle.setOptStat(True)
    myStyle.tdrStyle.SetOptFit(True)

    # Calculate scale factor by integrating over the area to be fitted
    # (GetNbinsX()+1 includes the overflow bin)
    scaleFactor = h.Integral(h.FindBin(fitmin), h.GetNbinsX() + 1)

    # Set fit function
    self._myFitFuncObject = self._findFitFunction(fitFuncName, scaleFactor)

    # Obtain bin list for fine binning (compatibility with fine binning):
    # low edges of all bins plus the upper edge of the last bin
    myBinList = []
    for i in range(1, h.GetNbinsX() + 1):
        myBinList.append(h.GetXaxis().GetBinLowEdge(i))
    myBinList.append(h.GetXaxis().GetBinUpEdge(h.GetNbinsX()))

    # Do fit
    myFitResult = self._doFit(h, myBinList, fitFuncName, fitmin, fitmax)

    # Calculate eigenvectors and values
    self._calculateEigenVectorsAndValues(myFitResult, printStatus=True)

    # Dataset names (used for plotting, maps dataset label to its legend)
    self._datasetNames["ttbar"] = "t#bar{t}"
    self._datasetNames["W"] = "W+Jets"
    self._datasetNames["singleTop"] = "Single t"
    self._datasetNames["DY"] = "Z/#gamma*+jets"
    self._datasetNames["VV"] = "Diboson"
    self._datasetNames["QCDandFakeTau"] = "Mis-ID. #tau_{h} (data)"

    # Create varied histograms (fit uncertainty up and down, total
    # uncertainty up and down) and make plots.
    # NOTE(review): self._hFitFineBinning is presumably set inside _doFit -
    # confirm before relying on it here.
    if doPlots:
        (hFitUncertaintyUp, hFitUncertaintyDown) = self.calculateVariationHistograms(myBinList, applyFitFrom)
        self.makeVariationPlotWithSeparateUncertainties("_FineBinning", self._hRate, self._hFitFineBinning, hFitUncertaintyUp, hFitUncertaintyDown, applyFitFrom)
        (hupTotal, hdownTotal) = self.calculateTotalVariationHistograms(self._hFitFineBinning, hFitUncertaintyUp, hFitUncertaintyDown)
        self.makeVariationPlotWithTotalUncertainties("_FineBinning", self._hRate, self._hFitFineBinning, hupTotal, hdownTotal, applyFitFrom)
def main(opts):
    """
    Produce Data-vs-Simulation trigger-efficiency plots for every signal
    trigger and x-variable combination found in the multicrab directory.
    """
    # Suppress warnings about weight being re-applied
    ROOT.gErrorIgnoreLevel = ROOT.kError

    # Obtain dsetMgrCreator and register it to module selector
    dsetMgrCreator = dataset.readFromMulticrabCfg(directory=opts.mcrab)

    # Get list of eras, modes, and optimisation modes
    erasList = dsetMgrCreator.getDataEras()
    modesList = dsetMgrCreator.getSearchModes()
    optList = dsetMgrCreator.getOptimizationModes()
    sysVarList = dsetMgrCreator.getSystematicVariations()
    sysVarSrcList = dsetMgrCreator.getSystematicVariationSources()

    # If the user does not define an optimisation mode, take all of them
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        optModes = optList
    else:
        optModes = [opts.optMode]
    opts.optMode = ""

    mcrabName = opts.mcrab
    RunEra = mcrabName.split("_")[1]

    # Setup ROOT and style
    ROOT.gROOT.SetBatch(opts.batchMode)
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(True)
    style.setGridY(True)

    # Setup & configure the dataset manager
    datasetsMgr = GetDatasetsFromDir(opts)

    # Optionally drop QCD samples whose cross sections are not calculated
    if 0:
        Print(ShellStyles.ErrorStyle() + "Removing following samples:" + ShellStyles.NormalStyle(), True)
        for dName in getDatasetsToExclude():
            Print(dName, False)
            datasetsMgr.remove(dName)

    # Get run-range and integrated luminosity
    minRunRange, maxRunRange, runRange = GetRunRange(datasetsMgr)
    intLumi = GetLumi(datasetsMgr)

    # Update to PU & load luminosities
    datasetsMgr.updateNAllEventsToPUWeighted()
    datasetsMgr.loadLuminosities()
    #datasetsMgr.normalizeMCByLuminosity()
    datasetsMgr.PrintLuminosities()
    datasetsMgr.PrintCrossSections()

    # Default merging & ordering: "Data", "QCD", "SingleTop", "Diboson"
    plots.mergeRenameReorderForDataMC(datasetsMgr)
    datasetsMgr.mergeMC()
    dataset_Data = datasetsMgr.getDataDatasets()
    dataset_MC = datasetsMgr.getMCDatasets()

    # Triggers and x-variables to be plotted
    xVars = ["pt6thJet", "eta6thJet", "phi6thJet", "Ht", "nBTagJets", "pu", "JetMulti", "BJetMulti"]
    trgList = ["1BTag", "2BTag", "OR", "OR_PFJet450"]
    if opts.fast:
        trgList = ["OR_PFJet450"]
        xVars = ["pt6thJet", "Ht"]
    nPlots = len(trgList) * len(xVars)

    counter = 0
    # For-loop: All (trigger, x-variable) combinations
    for trg in trgList:
        for xVar in xVars:
            counter += 1
            msg = "{:<9} {:>3} {:<1} {:<3} {:<50}".format("Histogram", "%i" % counter, "/", "%s:" % (nPlots), "%s Vs %s" % (trg, xVar))
            Print(ShellStyles.SuccessStyle() + msg + ShellStyles.NormalStyle(), counter == 1)

            # Histogram names for this trigger/variable pair
            hNumerator = "hNum_%s_RefTrg_OfflineSel_Signal%s" % (xVar, trg)
            hDenominator = "hDen_%s_RefTrg_OfflineSel" % (xVar)
            plotName = "Eff_%s_%s" % (xVar, trg)

            # Build the Data and MC efficiency graphs
            _kwargs = GetHistoKwargs(xVar, opts)
            eff_Data = GetEfficiency(datasetsMgr, dataset_Data, hNumerator, hDenominator, **_kwargs)
            eff_MC = GetEfficiency(datasetsMgr, dataset_MC, hNumerator, hDenominator, **_kwargs)
            styles.dataStyle.apply(eff_Data)
            styles.mcStyle.apply(eff_MC)

            # Compare Data with Simulation
            grData = histograms.HistoGraph(eff_Data, "eff_Data", "p", "P")
            grMC = histograms.HistoGraph(eff_MC, "eff_MC", "p", "P")
            p = plots.ComparisonPlot(grData, grMC, saveFormats=[])
            p.histoMgr.setHistoLegendLabelMany({"eff_Data": "Data", "eff_MC": "Simulation"})

            # Draw, decorate, and save
            p.setLuminosity(intLumi)
            plots.drawPlot(p, plotName, **_kwargs)
            histograms.addText(0.65, 0.06, "Runs " + runRange, 17)
            histograms.addText(0.65, 0.10, "2016", 17)
            SavePlot(p, plotName, os.path.join(opts.saveDir, opts.optMode), saveFormats=[".pdf", ".png", ".C"])

    Print("All plots saved under directory %s" % (ShellStyles.NoteStyle() + aux.convertToURL(opts.saveDir, opts.url) + ShellStyles.NormalStyle()), True)
    return
def main():
    # Produce the heavy/intermediate-H+ tan(beta) limit plot and the
    # corresponding JSON file: parse .root/.json file names from the command
    # line, load the sigma*BR limits, interpret them in the MSSM via the
    # BRXS database, and draw the (mHp, tanb) exclusion.
    if len(sys.argv) == 1:
        usage()

    rootfile = ""
    jsonfile = "limits.json"

    # Pick the first .root / .json file names off the command line
    root_re = re.compile("(?P<rootfile>(\S*\.root))")
    json_re = re.compile("(?P<jsonfile>(\S*\.json))")
    for argv in sys.argv:
        match = root_re.search(argv)
        if match:
            rootfile = match.group(0)
        match = json_re.search(argv)
        if match:
            jsonfile = match.group(0)

    # NOTE(review): the command-line jsonfile is unconditionally overridden
    # by the hard-coded path below - confirm this is intended.
    # jsonfile = "limits_heavy2016.json"
    # jsonfile = "limits2016/limitsForMSSMplots_ICHEP_v3_heavy.json"
    # jsonfile = "limits2016/limits_heavy_20171011.json"
    # jsonfile = "limits2016/limits_heavy_180131.json"
    # jsonfile = "limits2016/limits_heavy_180318.json"
    # jsonfile = "limits2016/limits_int_180202.json"
    jsonfile = "limits2016/limits_int_180429.json"
    # limits = limit.BRLimits(limitsfile=jsonfile,configfile="configurationHeavy.json")
    limits = limit.BRLimits(limitsfile=jsonfile, configfile="limits2016/intermediateHplus_configuration.json")

    # Enable OpenGL
    ROOT.gEnv.SetValue("OpenGL.CanvasPreferGL", 1)

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    #if limit.forPaper:
    #    histograms.cmsTextMode = histograms.CMSMode.PAPER
    #histograms.cmsTextMode = histograms.CMSMode.PAPER # tmp
    #histograms.cmsTextMode = histograms.CMSMode.UNPUBLISHED # tmp
    histograms.cmsTextMode = histograms.CMSMode.PRELIMINARY
    limit.forPaper = True  # to get GeV without c^2

    # Get BR limits
    masses = limits.mass
    brs = limits.observed
    print "Observed masses and sigma*BR's"
    for i in range(len(masses)):
        print "    ", masses[i], brs[i]

    global db
    db = BRXSDB.BRXSDatabaseInterface(rootfile)
    # XSEC only for H-, multiply with 2 to get H+ and H- ; multiply by 0.001 to fb -> pb
    db.BRvariable = "2*0.001*tHp_xsec*BR_Hp_taunu"
    for i, m in enumerate(masses):
        db.addExperimentalBRLimit(m, brs[i])

    graphs = {}
    obs = limits.observedGraph()

    # Remove blinded obs points (iterate backwards so indices stay valid)
    for i in reversed(range(0, obs.GetN())):
        if obs.GetY()[i] < 0.00000001:
            print "    REMOVING POINT", obs.GetY()[i], " corresponding mass=", obs.GetX()[i]
            obs.RemovePoint(i)
    print

    graphs["exp"] = limits.expectedGraph()
    graphs["exp1"] = limits.expectedBandGraph(sigma=1)
    graphs["exp2"] = limits.expectedBandGraph(sigma=2)
    if obs.GetN() > 0:
        graphs["obs"] = obs

    # Get theory uncertainties on observed
    # obs_th_plus = limit.getObservedPlus(obs,0.32)
    # obs_th_minus = limit.getObservedMinus(obs,0.32)
    # for gr in [obs_th_plus, obs_th_minus]:
    #     gr.SetLineWidth(2)
    #     gr.SetLineStyle(9)
    # graphs["obs_th_plus"] = obs_th_plus
    # graphs["obs_th_minus"] = obs_th_minus

    # Remove m=180,190
    # for gr in graphs.values():
    #     limit.cleanGraph(gr, 750)
    #     limit.cleanGraph(gr, 800)
    #     limit.cleanGraph(gr, 1000)
    #     limit.cleanGraph(gr, 1500)
    #     limit.cleanGraph(gr, 2000)
    #     limit.cleanGraph(gr, 2500)
    #     limit.cleanGraph(gr, 3000)

    print "Plotting graphs"
    for key in graphs.keys():
        for i in range(graphs[key].GetN()):
            xs = graphs[key].GetX()
            ys = graphs[key].GetY()
            print "    ", key, xs[i], ys[i]
        print

    # Interpret in MSSM
    xVariable = "mHp"
    selection = "mHp > 0 && mu==200"
    # selection = "mHp > 0 && mu==500"
    # scenario = "MSSM m_{h}^{max}"
    scenario = os.path.split(rootfile)[-1].replace(".root", "")
    print scenario

    from JsonWriter import JsonWriter
    jsonWriter = JsonWriter()
    # Convert each sigma*BR graph to the (mHp, tanb) plane
    for key in graphs.keys():
        print "Graph--------------------------------", key
        graphs[key] = db.graphToTanBeta(graphs[key], xVariable, selection, highTanbRegion=True)
        #if key == "obs":
        #    obsplus = db.getTheorUncert(graphs[key],xVariable,selection,"+")
        #    graphs["obs_th_plus"] = db.graphToTanBeta(obsplus,xVariable,selection)
        #    obsminus = db.getTheorUncert(graphs[key],xVariable,selection,"-")
        #    graphs["obs_th_minus"] = db.graphToTanBeta(obsminus,xVariable,selection)
        print key, "done"
        jsonWriter.addGraph(key, graphs[key])

    # graphs["mintanb"] = db.minimumTanbGraph("mHp",selection)

    # mh(125) allowed-region band, scenario dependent
    if scenario == "lowMH-LHCHXSWG":
        graphs["Allowed"] = db.mhLimit("mH", "mHp", selection, "125.0+-3.0")
    else:
        graphs["Allowed"] = db.mhLimit("mh", "mHp", selection + "&&mHp>175", "125.0+-3.0")

    if scenario == "tauphobic-LHCHXSWG":
        # Fix a buggy second upper limit (the order of points is left to
        # right, then right to left; remove further passes to fix the bug)
        decreasingStatus = False
        i = 0
        while i < graphs["Allowed"].GetN():
            removeStatus = False
            y = graphs["Allowed"].GetY()[i]
            if i > 0:
                if graphs["Allowed"].GetY()[i - 1] - y < 0:
                    decreasingStatus = True
                else:
                    if decreasingStatus:
                        graphs["Allowed"].RemovePoint(i)
                        removeStatus = True
            if not removeStatus:
                i += 1
        #for i in range(0, graphs["Allowed"].GetN()):
        #    print graphs["Allowed"].GetX()[i], graphs["Allowed"].GetY()[i]

    # del graphs["isomass"]

    # Persist everything to JSON for later re-plotting
    jsonWriter.addGraph("Allowed", graphs["Allowed"])
    jsonWriter.addParameter("name", "limitsTanb_intermediate_" + scenario)
    jsonWriter.addParameter("scenario", scenario)
    jsonWriter.addParameter("luminosity", limits.getLuminosity())
    jsonWriter.addParameter("finalStateText", limits.getFinalstateText())
    jsonWriter.addParameter("mHplus", limit.mHplus())
    jsonWriter.addParameter("selection", selection)
    jsonWriter.addParameter("regime", "heavy")
    jsonWriter.write("MSSMLimitIntermediate_" + scenario + ".json")

    limit.doTanBetaPlotHeavy("limitsTanb_heavy_" + scenario, graphs, limits.getLuminosity(), limits.getFinalstateText(), limit.mHplus(), scenario)
    sys.exit()

    # NOTE: everything below is unreachable (dead code) because of the
    # sys.exit() above - the (mA, tanb) re-plot is currently disabled.
    # mH+ -> mA
    print "Replotting the graphs for (mA,tanb)"
    for key in graphs.keys():
        print key
        #db.PrintGraph(graphs[key])
        #print "check loop db.graphToMa"
        db.graphToMa(graphs[key])
    graphs["isomass"] = db.getIsoMass(200)
    # doPlot("limitsTanb_mA_heavy_"+scenario, graphs, limits, limit.mA(),scenario)
    limit.doTanBetaPlotHeavy("limitsTanb_mA_heavy_" + scenario, graphs, limits.getLuminosity(), limits.getFinalstateText(), limit.mA(), scenario)
def main(opts):
    """
    Build reco/gen comparison plots (reference vs. fixed histograms) for every
    histogram name in the module-level `hNames` list, and save the canvases.

    Relies on module-level state: `kwargs`, `hNames`, `hSaveNames`, `hLegends`,
    `hStyles`, `styleDict` and the helper functions defined elsewhere in this
    file (GetDatasetsFromDir, GetLumi, GetHistosForPlotter, ...).

    opts : parsed command-line options (mcrab path, batchMode flag, ...)
    """
    style = tdrstyle.TDRStyle()

    # Set ROOT batch mode boolean
    ROOT.gROOT.SetBatch(opts.batchMode)

    # Get all datasets from the mcrab dir
    # def GetDatasetsFromDir(mcrab, opts, **kwargs): #iro
    datasetsMgr = GetDatasetsFromDir(opts.mcrab, opts, **kwargs) #kwargs.get("analysis"))

    # Determine Integrated Luminosity (If Data datasets present)
    intLumi = GetLumi(datasetsMgr)

    # Update to PU
    datasetsMgr.updateNAllEventsToPUWeighted()

    # Remove datasets
    datasetsMgr.remove(kwargs.get("rmDataset"))
    # datasetsMgr.remove(filter(lambda name: not "QCD" in name, datasetsMgr.getAllDatasetNames()))
    # datasetsMgr.remove(filter(lambda name: "QCD" in name in name, datasetsMgr.getAllDatasetNames()))

    # Set custom XSections
    #datasetsMgr.getDataset("QCD_bEnriched_HT1000to1500").setCrossSection(1.0)
    #datasetsMgr.getDataset("QCD_bEnriched_HT1500to2000").setCrossSection(1.0)
    #datasetsMgr.getDataset("QCD_bEnriched_HT2000toInf").setCrossSection(1.0)
    #datasetsMgr.getDataset("QCD_bEnriched_HT300to500").setCrossSection(1.0)
    #datasetsMgr.getDataset("QCD_bEnriched_HT500to700").setCrossSection(1.0)
    #datasetsMgr.getDataset("QCD_bEnriched_HT700to1000").setCrossSection(1.0)

    # Default merging & ordering: "Data", "QCD", "SingleTop", "Diboson"
    plots.mergeRenameReorderForDataMC(datasetsMgr)

    # Remove datasets (for merged names)
    datasetsMgr.remove(kwargs.get("rmDataset"))

    # Print the cross
    datasetsMgr.PrintCrossSections()

    # One pass for reconstructed-level, one for generator-level histograms
    for h_prefix in ["reco", "gen"]:

        # Get ref histo here and the fixed histos (TT, QCD, QCD-b
        inclusiveHisto, fixedHistos = GetHistosForPlotter(datasetsMgr, h_prefix + "MET_Et", **kwargs)
        inclusiveHisto.setName("Inclusive")
        inclusiveHisto.setLegendLabel("Inclusive")
        #for hi in fixedHistos:
        #    print(type(hi), hi.getName())
        #return

        # For-loop: All Histogram names
        for counter, hName in enumerate(hNames):

            # Get the save path and name
            savePath, saveName = GetSavePathAndName(h_prefix + hName[0], **kwargs)
            saveName = savePath + h_prefix + hSaveNames[counter]

            # Get Histos for Plotter
            # NOTE(review): otherHistos_ / otherHistos2 are fetched but never used;
            # only the two reference histograms are customised and compared.
            refHisto_, otherHistos_ = GetHistosForPlotter(datasetsMgr, h_prefix + hName[0], **kwargs)
            refHisto2, otherHistos2 = GetHistosForPlotter(datasetsMgr, h_prefix + hName[1], **kwargs)
            refHisto = inclusiveHisto

            # customize histos
            refHisto_.setName(hLegends[counter][0])
            refHisto_.setLegendLabel(hLegends[counter][0])
            refHisto_.setDrawStyle("P")
            refHisto_.setLegendStyle("P")
            styleDict[hStyles[0]].apply(refHisto_.getRootHisto())

            refHisto2.setName(hLegends[counter][1])
            refHisto2.setLegendLabel(hLegends[counter][1])
            refHisto2.setDrawStyle("P")
            refHisto2.setLegendStyle("P")
            styleDict[hStyles[1]].apply(refHisto2.getRootHisto())

            otherHistos = [refHisto_, refHisto2]
            #for hi in otherHistos:
            #    print hi.getName()
            #return

            # Create a comparison plot
            p = plots.ComparisonManyPlot(refHisto, otherHistos)

            # Remove negative contributions
            #RemoveNegativeBins(datasetsMgr, hName, p)

            # Create a frame
            # NOTE(review): `opts` here shadows the function parameter `opts`;
            # harmless because opts.batchMode was already consumed above, but fragile.
            if kwargs.get("logY")==True:
                opts = {"ymin": 8e-5, "ymax": 2}
                #opts = {"ymin": 1e-3, "ymax": 1}
            else:
                opts = {"ymin": 8.e-5, "ymax": 2}
            ratioOpts = {"ymin": 0.1, "ymax": 10.0}
            p.createFrame(saveName, createRatio=kwargs.get("createRatio"), opts=opts, opts2=ratioOpts)

            # Customise Legend
            moveLegend = {"dx": -0.2, "dy": +0.0, "dh": -0.1}
            p.setLegend(histograms.moveLegend(histograms.createLegend(), **moveLegend))
            #p.removeLegend()

            # Customise frame
            p.getFrame().GetYaxis().SetTitle( getTitleY(refHisto, **kwargs) )
            #p.setEnergy("13")
            if kwargs.get("createRatio"):
                p.getFrame2().GetYaxis().SetTitle("Ratio")
                p.getFrame2().GetYaxis().SetTitleOffset(1.7)

            # SetLog
            SetLogAndGrid(p, **kwargs)

            # Add cut line/box
            _kwargs = { "lessThan": kwargs.get("cutLessThan")}
            p.addCutBoxAndLine(cutValue=kwargs.get("cutValue"),
                               fillColor=kwargs.get("cutFillColour"),
                               box=kwargs.get("cutBox"),
                               line=kwargs.get("cutLine"),
                               **_kwargs)

            # Move the refDataset to first in the draw order (back)
            histoNames = [h.getName() for h in p.histoMgr.getHistos()]
            p.histoMgr.reorder(filter(lambda n: plots._legendLabels[kwargs.get("refDataset") ] not in n, histoNames))

            # Draw plots
            p.draw()

            # Customise text
            histograms.addStandardTexts(lumi=intLumi)
            # histograms.addText(0.4, 0.9, "Alexandros Attikis", 17)
            histograms.addText(0.2, 0.88, plots._legendLabels[kwargs.get("refDataset")], 17)

            # Save canvas under custom dir
            if not os.path.exists(savePath):
                os.mkdir(savePath)
            SaveAs(p, savePath, saveName, kwargs.get("saveFormats"))
    return
def __init__(self, opts, config, dirname, luminosity, observation, datasetGroups):
    """
    Generate all data-driven control plots for every configured mass point.

    opts          : command-line options object
    config        : datacard config module (ControlPlots, MassPoints, OptionBr, ...)
    dirname       : output directory for the produced plots
    luminosity    : integrated luminosity used for normalisation/labels
    observation   : observation column providing the data control-plot shapes
    datasetGroups : dataset columns (signal, QCD, EWK, ...) with control-plot shapes

    Raises Exception if config.OptionSqrtS is unset; returns early (no plots)
    if config.ControlPlots is None.
    """
    # Override legend labels for the background-uncertainty bands
    plots._legendLabels["MCStatError"] = "Bkg. stat."
    plots._legendLabels["MCStatSystError"] = "Bkg. stat.#oplussyst."
    plots._legendLabels["BackgroundStatError"] = "Bkg. stat. unc"
    plots._legendLabels["BackgroundStatSystError"] = "Bkg. stat.#oplussyst. unc."
    if config.ControlPlots == None:
        return
    myStyle = tdrstyle.TDRStyle()
    myStyle.setOptStat(False)
    self._opts = opts
    self._config = config
    if config.OptionSqrtS == None:
        raise Exception(ShellStyles.ErrorLabel()+"Please set the parameter OptionSqrtS = <integer_value_in_TeV> in the config file!"+ShellStyles.NormalStyle())
    self._dirname = dirname
    self._luminosity = luminosity
    self._observation = observation
    self._datasetGroups = datasetGroups
    #myEvaluator = SignalAreaEvaluator()

    # Make control plots
    print "\n"+ShellStyles.HighlightStyle()+"Generating control plots"+ShellStyles.NormalStyle()

    # Loop over mass points; the sentinel -1 produces the no-signal plots
    massPoints = []
    massPoints.extend(self._config.MassPoints)
    massPoints.append(-1) # for plotting with no signal
    for m in massPoints:
        print "... mass = %d GeV"%m

        # Initialize flow plot
        selectionFlow = SelectionFlowPlotMaker(self._opts, self._config, m)
        myBlindedStatus = False
        for i in range(0,len(self._config.ControlPlots)):
            if observation.getControlPlotByIndex(i) != None:
                myCtrlPlot = self._config.ControlPlots[i]
                print "......", myCtrlPlot.title
                myMassSuffix = "_M%d"%m
                # Initialize histograms (accumulators for each background category)
                hSignal = None
                hQCD = None
                hQCDdata = None
                hEmbedded = None
                hEWKfake = None
                hData = None
                # Loop over dataset columns to find histograms
                myStackList = []
                for c in self._datasetGroups:
                    if (m < 0 or c.isActiveForMass(m,self._config)) and not c.typeIsEmptyColumn() and not c.getControlPlotByIndex(i) == None:
                        h = c.getControlPlotByIndex(i)["shape"].Clone()
                        if c.typeIsSignal():
                            #print "signal:",c.getLabel()
                            # Scale light H+ signal by branching-ratio factors
                            if m < 179:
                                if c.getLabel()[:2] == "HH":
                                    h.Scale(self._config.OptionBr**2)
                                elif c.getLabel()[:2] == "HW":
                                    h.Scale(2.0*self._config.OptionBr*(1.0-self._config.OptionBr))
                            if hSignal == None:
                                hSignal = h.Clone()
                            else:
                                hSignal.Add(h)
                        elif c.typeIsQCDinverted():
                            if hQCDdata == None:
                                hQCDdata = h.Clone()
                            else:
                                hQCDdata.Add(h)
                        elif c.typeIsQCD():
                            if hQCD == None:
                                hQCD = h.Clone()
                            else:
                                hQCD.Add(h)
                        elif c.typeIsEWK():
                            #print "EWK genuine:",c.getLabel(),h.getRootHisto().Integral(0,h.GetNbinsX()+2)
                            # Genuine-tau EWK: stacked per-column from MC, or
                            # accumulated into one embedded histogram if data-driven
                            if not self._config.OptionGenuineTauBackgroundSource == "DataDriven":
                                myHisto = histograms.Histo(h,c._datasetMgrColumn)
                                myHisto.setIsDataMC(isData=False, isMC=True)
                                myStackList.append(myHisto)
                            else:
                                if hEmbedded == None:
                                    hEmbedded = h.Clone()
                                else:
                                    hEmbedded.Add(h)
                        elif c.typeIsEWKfake():
                            #print "EWK fake:",c.getLabel(),h.getRootHisto().Integral(0,h.GetNbinsX()+2)
                            if hEWKfake == None:
                                hEWKfake = h.Clone()
                            else:
                                hEWKfake.Add(h)
                if len(myStackList) > 0 or self._config.OptionGenuineTauBackgroundSource == "DataDriven":
                    # Insert QCD first (data-driven preferred over MC)
                    if hQCDdata != None:
                        myHisto = histograms.Histo(hQCDdata,"QCDdata",legendLabel=_legendLabelQCDdata)
                        myHisto.setIsDataMC(isData=False, isMC=True)
                        myStackList.insert(0, myHisto)
                    elif hQCD != None:
                        myHisto = histograms.Histo(hQCD,"QCDdata",legendLabel=_legendLabelQCD)
                        myHisto.setIsDataMC(isData=False, isMC=True)
                        myStackList.insert(0, myHisto)
                    if hEmbedded != None:
                        myHisto = histograms.Histo(hEmbedded,"Embedding",legendLabel=_legendLabelEmbedding)
                        myHisto.setIsDataMC(isData=False, isMC=True)
                        myStackList.append(myHisto)
                    if hEWKfake != None:
                        myHisto = histograms.Histo(hEWKfake,"EWKfakes",legendLabel=_legendLabelEWKFakes)
                        myHisto.setIsDataMC(isData=False, isMC=True)
                        myStackList.append(myHisto)
                    hData = observation.getControlPlotByIndex(i)["shape"].Clone()
                    # Keep an unblinded copy for the selection-flow plot
                    hDataUnblinded = hData.Clone()
                    # Apply blinding
                    myBlindingString = None
                    if self._config.BlindAnalysis:
                        if len(myCtrlPlot.blindedRange) > 0:
                            myBlindingString = self._applyBlinding(hData,myCtrlPlot.blindedRange)
                        if self._config.OptionBlindThreshold != None:
                            # Blind bins where signal exceeds the threshold fraction
                            # of expected background
                            for k in xrange(1, hData.GetNbinsX()+1):
                                myExpValue = 0.0
                                for item in myStackList:
                                    myExpValue += item.getRootHisto().GetBinContent(k)
                                if hSignal.getRootHisto().GetBinContent(k) >= myExpValue * self._config.OptionBlindThreshold:
                                    hData.getRootHisto().SetBinContent(k, -1.0)
                                    hData.getRootHisto().SetBinError(k, 0.0)
                    # Data
                    myDataHisto = histograms.Histo(hData,"Data")
                    myDataHisto.setIsDataMC(isData=True, isMC=False)
                    myStackList.insert(0, myDataHisto)
                    # Add signal (label depends on light vs. heavy H+ regime)
                    if m > 0:
                        mySignalLabel = "TTToHplus_M%d"%m
                        if m > 179:
                            mySignalLabel = "HplusTB_M%d"%m
                        myHisto = histograms.Histo(hSignal,mySignalLabel)
                        myHisto.setIsDataMC(isData=False, isMC=True)
                        myStackList.insert(1, myHisto)
                    # Add data to selection flow plot
                    #if myBlindedStatus:
                    #    selectionFlow.addColumn(myCtrlPlot.flowPlotCaption,None,myStackList[1:])
                    #else:
                    selectionFlow.addColumn(myCtrlPlot.flowPlotCaption,hDataUnblinded,myStackList[1:])
                    if len(myCtrlPlot.blindedRange) > 0:
                        myBlindedStatus = True
                    else:
                        myBlindedStatus = False
                    # Make plot
                    myStackPlot = None
                    myParams = myCtrlPlot.details.copy()
                    #if not isinstance(hData, ROOT.TH2):
                    #for j in range(1,myStackList[0].getRootHisto().GetNbinsY()+1):
                    #for i in range(1,myStackList[0].getRootHisto().GetNbinsX()+1):
                    #mySum = 0.0
                    #for h in range(2, len(myStackList)):
                    #mySum += myStackList[h].getRootHisto().GetBinContent(i,j)
                    #if mySum > 0.0:
                    #myStackList[0].getRootHisto().SetBinContent(i,j,myStackList[0].getRootHisto().GetBinContent(i,j) / mySum)
                    #else:
                    #myStackList[0].getRootHisto().SetBinContent(i,j,-10.0)
                    #myStackList[0].getRootHisto().SetMinimum(-1.0)
                    #myStackList[0].getRootHisto().SetMaximum(1.0)
                    #myStackList = [myStackList[0]]
                    #myStackPlot = plots.PlotBase(myStackList)
                    #if "ylabelBinInfo" in myParams:
                    #del myParams["ylabelBinInfo"]
                    #del myParams["unit"]
                    #drawPlot2D(myStackPlot, "%s/DataDrivenCtrlPlot_M%d_%02d_%s"%(self._dirname,m,i,myCtrlPlot.title), **myParams)
                    myStackPlot = plots.DataMCPlot2(myStackList)
                    myStackPlot.setLuminosity(self._luminosity)
                    myStackPlot.setEnergy("%d"%self._config.OptionSqrtS)
                    myStackPlot.setDefaultStyles()
                    # Tweak paramaters
                    if not "unit" in myParams.keys():
                        myParams["unit"] = ""
                    if myParams["unit"] != "":
                        myParams["xlabel"] = "%s (%s)"%(myParams["xlabel"],myParams["unit"])
                    # Append "/ <bin width> <unit>" to the y label unless disabled
                    ylabelBinInfo = True
                    if "ylabelBinInfo" in myParams:
                        ylabelBinInfo = myParams["ylabelBinInfo"]
                        del myParams["ylabelBinInfo"]
                    if ylabelBinInfo:
                        myMinWidth = 10000.0
                        myMaxWidth = 0.0
                        for j in range(1,hData.getRootHisto().GetNbinsX()+1):
                            w = hData.getRootHisto().GetBinWidth(j)
                            if w < myMinWidth:
                                myMinWidth = w
                            if w > myMaxWidth:
                                myMaxWidth = w
                        myWidthSuffix = ""
                        myMinWidthString = "%d"%myMinWidth
                        myMaxWidthString = "%d"%myMaxWidth
                        # Sub-unity bin widths: print with enough decimals
                        if myMinWidth < 1.0:
                            myFormat = "%%.%df"%(abs(int(log10(myMinWidth)))+1)
                            myMinWidthString = myFormat%myMinWidth
                        if myMaxWidth < 1.0:
                            myFormat = "%%.%df"%(abs(int(log10(myMaxWidth)))+1)
                            myMaxWidthString = myFormat%myMaxWidth
                        myWidthSuffix = "%s-%s"%(myMinWidthString,myMaxWidthString)
                        if abs(myMinWidth-myMaxWidth) < 0.001:
                            myWidthSuffix = "%s"%(myMinWidthString)
                        if not (myParams["unit"] == "" and myWidthSuffix == "1"):
                            myParams["ylabel"] = "%s / %s %s"%(myParams["ylabel"],myWidthSuffix,myParams["unit"])
                    if myBlindingString != None:
                        if myParams["unit"] != "" and myParams["unit"][0] == "^":
                            myParams["blindingRangeString"] = "%s%s"%(myBlindingString, myParams["unit"])
                        else:
                            myParams["blindingRangeString"] = "%s %s"%(myBlindingString, myParams["unit"])
                    # Translate symbolic legend positions into moveLegend offsets
                    if "legendPosition" in myParams.keys():
                        if myParams["legendPosition"] == "NE":
                            myParams["moveLegend"] = {"dx": -0.10, "dy": -0.02}
                        elif myParams["legendPosition"] == "SE":
                            myParams["moveLegend"] = {"dx": -0.10, "dy": -0.56}
                        elif myParams["legendPosition"] == "SW":
                            myParams["moveLegend"] = {"dx": -0.53, "dy": -0.56}
                        elif myParams["legendPosition"] == "NW":
                            myParams["moveLegend"] = {"dx": -0.53, "dy": -0.02}
                        else:
                            raise Exception("Unknown value for option legendPosition: %s!", myParams["legendPosition"])
                        del myParams["legendPosition"]
                    elif not "moveLegend" in myParams:
                        myParams["moveLegend"] = {"dx": -0.10, "dy": -0.02} # default: NE
                    if "ratioLegendPosition" in myParams.keys():
                        if myParams["ratioLegendPosition"] == "left":
                            myParams["ratioMoveLegend"] = {"dx": -0.51, "dy": 0.03}
                        elif myParams["ratioLegendPosition"] == "right":
                            myParams["ratioMoveLegend"] = {"dx": -0.08, "dy": 0.03}
                        elif myParams["ratioLegendPosition"] == "SE":
                            myParams["ratioMoveLegend"] = {"dx": -0.08, "dy": -0.33}
                        else:
                            raise Exception("Unknown value for option ratioLegendPosition: %s!", myParams["ratioLegendPosition"])
                        del myParams["ratioLegendPosition"]
                    else:
                        if not "ratioMoveLegend" in myParams:
                            myParams["ratioMoveLegend"] = {"dx": -0.51, "dy": 0.03} # default: left
                    # Remove non-dientified keywords
                    del myParams["unit"]
                    # Ratio axis
                    if not "opts2" in myParams.keys():
                        myParams["opts2"] = {"ymin": 0.3, "ymax": 1.7}
                    # Do plotting
                    if m > 0:
                        drawPlot(myStackPlot, "%s/DataDrivenCtrlPlot_M%d_%02d_%s"%(self._dirname,m,i,myCtrlPlot.title), **myParams)
                    else:
                        drawPlot(myStackPlot, "%s/DataDrivenCtrlPlot_%02d_%s"%(self._dirname,i,myCtrlPlot.title), **myParams)
        # Do selection flow plot
        selectionFlow.makePlot(self._dirname,m,len(self._config.ControlPlots),self._luminosity)
    #myEvaluator.save(dirname)
    print "Control plots done"
def main(hName, opts):
    """
    Draw one data-MC comparison plot for histogram `hName` and save it under
    "Plots/". Relies on the module-level `kwargs` dictionary for all plot
    customisation (labels, rebinning, cut box, legend placement, ...).

    hName : path of the histogram inside the analysis directory
    opts  : parsed command-line options (mcrab path, batchMode flag, ...)
    """
    # Setup the style
    style = tdrstyle.TDRStyle()

    # Set ROOT batch mode boolean
    ROOT.gROOT.SetBatch(opts.batchMode)

    # Setup & configure the dataset manager
    datasetsMgr = GetDatasetsFromDir(opts.mcrab, opts, **kwargs)
    datasetsMgr.updateNAllEventsToPUWeighted()
    datasetsMgr.PrintCrossSections()
    datasetsMgr.PrintLuminosities()

    # Set/Overwrite cross-sections
    for d in datasetsMgr.getAllDatasets():
        if "ChargedHiggs" in d.getName():
            datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)

    # Merge datasets: All JetHT to "Data", QCD_Pt to "QCD", QCD_bEnriched to "QCD_b", single-top to "SingleTop", WW, WZ, ZZ to "Diboson"
    plots.mergeRenameReorderForDataMC(datasetsMgr)

    # Remove datasets
    # datasetsMgr.remove("QCD-b")
    # datasetsMgr.remove("QCD")

    # Print dataset information
    datasetsMgr.PrintInfo()

    # Create data-MC comparison plot, with the default
    p = plots.DataMCPlot(datasetsMgr, hName)

    # Create a comparison plot
    ratioOpts = {"ymin": 0.0, "ymax": 2.0}
    if kwargs.get("logY") == True:
        #canvOpts = {"xmin": 0.0, "xmax": 50.0, "ymin": 1e-1, "ymaxfactor": 10}
        canvOpts = {"xmin": 0.0, "ymin": 1e-1, "ymaxfactor": 10}
    else:
        canvOpts = {"ymin": 0.0, "ymaxfactor": 1.2}

    # Draw a customised plot & Save it
    plots.drawPlot(
        p,
        # Sanitise the histogram path into a file name
        "Plots/" + hName.replace("/", "_").replace(" ", "_").replace("(", "_").replace(")", ""),
        xlabel=kwargs.get("xlabel"),
        ylabel=kwargs.get("ylabel"),
        rebinX=kwargs.get("rebinX"),
        rebinY=kwargs.get("rebinY"),
        xlabelsize=kwargs.get("xlabelsize"),
        ratio=kwargs.get("ratio"),
        stackMCHistograms=kwargs.get("stackMCHistograms"),
        ratioYlabel=kwargs.get("ratioYlabel"),
        ratioInvert=kwargs.get("ratioInvert"),
        addMCUncertainty=kwargs.get("addMCUncertainty"),
        addLuminosityText=kwargs.get("addLuminosityText"),
        addCmsText=kwargs.get("addCmsText"),
        opts=canvOpts,
        opts2=ratioOpts,
        log=kwargs.get("logY"),
        errorBarsX=kwargs.get("errorBarsX"),
        cmsExtraText=kwargs.get("cmsExtraText"),
        moveLegend=kwargs.get("moveLegend"),
        #cutLine=kwargs.get("cutValue"),
        cutBox={
            "cutValue": kwargs.get("cutValue"),
            "fillColor": kwargs.get("cutFillColour"),
            "box": kwargs.get("cutBox"),
            "line": kwargs.get("cutLine"),
            # NOTE(review): key spelling "cutLessthan" differs from the
            # "cutLessThan" used by other scripts in this file — verify the
            # module-level kwargs dict actually defines this exact key,
            # otherwise this silently resolves to None.
            "lessThan": kwargs.get("cutLessthan")
        },
    )

    # Remove legend?
    if kwargs.get("removeLegend"):
        p.removeLegend()

    # Additional text
    # histograms.addText(0.4, 0.9, "Alexandros Attikis", 17)
    # histograms.addText(0.4, 0.11, "Runs " + datasetsMgr.loadRunRange(), 17)

    # Keep the canvas open for inspection unless running in batch mode
    if not opts.batchMode:
        raw_input("=== plotControlPlots.py:\n\tPress any key to quit ROOT ...")
    return
def main(opts):
    """
    Compare plots from three multicrab pseudo-datasets (no SF, with CR1 SF,
    with CR2 SF): collect the numerator/denominator leading-trijet pT
    histograms (inclusive, Genuine, Fake folders) and hand them to PlotHistos.

    opts : parsed command-line options; must provide noSFcrab, withCR1SFcrab,
           withCR2SFcrab, folder, intLumi and optMode attributes.
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setGridX(False)
    style.setGridY(False)

    optModes = [""]

    # For-loop: All opt Mode
    for opt in optModes:
        opts.optMode = opt

        # Numerator & Denominator dataset manager
        noSF_datasetsMgr = GetDatasetsFromDir(opts, opts.noSFcrab)
        withCR1SF_datasetsMgr = GetDatasetsFromDir(opts, opts.withCR1SFcrab)
        withCR2SF_datasetsMgr = GetDatasetsFromDir(opts, opts.withCR2SFcrab)

        # Update all events to PU weighting
        noSF_datasetsMgr.updateNAllEventsToPUWeighted()
        withCR1SF_datasetsMgr.updateNAllEventsToPUWeighted()
        withCR2SF_datasetsMgr.updateNAllEventsToPUWeighted()

        # Load Luminosities
        noSF_datasetsMgr.loadLuminosities()
        withCR1SF_datasetsMgr.loadLuminosities()
        withCR2SF_datasetsMgr.loadLuminosities()

        if 0:
            noSF_datasetsMgr.PrintCrossSections()
            noSF_datasetsMgr.PrintLuminosities()

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(noSF_datasetsMgr)
        plots.mergeRenameReorderForDataMC(withCR1SF_datasetsMgr)
        plots.mergeRenameReorderForDataMC(withCR2SF_datasetsMgr)

        # Get luminosity if a value is not specified
        if opts.intLumi < 0:
            opts.intLumi = noSF_datasetsMgr.getDataset("Data").getLuminosity()

        # Remove datasets
        removeList = []
        #removeList = ["TTWJetsToLNu_", "TTWJetsToQQ"]
        for i, d in enumerate(removeList, 0):
            msg = "Removing dataset %s" % d
            Print(ShellStyles.WarningLabel() + msg + ShellStyles.NormalStyle(), i == 0)
            noSF_datasetsMgr.remove(
                filter(lambda name: d in name, noSF_datasetsMgr.getAllDatasetNames()))
            withCR1SF_datasetsMgr.remove(
                filter(lambda name: d in name, withCR1SF_datasetsMgr.getAllDatasetNames()))
            # FIX: also prune the CR2 manager; previously only noSF and CR1
            # were pruned, leaving the three managers inconsistent whenever
            # removeList was non-empty.
            withCR2SF_datasetsMgr.remove(
                filter(lambda name: d in name, withCR2SF_datasetsMgr.getAllDatasetNames()))

        # Print summary of datasets to be used
        if 0:
            noSF_datasetsMgr.PrintInfo()
            withCR1SF_datasetsMgr.PrintInfo()
            withCR2SF_datasetsMgr.PrintInfo()

        # Merge EWK samples
        EwkDatasets = ["Diboson", "DYJetsToLL", "WJetsHT"]
        noSF_datasetsMgr.merge("EWK", EwkDatasets)
        withCR1SF_datasetsMgr.merge("EWK", EwkDatasets)
        withCR2SF_datasetsMgr.merge("EWK", EwkDatasets)

        # Get histogram names
        folderListIncl = withCR1SF_datasetsMgr.getDataset(
            withCR1SF_datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(opts.folder)
        folderList = [
            h for h in folderListIncl
            if "AfterAllSelections_LeadingTrijet_Pt" in h
        ]

        # FIX: drop "lowMET" entries with a comprehension. The original
        # mutated folderList (list.remove) while iterating it, which skips
        # the element following each removed one.
        folderList = [h for h in folderList if "lowMET" not in h]

        folderPath = os.path.join(opts.folder, "")
        folderPathGen = os.path.join(opts.folder + "Genuine")
        folderPathFake = os.path.join(opts.folder + "Fake")

        # Numerator histograms: inclusive + Genuine + Fake folders
        histoList = folderList
        num_pathList = [os.path.join(folderPath, h) for h in histoList]
        num_pathList.extend([os.path.join(folderPathGen, h) for h in histoList])
        num_pathList.extend([os.path.join(folderPathFake, h) for h in histoList])

        # Denominator Histogram (To be used in the estimation of QCD Data-Driven)
        histoList = [
            h for h in folderListIncl
            if "AfterStandardSelections_LeadingTrijet_Pt" in h
        ]
        den_pathList = [os.path.join(folderPath, h) for h in histoList]
        den_pathList.extend([os.path.join(folderPathGen, h) for h in histoList])
        den_pathList.extend([os.path.join(folderPathFake, h) for h in histoList])

        # FIX: same mutation-while-iterating defect as above.
        den_pathList = [h for h in den_pathList if "lowMET" not in h]

        # Do the histograms
        PlotHistos(noSF_datasetsMgr, withCR1SF_datasetsMgr,
                   withCR2SF_datasetsMgr, num_pathList, den_pathList, opts)
    return
def main(opts):
    """
    Plot the binned control-region histograms (CRone/CRtwo) for a fixed list
    of leading-trijet/tetrajet variables, for every optimisation mode found in
    the multicrab directory (or the one given via opts.optMode).

    opts : parsed command-line options (mcrab, optMode, intLumi, useMC,
           folder, saveDir, url, verbose, ...)
    """
    # Apply TDR style
    style = tdrstyle.TDRStyle()
    style.setOptStat(True)
    style.setGridX(True)
    style.setGridY(True)

    # Obtain dsetMgrCreator and register it to module selector
    dsetMgrCreator = dataset.readFromMulticrabCfg(directory=opts.mcrab)

    # Get list of eras, modes, and optimisation modes
    erasList = dsetMgrCreator.getDataEras()
    modesList = dsetMgrCreator.getSearchModes()
    optList = dsetMgrCreator.getOptimizationModes()
    sysVarList = dsetMgrCreator.getSystematicVariations()
    sysVarSrcList = dsetMgrCreator.getSystematicVariationSources()

    # If user does not define optimisation mode do all of them
    if opts.optMode == None:
        if len(optList) < 1:
            optList.append("")
        optModes = optList
    else:
        optModes = [opts.optMode]

    # For-loop: All opt Mode
    for opt in optModes:
        opts.optMode = opt

        # Setup & configure the dataset manager
        datasetsMgr = GetDatasetsFromDir(opts)
        datasetsMgr.updateNAllEventsToPUWeighted()
        datasetsMgr.loadLuminosities() # from lumi.json
        if opts.verbose:
            datasetsMgr.PrintCrossSections()
            datasetsMgr.PrintLuminosities()

        # Get the PSets:
        if 0:
            datasetsMgr.printSelections()
            #PrintPSet("BJetSelection", datasetsMgr, depth=150)

        # ZJets and DYJets overlap!
        if "ZJetsToQQ_HT600toInf" in datasetsMgr.getAllDatasetNames() and "DYJetsToQQ_HT180" in datasetsMgr.getAllDatasetNames():
            Print("Cannot use both ZJetsToQQ and DYJetsToQQ due to duplicate events? Investigate. Removing ZJetsToQQ datasets for now ..", True)
            datasetsMgr.remove(filter(lambda name: "ZJetsToQQ" in name, datasetsMgr.getAllDatasetNames()))

        # Merge histograms (see NtupleAnalysis/python/tools/plots.py)
        plots.mergeRenameReorderForDataMC(datasetsMgr)

        # Get luminosity if a value is not specified
        if opts.intLumi < 0:
            opts.intLumi = datasetsMgr.getDataset("Data").getLuminosity()

        # Remove datasets (keep MC QCD only when explicitly requested)
        removeList = ["QCD-b", "Charged"]
        if not opts.useMC:
            removeList.append("QCD")
        for i, d in enumerate(removeList, 0):
            msg = "Removing dataset %s" % d
            Verbose(ShellStyles.WarningLabel() + msg + ShellStyles.NormalStyle(), i == 0)
            datasetsMgr.remove(filter(lambda name: d in name, datasetsMgr.getAllDatasetNames()))

        # Print summary of datasets to be used
        if 0:
            datasetsMgr.PrintInfo()

        # Merge EWK samples
        datasetsMgr.merge("EWK", aux.GetListOfEwkDatasets())

        # Print dataset information
        datasetsMgr.PrintInfo()

        # List of TDirectoryFile (_CRone, _CRtwo, _VR, _SR)
        tdirs = [
            "LdgTrijetPt_", "LdgTrijetMass_", "LdgTrijetBJetBdisc_",
            "TetrajetBJetPt_", "TetrajetBJetEta_", "TetrajetBJetBdisc_",
            "LdgTetrajetPt_", "LdgTetrajetMass_"
        ]
        region = ["CRone", "CRtwo"]
        hList = []
        for d in tdirs:
            for r in region:
                hList.append(d + r)

        # Get the folders with the binned histograms
        folderList_ = datasetsMgr.getDataset(
            datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(opts.folder)
        folderList = [h for h in folderList_ if h in hList]

        # For-loop: All folders — collect every histogram path beneath them
        histoPaths = []
        for f in folderList:
            folderPath = os.path.join(opts.folder, f)
            histoList = datasetsMgr.getDataset(
                datasetsMgr.getAllDatasetNames()[0]).getDirectoryContent(folderPath)
            pathList = [os.path.join(folderPath, h) for h in histoList]
            histoPaths.extend(pathList)

        # Get all the bin labels
        binLabels = GetBinLabels("CRone", histoPaths)

        # One plot set per variable (all bins of both regions together)
        for i, t in enumerate(tdirs, 1):
            myList = []
            for p in histoPaths:
                if t in p:
                    myList.append(p)
            msg = "{:<9} {:>3} {:<1} {:<3} {:<50}".format(
                "Histogram", "%i" % i, "/", "%s:" % (len(tdirs)), t.replace("_", ""))
            Print(ShellStyles.SuccessStyle() + msg + ShellStyles.NormalStyle(), i == 1)
            PlotHistograms(datasetsMgr, myList, binLabels, opts)

    # Save the plots
    Print("All plots saved under directory %s" % (ShellStyles.NoteStyle() + aux.convertToURL(opts.saveDir, opts.url) + ShellStyles.NormalStyle()), True)
    return
def main():
    """
    Draw one 2-D plot per histogram name in the module-level `hNames` list for
    the reference dataset, and save the canvases.

    Relies on module-level state: `parseOpts`, `kwargs`, `hNames` and the
    helper functions defined elsewhere in this file.
    """
    style = tdrstyle.TDRStyle()
    #style.setWide(True)
    style.setPaletteMy()
    ROOT.gStyle.SetNumberContours(20)
    # tdrstyle.setDeepSeaPalette()
    # tdrstyle.setRainBowPalette()
    # tdrstyle.setDarkBodyRadiatorPalette()
    # tdrstyle.setGreyScalePalette()
    # tdrstyle.setTwoColorHuePalette()

    # Set ROOT batch mode boolean
    ROOT.gROOT.SetBatch(parseOpts.batchMode)

    # Get all datasets from the mcrab dir
    datasetsMgr = GetDatasetsFromDir(parseOpts.mcrab, kwargs.get("analysis"))

    # Determine Integrated Luminosity (If Data datasets present)
    intLumi = GetLumi(datasetsMgr)

    # Update to PU
    datasetsMgr.updateNAllEventsToPUWeighted()

    # Remove datasets
    datasetsMgr.remove(kwargs.get("rmDataset"))
    # datasetsMgr.remove(filter(lambda name: not kwargs.get("refDataset") in name, datasetsMgr.getAllDatasetNames()))

    # Set custom XSections
    # d.getDataset("TT_ext3").setCrossSection(831.76)

    # Default merging & ordering: "Data", "QCD", "SingleTop", "Diboson"
    plots.mergeRenameReorderForDataMC( datasetsMgr ) #WARNING: Merged MC histograms must be normalized to something!

    # Remove datasets (for merged names)
    datasetsMgr.remove(kwargs.get("rmDataset"))

    # For-loop: All Histogram names
    for hName in hNames:
        savePath, saveName = GetSavePathAndName(hName, **kwargs)

        # Get Histos for Plotter
        refHisto, otherHistos = GetHistosForPlotter(datasetsMgr, hName, **kwargs)

        # Create a plot
        p = plots.PlotBase([refHisto], kwargs.get("saveFormats"))

        # Remove negative contributions
        #RemoveNegativeBins(datasetsMgr, hName, p)

        # Customize
        # p.histoMgr.setHistoDrawStyleAll("COL") #"CONT4" "COLZ" "COL"
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinX(kwargs.get("rebinX")))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinY(kwargs.get("rebinY")))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().GetXaxis().SetRangeUser(1.0, 5.0))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().GetYaxis().SetRangeUser(1.0, 5.0))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().GetZaxis().SetRangeUser(0.0, 0.015))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().SetMinimum(kwargs.get("zMin")))
        # p.histoMgr.forEachHisto(lambda h: h.getRootHisto().SetMaximum(kwargs.get("zMax")))

        # Create a frame
        # NOTE(review): `opts` is a fresh local here (no parameter to shadow in
        # this function, unlike the sibling scripts).
        opts = {"ymin": 0.0, "ymaxfactor": 1.0}
        p.createFrame(saveName, opts=opts)

        # Customise frame
        p.getFrame().GetXaxis().SetTitle(getTitleX(refHisto, **kwargs))
        p.getFrame().GetYaxis().SetTitle(getTitleY(refHisto, **kwargs))
        # p.getFrame().GetZaxis().SetTitle( getTitleZ(refHisto, **kwargs) ) #does not work

        # SetLog
        SetLogAndGrid(p, **kwargs)

        # Add cut line/box
        _kwargs = {"lessThan": kwargs.get("cutLessThan")}
        p.addCutBoxAndLine(cutValue=kwargs.get("cutValue"),
                           fillColor=kwargs.get("cutFillColour"),
                           box=kwargs.get("cutBox"),
                           line=kwargs.get("cutLine"),
                           **_kwargs)

        # Customise Legend (created, then removed — 2-D plots carry no legend)
        moveLegend = {"dx": -0.1, "dy": +0.0, "dh": -0.1}
        p.setLegend(histograms.moveLegend(histograms.createLegend(), **moveLegend))
        p.removeLegend()

        # Add MC Uncertainty (Invalid method for a 2-d histogram)
        #p.addMCUncertainty()

        # Draw plots
        p.draw()

        # Customise text
        histograms.addStandardTexts(lumi=intLumi)
        histograms.addText(0.16, 0.95, plots._legendLabels[kwargs.get("refDataset")], 22)
        #histograms.addText(0.73, 0.88, plots._legendLabels[kwargs.get("refDataset")], 17)

        # Save canvas under custom dir
        SaveAs(p, savePath, saveName, kwargs.get("saveFormats"))
    return