# --- S+B fit configuration with a data-driven background (HistFitter) ---
# NOTE(review): bkgSample, nSigSR, nDataCR, nDataSR, sigxsec, xtrap and the
# ROOT colour constants are defined elsewhere in this file -- not visible here.

# Data-driven background: normalisation comes from data via a free per-bin
# shape factor instead of MC.
ddSample = Sample("DataDriven",kGreen+2)
ddSample.addShapeFactor("DDShape")

# Signal sample: floated with norm factor mu_Sig (nominal 1.0, range 0.2-1.5);
# yields are entered by hand (counting-experiment style buildHisto).
sigSample = Sample("Sig",kPink)
sigSample.setNormFactor("mu_Sig",1.,0.2,1.5)
sigSample.buildHisto(nSigSR,"SR","cuts",0.5)
sigSample.setNormByTheory(True)   # luminosity uncertainty applies to this sample
sigSample.addSystematic(sigxsec)

# Observed data in both regions.
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto(nDataCR,"CR","cuts",0.5)
dataSample.buildHisto(nDataSR,"SR","cuts",0.5)

# Define top-level
ana = configMgr.addFitConfig("SPlusB")
ana.addSamples([bkgSample,ddSample,dataSample])

# Define measurement
meas = ana.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039)
meas.addPOI("mu_Sig")
meas.addParamSetting("Lumi",True)   # hold luminosity constant in the fit

# Add the channels
chanCR = ana.addChannel("cuts",["CR"],2,0.5,2.5)
chanSR = ana.addChannel("cuts",["SR"],2,0.5,2.5)
chanSR.addSample(sigSample)   # signal enters the SR only
chanSR.getSample("DataDriven").addSystematic(xtrap)   # CR->SR extrapolation systematic
ana.addBkgConstrainChannels([chanCR])   # the CR constrains the backgrounds
configMgr.weights = ["eventweight"] ############################################################## ## Set up the systematics ## ############################################################## userPrint("Configuring systematic") configMgr.nomName = "_CENTRAL" sysObj = SystematicObject(configMgr, userOpts.doShape, userOpts.splitMCSys) ############################################################## ## Set up Samples and normalization factors ## ############################################################## userPrint("Setting up samples, norm factors, and systematics") # specify the parameter of interest tlx = configMgr.addFitConfig("TopLvlXML") meas = tlx.addMeasurement(name="NormalMeasurement", lumi=1., lumiErr=0.028) meas.addPOI("mu_SIG") ## EXCL: mu_SIG, upper limi table # determine if we should use stat useStat = True if userOpts.splitMCSys : useStat = False # If using stat set some limits tlx.statErrThreshold = 0.001 # define quantities to make configuration below easier SR = userOpts.signalRegion lepChan = userOpts.leptonChannel
#Systematics discoveryFitConfig.getSample("Top").addSystematic(topKtScale) discoveryFitConfig.getSample("WZ").addSystematic(wzKtScale) discoveryFitConfig.addSystematic(jes) #Channel srBin = discoveryFitConfig.addChannel("cuts",["SR"],1,0.5,1.5) discoveryFitConfig.setSignalChannels([srBin]) srBin.addDiscoverySamples(["Discovery"],[1.],[0.],[10000.],[kMagenta]) #************** # Exclusion fit #************** if myFitType==FitType.Exclusion: # Fit config instance exclusionFitConfig = configMgr.addFitConfig("Exclusion") meas=exclusionFitConfig.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039) meas.addPOI("mu_SIG") # Samples exclusionFitConfig.addSamples([topSample,wzSample,dataSample]) # Systematics exclusionFitConfig.getSample("Top").addSystematic(topKtScale) exclusionFitConfig.getSample("WZ").addSystematic(wzKtScale) exclusionFitConfig.addSystematic(jes) # Channel srBin = exclusionFitConfig.addChannel("cuts",["SR"],1,0.5,1.5) exclusionFitConfig.setSignalChannels([srBin])
# Python 2 print statement -- this file targets the Python 2 HistFitter stack.
print "background only fit not implemented"

#**************
# Discovery fit
#**************
if myFitType == FitType.Discovery:
    print "discovery fit not implemented"

#**************
# Exclusion fit
#**************
if myFitType == FitType.Exclusion:
    from fitConfig import fitConfig

    exclusionFitConfig = configMgr.addFitConfig("ExclusionTemplate")
    meas = exclusionFitConfig.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=0.029)
    meas.addPOI("mu_SIG")

    # Set stuff common to all fitConfigs
    exclusionFitConfig.addSamples([
        wgammaSample, dibosonSample, tribosonSample, topXSample, higgsSample,
        qFlipSample, fLepSample, dataSample
    ])
    #exclusionFitConfig.addSamples([wgammaSample,dibosonSample,tribosonSample,topXSample,higgsSample,dataSample])
    #exclusionFitConfig.addSamples([dibosonSample,ttbarSample,cflipSample,fakelepSample,dataSample])
    exclusionFitConfig.setTreeName("BDT_PP1_evt2l")
    # Control-region name built from the command-line channel and delta-m options.
    cutName = "CR_%s_dm%s" % (options.channel, options.dm)
def process(self, cut): configMgr.analysisName = "MonoJet_"+str(cut)+"GeV" in_file_path = "data/"+configMgr.analysisName+".root" configMgr.histBackupCacheFile = "data/"+configMgr.analysisName+"_template.root" configMgr.outputFileName = "results/%s_Output.root"%configMgr.analysisName # define unnecessary cuts configMgr.cutsDict["SR"] = "1." configMgr.cutsDict["WMUNU"] = "1." configMgr.cutsDict["WENU"] = "1." configMgr.cutsDict["ZMM"] = "1." configMgr.weights = "1." yield_mgr = SampleHist(cut) # get systematics yield_mgr.create_common_sys() # create MC samples yield_mgr.create_samples() # define background-only bkg = configMgr.addFitConfig("BkgOnly") if self.use_stat: bkg.statErrThreshold = 0.05 else: bkg.statErrThreshold = None all_samples = yield_mgr.bkg_samples[:] all_samples.append(yield_mgr.data_sample) bkg.addSamples(all_samples) meas = bkg.addMeasurement(name="Normal", lumi=1.0, lumiErr=0.039) meas.addPOI("mu") meas.addParamSetting("Lumi", True, 1) constraint_chan = [] for region in regions: if "SR" in region: continue cstr_ch = bkg.addChannel("cuts", [region], 1, 0.5,1.5) cstr_ch.minY = 1e-4 cstr_ch.maxY = 400 constraint_chan.append(cstr_ch) bkg.setBkgConstrainChannels(constraint_chan) #Discovery if myFitType == FitType.Discovery: discovery = configMgr.addFitConfigClone(bkg, "Discovery") discovery.addSamples(yield_mgr.signal_sample) discovery.setSignalSample(yield_mgr.signal_sample) sig_channel = discovery.addChannel("cuts",["SR"], 1, 0.5, 1.5) sig_channel.minY = 1e-4 sig_channel.maxY = 2500 discovery.setSignalChannels([sig_channel]) if myFitType == FitType.Exclusion: print "In exclusion mode" exclusion = configMgr.addFitConfigClone(bkg, "Exclusion") exclusion.addSamples(yield_mgr.signal_sample) exclusion.setSignalSample(yield_mgr.signal_sample) sig_channel = exclusion.addChannel("cuts",["SR"], 1, 0.5, 1.5) sig_channel.minY = 1e-4 sig_channel.maxY = 2500 exclusion.setSignalChannels([sig_channel]) if configMgr.executeHistFactory: pass
# --- S+B fit configuration with a data-driven background (HistFitter) ---
# Near-duplicate of the earlier "SPlusB" fragment; the final call here uses
# setBkgConstrainChannels (replace) rather than addBkgConstrainChannels (append).
# NOTE(review): bkgSample, nSigSR, nDataCR, nDataSR, sigxsec, xtrap are
# defined elsewhere in this file.

# Data-driven background normalised from data via a free shape factor.
ddSample = Sample("DataDriven",kGreen+2)
ddSample.addShapeFactor("DDShape")

# Signal sample floated with mu_Sig (nominal 1.0, range 0.2-1.5).
sigSample = Sample("Sig",kPink)
sigSample.setNormFactor("mu_Sig",1.,0.2,1.5)
sigSample.buildHisto(nSigSR,"SR","cuts",0.5)
sigSample.setNormByTheory(True)   # lumi uncertainty applies
sigSample.addSystematic(sigxsec)

# Observed data in both regions.
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto(nDataCR,"CR","cuts",0.5)
dataSample.buildHisto(nDataSR,"SR","cuts",0.5)

# Define top-level
ana = configMgr.addFitConfig("SPlusB")
ana.addSamples([bkgSample,ddSample,dataSample])

# Define measurement
meas = ana.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039)
meas.addPOI("mu_Sig")
meas.addParamSetting("Lumi",True)   # hold luminosity constant

# Add the channels
chanCR = ana.addChannel("cuts",["CR"],2,0.5,2.5)
chanSR = ana.addChannel("cuts",["SR"],2,0.5,2.5)
chanSR.addSample(sigSample)   # signal enters the SR only
chanSR.getSample("DataDriven").addSystematic(xtrap)   # CR->SR extrapolation syst
ana.setBkgConstrainChannels([chanCR])
userPrint("Configuring Systematic") # Add all sys to SystematicObject # where you can access the systematics # through this object when initializing # the backgrounds sysObj = SystematicObject(configMgr, userOpts.doShape, userOpts.splitMCSys) configMgr.nomName = "_CENTRAL" ############################################################# ## Setting up Samples and normalization factors # ############################################################# userPrint("Setting up samples and norm factors") # Specify the paramater of interest tlx = configMgr.addFitConfig("TopLvlXML") meas = tlx.addMeasurement(name="NormalMeasurement",lumi=1.,lumiErr=0.028) meas.addPOI("mu_SIG") ## EXCL:mu_SIG, upper limit table # Determine if we should use stat useStat = True if userOpts.splitMCSys: useStat = False # If using stat set some limits tlx.statErrThreshold = 0.001 # Now specify the samples based on if 2lep is set # I am beginning to think the flag is useless? # Define two quantities useful for configuring
# Reuse pre-made background histograms from a per-analysis backup cache.
configMgr.histBackupCacheFile = "data/ZL_%s_Background/ZL_%s_Background.root" % (anaName, anaName)
configMgr.useHistBackupCacheFile = True
log.info("setting configMgr.histBackupCacheFile to %s" % configMgr.histBackupCacheFile)

# One fit configuration per signal grid point.
for point in allpoints:
    if point == "":
        continue

    # Fit config instance
    name = "Fit_%s_%s" % (grid, point)
    if point and grid == "SM_SS_direct_compressedPoints":
        pass  # (see name override just below)
    if grid == "SM_SS_direct_compressedPoints":
        name = "Fit_SM_SS_direct_%s" % (point)
    myFitConfig = configMgr.addFitConfig(name)
    myFitConfig.statErrThreshold = zlFitterConfig.statErrThreshold
    # NOTE(review): "luminosityEr" looks like a truncated attribute name
    # (cf. the usual "luminosityErr") -- confirm against zlFitterConfig.
    meas = myFitConfig.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=zlFitterConfig.luminosityEr)
    meas.addPOI("mu_SIG")

    #-------------------------------------------------
    # Fix parameters
    #-------------------------------------------------

    # fix diboson to MC prediction
    meas.addParamSetting("mu_"+zlFitterConfig.dibosonSampleName, True, 1)

    # fix Lumi if not exclusion fit
    if myFitType != FitType.Exclusion:
        meas.addParamSetting("Lumi", True, zlFitterConfig.luminosity)
if do_mc_syst: photonjet_sample.addSystematic(syst_gamjet_theo_all) # if not do_detector_syst and not do_dd_syst and not do_mc_syst: # ucb = Systematic("uncorrl_bkg", None, 1+0.30, 1-0.30, "user", "userOverallSys") # 30% error up and down # for sample in bkg_samples: # sample.addSystematic(ucb) #--------- # Fit #--------- # Background only fit if myFitType == FitType.Background: fitconfig = configMgr.addFitConfig('BkgOnlyFit') # Discovery fit elif myFitType == FitType.Discovery: fitconfig = configMgr.addFitConfig('DiscoveryFit') # Exclusion fit elif myFitType == FitType.Exclusion: fitconfig = configMgr.addFitConfig('ExclusionFit') fitconfig.addSamples(bkg_samples + data_samples) # Stat uncertainties fitconfig.statErrThreshold = 0.01 #fitconfig.statStatErrorType('Poisson')
# Samples shared by every fit configuration below.
commonSamples = [allbkgSample,dataSample]
configMgr.plotColours = [kGreen,kBlack]

## Parameters of the Measurement
measName = "BasicMeasurement"
measLumi = 1.
measLumiError = 0.039

## Parameters of Channels
cutsRegions = ["SR"]
cutsNBins = 1
cutsBinLow = 0.0
cutsBinHigh = 1.0

## Bkg-only fit
bkgOnly = configMgr.addFitConfig("SimpleChannel_BkgOnly")
bkgOnly.statErrThreshold=None #0.5
bkgOnly.addSamples(commonSamples)
bkgOnly.addSystematic(jes)   # JES correlated across all samples
meas = bkgOnly.addMeasurement(measName,measLumi,measLumiError)
meas.addPOI("mu_SIG")
cutsChannel = bkgOnly.addChannel("cuts",cutsRegions,cutsNBins,cutsBinLow,cutsBinHigh)

## Discovery fit
# NOTE(review): the commented code below contains a typo ("adFitConfigClone"
# vs addFitConfigClone) -- fix before re-enabling.
#discovery = configMgr.adFitConfigClone(bkgOnly,"SimpleChannel_Discovery")
#discovery.clearSystematics()
#sigSample = Sample("discoveryMode",kBlue)
#sigSample.setNormFactor("mu_SIG",1.0, 0.0, 5.0)
#sigSample.setNormByTheory()
#discovery.addSamples(sigSample)
#discovery.setSignalSample(sigSample)
def common_setting(mass):
    """Configure a single-bin (or my_nbins-bin) counting fit for one mass point.

    Builds background, top, signal and data samples from the module-level
    `yields` dictionary, attaches user-defined systematics, and registers an
    "SPlusB" fit configuration with configMgr.

    :param mass: resonance mass as a string key into `yields` / `unc_*` dicts.
    NOTE(review): relies on many module-level names (yields, signal_prefix,
    stat_config, use_mcstat, my_disc, my_nbins, my_xmin, my_xmax, BLIND,
    stat_only, no_syst, syst_type, dict_syst_check, impact_check_continue,
    sum_of_bkg, unc_ttbar, unc_sig_acc, MY_SIGNAL_NORM) defined elsewhere.
    """
    from configManager import configMgr
    from ROOT import kBlack, kGray, kRed, kPink, kViolet, kBlue, kAzure, kGreen, \
        kOrange
    from configWriter import Sample
    from systematic import Systematic
    import os

    # Plot colour per background/signal process name.
    color_dict = {
        "Zbb": kAzure, "Zbc": kAzure, "Zbl": kAzure, "Zcc": kAzure,
        "Zcl": kBlue, "Zl": kBlue,
        "Wbb": kGreen, "Wbc": kGreen, "Wbl": kGreen, "Wcc": kGreen,
        "Wcl": kGreen, "Wl": kGreen,
        "ttbar": kOrange, "stop": kOrange, "stopWt": kOrange,
        "ZZPw": kGray, "WZPw": kGray, "WWPw": kGray,
        "fakes": kPink,
        "Zjets": kAzure, "Wjets": kGreen, "top": kOrange, "diboson": kGray,
        "$Z\\tau\\tau$+HF": kAzure, "$Z\\tau\\tau$+LF": kBlue,
        "$W$+jets": kGreen, "$Zee$": kViolet,
        "Zhf": kAzure, "Zlf": kBlue, "Zee": kViolet, "others": kViolet,
        signal_prefix + "1000": kRed, signal_prefix + "1100": kRed,
        signal_prefix + "1200": kRed, signal_prefix + "1400": kRed,
        signal_prefix + "1600": kRed, signal_prefix + "1800": kRed,
        signal_prefix + "2000": kRed, signal_prefix + "2500": kRed,
        signal_prefix + "3000": kRed,
        # Add your new processes here
        "VH": kGray + 2, "VHtautau": kGray + 2, "ttH": kGray + 2,
    }

    ##########################
    # Setting the parameters of the hypothesis test
    configMgr.doExclusion = True  # True=exclusion, False=discovery
    configMgr.nTOYs = 10000  # default=5000
    configMgr.calculatorType = 0  # 2=asymptotic calculator, 0=frequentist calculator
    configMgr.testStatType = 3  # 3=one-sided profile likelihood test statistic (LHC default)
    configMgr.nPoints = 30  # number of values scanned of signal-strength for upper-limit determination of signal strength.
    configMgr.writeXML = False
    # Fixed seeds for reproducible toy generation.
    configMgr.seed = 40
    configMgr.toySeedSet = True
    configMgr.toySeed = 400

    # Pruning
    # - any overallSys systematic uncertainty if the difference of between the up variation and the nominal and between
    #   the down variation and the nominal is below a certain (user) given threshold
    # - for histoSys types, the situation is more complex:
    #   - a first check is done if the integral of the up histogram - the integral of the nominal histogram is smaller
    #     than the integral of the nominal histogram and the same for the down histogram
    #   - then a second check is done if the shape of the up, down and nominal histograms is very similar. Only when both
    #     conditions are fulfilled the systematics will be removed.
    # default is False, so the pruning is normally not enabled
    configMgr.prun = True
    # The threshold to decide if an uncertainty is small or not is set by configMgr.prunThreshold = 0.005
    # where the number gives the fraction of deviation with respect to the nominal histogram below which an uncertainty
    # is considered to be small. The default is currently set to 0.01, corresponding to 1 % (This might be very aggressive
    # for the one or the other analyses!)
    configMgr.prunThreshold = 0.005
    # method 1: a chi2 test (this is still a bit experimental, so watch out if this is working or not)
    # method 2: checking for every bin of the histograms that the difference between up variation and nominal and down (default)
    #           variation and nominal is below a certain threshold.
    configMgr.prunMethod = 2
    # Smoothing: HistFitter does not provide any smoothing tools.
    # More Details: https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/HistFitterAdvancedTutorial#Pruning_in_HistFitter
    ##########################

    # Keep SRs also in background fit configuration
    configMgr.keepSignalRegionType = True
    configMgr.blindSR = BLIND

    # Give the analysis a name
    configMgr.analysisName = "bbtautau" + "X" + mass
    configMgr.histCacheFile = "data/" + configMgr.analysisName + ".root"
    configMgr.outputFileName = "results/" + configMgr.analysisName + "_Output.root"

    # Define cuts (yields are entered by hand, so the SR cut is trivial)
    configMgr.cutsDict["SR"] = "1."

    # Define weights
    configMgr.weights = "1."

    # Define samples: one Sample per background process found in the yields table.
    list_samples = []
    yields_mass = yields[mass]
    for process, yields_process in yields_mass.items():
        if process == 'data' or signal_prefix in process:
            continue
        # print("-> {} / Colour: {}".format(process, color_dict[process]))
        bkg = Sample(str(process), color_dict[process])
        bkg.setStatConfig(stat_config)
        # OLD: add lumi uncertainty (bkg/sig correlated, not for data-driven fakes)
        # NOW: add lumi by hand
        bkg.setNormByTheory(False)
        noms = yields_process["nEvents"]
        errors = yields_process["nEventsErr"] if use_mcstat else [0.0]
        # print(" nEvents (StatError): {} ({})".format(noms, errors))
        bkg.buildHisto(noms, "SR", my_disc, 0.5)
        bkg.buildStatErrors(errors, "SR", my_disc)
        if not stat_only and not no_syst:
            # Fakes get their sideband systematic; everything else gets the
            # hand-made luminosity systematic.
            if process == 'fakes':
                key_here = "ATLAS_FF_1BTAG_SIDEBAND_Syst_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.50, 0.50,
                                   "user", syst_type))
            else:
                key_here = "ATLAS_Lumi_Run2_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.017, 0.983,
                                   "user", syst_type))
            # Per-process up/down variations from the yields table.
            for key, values in yields_process.items():
                if 'ATLAS' not in key:
                    continue
                if impact_check_continue(dict_syst_check, key):
                    continue
                # this should not be applied on the Sherpa
                if process == 'Zhf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                if process == 'Zlf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                ups = values[0]
                downs = values[1]
                # Per-bin ratios to nominal; bins with zero nominal get ratio 1.
                systUpRatio = [
                    u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
                ]
                systDoRatio = [
                    d / n if n != 0. else float(1.) for d, n in zip(downs, noms)
                ]
                bkg.addSystematic(
                    Systematic(str(key), configMgr.weights, systUpRatio,
                               systDoRatio, "user", syst_type))
        list_samples.append(bkg)

    # FIXME: This is unusual!
    # Placeholder ttbar sample with a tiny yield carrying only an upper-bound
    # systematic; HistFitter can accept such large up ratios.
    top = Sample('top', kOrange)
    top.setStatConfig(False)  # No stat error
    top.setNormByTheory(False)  # consider lumi for it
    top.buildHisto([0.00001], "SR", my_disc, 0.5)  # small enough
    # Systematic(name, weight, ratio_up, ratio_down, syst_type, syst_fistfactory_type)
    if not stat_only and not no_syst:
        key_here = 'ATLAS_TTBAR_YIELD_UPPER_hadhad'
        if not impact_check_continue(dict_syst_check, key_here):
            top.addSystematic(
                Systematic(key_here, configMgr.weights, unc_ttbar[mass], 0.9,
                           "user", syst_type))
    list_samples.append(top)

    # Signal sample: mu_Sig floated in [0, 100]; yields scaled by the
    # normalisation constant MY_SIGNAL_NORM (in 1e-3 units).
    sigSample = Sample("Sig", kRed)
    sigSample.setNormFactor("mu_Sig", 1., 0., 100.)
    #sigSample.setStatConfig(stat_config)
    sigSample.setStatConfig(False)
    sigSample.setNormByTheory(False)
    noms = yields_mass[signal_prefix + mass]["nEvents"]
    errors = yields_mass[signal_prefix + mass]["nEventsErr"] if use_mcstat else [0.0]
    sigSample.buildHisto([n * MY_SIGNAL_NORM * 1e-3 for n in noms], "SR", my_disc, 0.5)
    #sigSample.buildStatErrors(errors, "SR", my_disc)
    for key, values in yields_mass[signal_prefix + mass].items():
        if 'ATLAS' not in key:
            continue
        if impact_check_continue(dict_syst_check, key):
            continue
        ups = values[0]
        downs = values[1]
        systUpRatio = [
            u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
        ]
        systDoRatio = [
            d / n if n != 0. else float(1.) for d, n in zip(downs, noms)
        ]
        if not stat_only and not no_syst:
            sigSample.addSystematic(
                Systematic(str(key), configMgr.weights, systUpRatio,
                           systDoRatio, "user", syst_type))
    if not stat_only and not no_syst:
        # Flat signal-acceptance and luminosity systematics.
        key_here = "ATLAS_SigAccUnc_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights,
                           [1 + unc_sig_acc[mass] for i in range(my_nbins)],
                           [1 - unc_sig_acc[mass] for i in range(my_nbins)],
                           "user", syst_type))
        key_here = "ATLAS_Lumi_Run2_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights, 1.017, 0.983, "user",
                           syst_type))
    list_samples.append(sigSample)

    # Set observed and expected number of events in counting experiment
    n_SPlusB = yields_mass[signal_prefix + mass]["nEvents"][0] + sum_of_bkg(yields_mass)[0]
    n_BOnly = sum_of_bkg(yields_mass)[0]

    if BLIND:
        # configMgr.useAsimovSet = True  # Use the Asimov dataset
        # configMgr.generateAsimovDataForObserved = True  # Generate Asimov data as obsData for UL
        # configMgr.useSignalInBlindedData = False
        ndata = sum_of_bkg(yields_mass)
    else:
        try:
            ndata = yields_mass["data"]["nEvents"]
        except:
            ndata = [0. for _ in range(my_nbins)]

    lumiError = 0.017  # Relative luminosity uncertainty

    dataSample = Sample("Data", kBlack)
    dataSample.setData()
    dataSample.buildHisto(ndata, "SR", my_disc, 0.5)
    list_samples.append(dataSample)

    # Define top-level
    ana = configMgr.addFitConfig("SPlusB")
    ana.addSamples(list_samples)
    ana.setSignalSample(sigSample)

    # Define measurement
    # lumiErr is deliberately shrunk so the built-in lumi systematic gets
    # pruned -- the hand-added ATLAS_Lumi_Run2_hadhad systematic is used instead.
    meas = ana.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=lumiError / 100000.)
    # make it very small so that pruned
    # we use the one added by hand
    meas.addPOI("mu_Sig")
    #meas.statErrorType = "Poisson"
    # Fix the luminosity in HistFactory to constant
    meas.addParamSetting("Lumi", True, 1)

    # Add the channel
    chan = ana.addChannel(my_disc, ["SR"], my_nbins, my_xmin, my_xmax)
    chan.blind = BLIND
    #chan.statErrorType = "Poisson"
    ana.addSignalChannels([chan])

    # These lines are needed for the user analysis to run
    # Make sure file is re-made when executing HistFactory
    if configMgr.executeHistFactory:
        if os.path.isfile("data/%s.root" % configMgr.analysisName):
            os.remove("data/%s.root" % configMgr.analysisName)
configMgr.analysisName = "ZL2013_%s_%s_%s" % (anaName, grid, allpoints[0])
configMgr.histCacheFile = "data/%s.root" % configMgr.analysisName
# NOTE(review): trailing space inside "results/%s_Output.root " -- probably
# unintended; left untouched since it is a runtime string.
configMgr.outputFileName = "results/%s_Output.root " % configMgr.analysisName

# Note that we do not create fitConfigClones from som basic fitConfig with only the backgrounds
# As a consequence, memory usage goes through the roof for more than ~10 points.
# This NEEDS to be rewritten, but initial attempts caused different results, so for the draft INT
# we leave the code as is. On my TODO list. --GJ, 16/12/12
for point in allpoints:
    if point == "":
        continue

    # Fit config instance
    name = "Fit_%s" % point
    myFitConfig = configMgr.addFitConfig(name)
    meas = myFitConfig.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=0.039)
    meas.addPOI("mu_SIG")

    meas.addParamSetting("mu_Diboson", True, 1)  # fix diboson to MC prediction
    # Without the dedicated control regions the corresponding normalisations
    # are fixed to their MC predictions.
    if not useCRQ:
        meas.addParamSetting("mu_Multijets", True, 1)  # fix QCD
    if not useCRWTY:
        meas.addParamSetting("mu_Z", True, 1)  # fix diboson to MC prediction
        meas.addParamSetting("mu_W", True, 1)  # fix diboson to MC prediction
        meas.addParamSetting("mu_Top", True, 1)  # fix diboson to MC prediction
    if useFlatBkgError:
# Duplicate of the SimpleChannel background-only setup; only the commented-out
# discovery normalisation range differs (0.5 in [0,1] here).
commonSamples = [allbkgSample,dataSample]
configMgr.plotColours = [kGreen,kBlack]

## Parameters of the Measurement
measName = "BasicMeasurement"
measLumi = 1.
measLumiError = 0.039

## Parameters of Channels
cutsRegions = ["SR"]
cutsNBins = 1
cutsBinLow = 0.0
cutsBinHigh = 1.0

## Bkg-only fit
bkgOnly = configMgr.addFitConfig("SimpleChannel_BkgOnly")
bkgOnly.statErrThreshold=None #0.5
bkgOnly.addSamples(commonSamples)
bkgOnly.addSystematic(jes)   # JES correlated across all samples
meas = bkgOnly.addMeasurement(measName,measLumi,measLumiError)
meas.addPOI("mu_SIG")
cutsChannel = bkgOnly.addChannel("cuts",cutsRegions,cutsNBins,cutsBinLow,cutsBinHigh)

## Discovery fit
# NOTE(review): commented code below has the "adFitConfigClone" typo.
#discovery = configMgr.adFitConfigClone(bkgOnly,"SimpleChannel_Discovery")
#discovery.clearSystematics()
#sigSample = Sample("discoveryMode",kBlue)
#sigSample.setNormFactor("mu_SIG",0.5,0.,1.)
#sigSample.setNormByTheory()
#discovery.addSamples(sigSample)
#discovery.setSignalSample(sigSample)
# NOTE(review): this fragment references `s`, which looks like the loop
# variable of an enclosing loop over samples that starts above this excerpt --
# confirm the surrounding context before editing.
for region_name in yields_dict:
    s.buildHisto([yields_dict[region_name][s.name]], region_name, "cuts", 0.5)
    # Stat errors are only attached to the "bkg0" sample.
    if "bkg0" not in s.name.lower():
        continue
    if s.name in stat_err_dict[region_name]:
        s.buildStatErrors([stat_err_dict[region_name][s.name]], region_name, "cuts")
    else:
        # No explicit stat error provided: fall back to sqrt(N).
        print "Setting stat errors for %s in region %s --> %.5f" % (
            s.name, region_name, float(sqrt(yields_dict[region_name][s.name])))
        s.buildStatErrors([sqrt(yields_dict[region_name][s.name])], region_name, "cuts")

# measurement
tlx = configMgr.addFitConfig("BkgOnly")
meas = tlx.addMeasurement(name="NormalMeasurement", lumi=1., lumiErr=0.01)
meas.addPOI("mu_SIG")
tlx.statErrThreshold = 0.001

# setup the channels
all_channels = []
cr_channels = []
vr_channels = []
sr_channels = []
for isample, sample in enumerate(all_samples):
    if "data" in sample.name.lower():
        continue
    sample.setStatConfig(False)
wzSample = Sample("WZ",kAzure+1) #wzSample.setNormFactor("mu_WZ",1.,0.,5.) dataSample = Sample("Data",kBlack) dataSample.setData() dataSample.buildHisto([0.,1.,5.,15.,4.,0.],"SR","metmeff2Jet",0.1,0.1) #dataSample.buildStatErrors([1.,1.,2.4,3.9,2.,0.],"SR","metmeff2Jet") #************** # Exclusion fit #************** if myFitType==FitType.Exclusion: # loop over all signal points for sig in sigSamples: # Fit config instance exclusionFitConfig = configMgr.addFitConfig("Exclusion_"+sig) meas=exclusionFitConfig.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039) meas.addPOI("mu_SIG") # Samples exclusionFitConfig.addSamples([topSample,wzSample,dataSample]) # Systematics exclusionFitConfig.getSample("Top").addSystematic(topKtScale) exclusionFitConfig.getSample("WZ").addSystematic(wzKtScale) exclusionFitConfig.addSystematic(jes) # Channel srBin = exclusionFitConfig.addChannel("met/meff2Jet",["SR"],6,0.1,0.7) srBin.useOverflowBin=True srBin.useUnderflowBin=True
wzSample = Sample("WZ", kAzure + 1) #wzSample.setNormFactor("mu_WZ",1.,0.,5.) dataSample = Sample("Data", kBlack) dataSample.setData() dataSample.buildHisto([0., 1., 5., 15., 4., 0.], "SR", "metmeff2Jet", 0.1, 0.1) #dataSample.buildStatErrors([1.,1.,2.4,3.9,2.,0.],"SR","metmeff2Jet") #************** # Exclusion fit #************** if myFitType == FitType.Exclusion: # loop over all signal points for sig in sigSamples: # Fit config instance exclusionFitConfig = configMgr.addFitConfig("Exclusion_" + sig) meas = exclusionFitConfig.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=0.039) meas.addPOI("mu_SIG") # Samples exclusionFitConfig.addSamples([topSample, wzSample, dataSample]) # Systematics exclusionFitConfig.getSample("Top").addSystematic(topKtScale) exclusionFitConfig.getSample("WZ").addSystematic(wzKtScale) exclusionFitConfig.addSystematic(jes) # Channel srBin = exclusionFitConfig.addChannel("met/meff2Jet", ["SR"], 6, 0.1,
# Apply each "global" systematic to every background sample except the
# data-driven electron/jet fake estimates.
for gsyst in syst_to_all:
    for sample in bkg_samples:
        if sample.name.startswith('efake') or sample.name.startswith('jfake'):
            continue
        sample.addSystematic(gsyst)

#---------
# Fit
#---------

# Background only fit
if myFitType == FitType.Background:
    fitconfig = configMgr.addFitConfig('BkgOnlyFit')

# Discovery fit
elif myFitType == FitType.Discovery:
    fitconfig = configMgr.addFitConfig('DiscoveryFit')

    # Unit-yield sample whose normalisation mu_SIG becomes the discovery POI.
    unitary_sample = Sample('Unitary', ROOT.kViolet+5)
    unitary_sample.setNormFactor('mu_SIG', 1, 0, 10)
    # NOTE(review): other configs call buildHisto(vals, region, varName, binLow);
    # here '0.5' is passed in the variable-name slot -- confirm this is intended.
    unitary_sample.buildHisto([1,], 'SR', '0.5')
    fitconfig.addSamples(unitary_sample)
    fitconfig.setSignalSample(unitary_sample)

# Exclusion fit
elif myFitType == FitType.Exclusion:
    fitconfig = configMgr.addFitConfig('ExclusionFit')
# Single-bin counting experiment for one GGM (m3, mu) grid point.
# NOTE(review): bkgSample, ucb, nbkg, nsig, ndata, lumiError and args are
# defined above this excerpt.
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg], "SR", "cuts", 0.5)
bkgSample.addSystematic(ucb)

# Signal point named after the (m3, mu) parameters; mu_SIG in [0, 10].
sigSample = Sample("GGM_GG_bhmix_%d_%d" % (args.m3, args.mu), ROOT.kOrange + 3)
sigSample.setNormFactor("mu_SIG", 1., 0., 10.)
#sigSample.setStatConfig(True)
sigSample.setNormByTheory()   # lumi uncertainty applies to the signal
sigSample.buildHisto([nsig], "SR", "cuts", 0.5)

dataSample = Sample("Data", ROOT.kBlack)
dataSample.setData()
dataSample.buildHisto([ndata], "SR", "cuts", 0.5)

# Define top-level
ana = configMgr.addFitConfig("Disc")
ana.addSamples([bkgSample, sigSample, dataSample])
ana.setSignalSample(sigSample)

# Define measurement
meas = ana.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=lumiError)
meas.addPOI("mu_SIG")
meas.addParamSetting("Lumi", True)   # hold luminosity constant

# Add the channel
chan = ana.addChannel("cuts", ["SR"], 1, 0.5, 1.5)
ana.setSignalChannels([chan])

# These lines are needed for the user analysis to run
#Systematics discoveryFitConfig.getSample("Top").addSystematic(topKtScale) discoveryFitConfig.getSample("WZ").addSystematic(wzKtScale) discoveryFitConfig.addSystematic(jes) #Channel srBin = discoveryFitConfig.addChannel("cuts", ["SR"], 1, 0.5, 1.5) discoveryFitConfig.setSignalChannels([srBin]) srBin.addDiscoverySamples(["Discovery"], [1.], [0.], [10000.], [kMagenta]) #************** # Exclusion fit #************** if myFitType == FitType.Exclusion: # Fit config instance exclusionFitConfig = configMgr.addFitConfig("Exclusion") meas = exclusionFitConfig.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=0.039) meas.addPOI("mu_SIG") # Samples exclusionFitConfig.addSamples([topSample, wzSample, dataSample]) # Systematics exclusionFitConfig.getSample("Top").addSystematic(topKtScale) exclusionFitConfig.getSample("WZ").addSystematic(wzKtScale) exclusionFitConfig.addSystematic(jes) # Channel srBin = exclusionFitConfig.addChannel("cuts", ["SR"], 1, 0.5, 1.5)
# NOTE(review): this excerpt starts mid-list -- the opening bracket and the
# earlier colour entries are above this excerpt.
    TColor.GetColor(255, 0, 0)
]

# turn off any additional selection cuts
for region_name in region_names:
    configMgr.cutsDict[region_name] = "1."
configMgr.weights = "1."

# Accumulators filled while building the shape-fit configuration.
samples = []
channels = []
POIs = []
signal_sample = None

# prepare the fit configuration
ana = configMgr.addFitConfig("shape_fit")
meas = ana.addMeasurement(name="shape_fit", lumi=1.0, lumiErr=0.01)

# load all MC templates ...
for sample_name, template_name, template_color, is_floating, is_signal in zip(
        sample_names, template_names, template_colors,
        normalization_floating, signal_samples):
    cur_sample = Sample(sample_name, template_color)
    if is_floating:
        # Floating templates get a free normalisation mu_<name> in [0, 100];
        # floating signal normalisations become POIs.
        normalization_name = "mu_" + sample_name
        cur_sample.setNormFactor(normalization_name, 1, 0, 100)
        if is_signal:
            POIs.append(normalization_name)
# Binning parameters for the SR4 meff channel and the lepton-pT channels.
meffBinLowSR4 = 800.
meffBinHighSR4 = 1600.

lepPtNBins = 6
lepPtLow = 20.
lepPtHigh = 600.

# Single-bin counting channel edges.
srNBins = 1
srBinLow = 0.5
srBinHigh = 1.5

#************
#Bkg only fit
#************

bkt = configMgr.addFitConfig("BkgOnly")
if useStat:
    bkt.statErrThreshold=0.05
else:
    bkt.statErrThreshold=None
bkt.addSamples([topSample,wzSample,qcdSample,bgSample,dataSample])

# Systematics to be applied globally within this topLevel
bkt.getSample("Top").addSystematic(topKtScale)
bkt.getSample("WZ").addSystematic(wzKtScale)

meas=bkt.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039)
meas.addPOI("mu_SIG")
meas.addParamSetting("mu_BG",True,1)   # fix the residual background to MC

#-------------------------------------------------
# Cross-section uncertainties on the top samples (helper defined elsewhere).
addSystematic([ttbar_sample], ([ttbar_xsec_uncert]))
addSystematic([single_top_sample], ([single_top_xsec_uncert]))

# Per-region theory uncertainties, keyed by region label.
theory_uncert = {}
theory_uncert['CR_top'] = theory_uncert_adder.getUncertainties('CR_TOP')
theory_uncert['CR_Z'] = theory_uncert_adder.getUncertainties('CR_Z')
theory_uncert['VR_top_1'] = theory_uncert_adder.getUncertainties('VR_TOP_1')
theory_uncert['VR_top_2'] = theory_uncert_adder.getUncertainties('VR_TOP_2')
theory_uncert['VR_top_3'] = theory_uncert_adder.getUncertainties('VR_TOP_3')
theory_uncert['VR_Z'] = theory_uncert_adder.getUncertainties('VR_Z')
theory_uncert['SR_400'] = theory_uncert_adder.getUncertainties('SR_400')
theory_uncert['SR_600'] = theory_uncert_adder.getUncertainties('SR_600')

# ------------------------------------------------------------------------------
# Configure the background only fit
background_config = configMgr.addFitConfig("BkgOnly")
if use_stat:
    background_config.statErrThreshold = 0.05
else:
    background_config.statErrThreshold = None
background_config.addSamples(sample_list_bkg)
background_config.addSamples(sample_list_data)
meas = background_config.addMeasurement(name = "NormalMeasurement", lumi = 1.0, lumiErr = 0.039)
meas.addPOI("mu_SIG")

# ------------------------------------------------------------------------------
# NOTE(review): the body of addChannel is cut off at the end of this excerpt;
# only the signature and the opening of its docstring are visible.
def addChannel(config, expression, name, the_binning):
    """
# Give the analysis a name configMgr.analysisName = "MyUABCDExample" configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName # Define cuts configMgr.cutsDict["A"] = "1." configMgr.cutsDict["B"] = "1." configMgr.cutsDict["C"] = "1." configMgr.cutsDict["D"] = "1." # Define weights configMgr.weights = "1." # Define top-level ana = configMgr.addFitConfig("ABCD") # Define measurement meas = ana.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=lumiError) meas.addPOI("mu_A") """ meas.addParamSetting("mu_dummy_D",True,1) meas.addParamSetting("mu_dummy_B",True,1) meas.addParamSetting("mu_dummy_C",True,1) """ #meas.addParamSetting("Lumi",True,1) #create test data dataSample = Sample("Data", kBlack) dataSample.setData()