Example #1
0
    def create_samples(self):
        print "total samples:",len(sample_names)
        for index in range(len(sample_names)):
            sample_name = sample_names[index]
            sample = Sample(sample_name, sample_colors[index])
            
            file_name = input_dir + sample_name + "_combined.root"
            yields_dic = self.read_hist(file_name)
            print("Sample:", sample_name + ";", end=" ")
            if "data" in sample_name:
                sample.setData()
                self.data_sample = sample
                for region in regions:
                    nevts, nerror = yields_dic[region]
                    sample.buildHisto([nevts], region, "cuts", 0.5)
                    print(region, str(round(nevts, 3)) + ";", end=" ")
                print()
                continue

            sample.setNormByTheory()
            for region in regions:
                nevts, nerror = yields_dic[region]
                nevts *= weight
                nerror *= weight
                # hard-coded override of the Dijets estimate in the SR for specific cut values
                if "Dijets" in sample_name and "SR" in region:
                    if self.cut == 10:
                        nevts = 4.07
                        nerror = math.sqrt(nevts)
                    if self.cut == 14:
                        nevts = 2.42
                        nerror = math.sqrt(nevts)
                sample.buildHisto([nevts], region, "cuts", 0.5)
                sample.buildStatErrors([nerror], region, "cuts")
                print(region, str(round(nevts, 3)) + ";", end=" ")
            print()
            #sample.setStatConfig(True)
            sample.setFileList([in_file_path])
            # add a flat user-defined systematic (+20%/-20%) for each sample
            sample.addSystematic(Systematic(sample_name + "_stats",
                                            configMgr.weights, 1.2, 0.8,
                                            "user", "userOverallSys"))
            #for systematic in self.sys_common:
            #    sample.addSystematic(systematic)
            self.set_norm_factor(sample)
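Example #1 relies on module-level names defined outside the excerpt. Below is a minimal sketch of the context it assumes; every name and value (sample_names, sample_colors, regions, weight, input_dir, in_file_path) is an illustrative assumption, not taken from the original source:

import math
from configManager import configMgr
from configWriter import Sample
from systematic import Systematic
from ROOT import kBlack, kAzure, kGreen

input_dir = "inputs/"                       # directory holding the <sample>_combined.root files
in_file_path = "inputs/all_combined.root"   # file passed to Sample.setFileList
sample_names = ["data", "Zjets", "Dijets"]  # one entry per sample
sample_colors = [kBlack, kAzure, kGreen]    # plotting colours, parallel to sample_names
regions = ["CR", "SR"]                      # region names used as keys of yields_dic
weight = 1.0                                # global scale factor applied to the MC yields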
Example #2
0
# Give the analysis a name
configMgr.analysisName = "MyUserAnalysis_ShapeFactor"
configMgr.outputFileName = "results/%s_Output.root"%configMgr.analysisName

# Define cuts
configMgr.cutsDict["CR"] = "1."
configMgr.cutsDict["SR"] = "1."

# Define weights
configMgr.weights = "1."

# Define samples
bkgSample = Sample("Bkg",kGreen-9)
bkgSample.setNormByTheory(True)
bkgSample.buildHisto(nBkgCR,"CR","cuts",0.5)
bkgSample.buildHisto(nBkgSR,"SR","cuts",0.5)
bkgSample.addSystematic(bg1xsec)

ddSample = Sample("DataDriven",kGreen+2)
ddSample.addShapeFactor("DDShape")

sigSample = Sample("Sig",kPink)
sigSample.setNormFactor("mu_Sig",1.,0.2,1.5)
sigSample.buildHisto(nSigSR,"SR","cuts",0.5)
sigSample.setNormByTheory(True)
sigSample.addSystematic(sigxsec)

dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto(nDataCR,"CR","cuts",0.5)
Example #3
0
topSample.setStatConfig(useStat)
topSample.setNormRegions([("SLWR", "nJet"), ("SLTR", "nJet")])
wzSample = Sample("WZ", kAzure + 1)
wzSample.setNormFactor("mu_WZ", 1., 0., 5.)
wzSample.setStatConfig(useStat)
wzSample.setNormRegions([("SLWR", "nJet"), ("SLTR", "nJet")])
bgSample = Sample("BG", kYellow - 3)
bgSample.setNormFactor("mu_BG", 1., 0., 5.)
bgSample.setStatConfig(useStat)
bgSample.setNormRegions([("SLWR", "nJet"), ("SLTR", "nJet")])
qcdSample = Sample("QCD", kGray + 1)
qcdSample.setQCD(True, "histoSys")
qcdSample.setStatConfig(useStat)
dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([86., 66., 62., 35., 11., 7., 2., 0.], "SLTR", "nJet", 2)
dataSample.buildHisto([1092., 426., 170., 65., 27., 9., 4., 1.], "SLWR",
                      "nJet", 2)

# set the file from which the samples should be taken
for sam in [topSample, wzSample, bgSample, qcdSample, dataSample]:
    sam.setFileList(bgdFiles)

#Binnings
nJetBinLowHard = 3
nJetBinLowSoft = 2
nJetBinHighTR = 10
nJetBinHighWR = 10

nBJetBinLow = 0
nBJetBinHigh = 4
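The binning constants above are not used inside the excerpt itself. As a hedged sketch, values like these typically feed the channel definitions later in the same configuration; the fit-config name bkt, the sample list and the exact channels below are assumptions:

bkt = configMgr.addFitConfig("BkgOnly")
bkt.addSamples([topSample, wzSample, bgSample, qcdSample, dataSample])

# nJet channels in the W and top control regions, using the bin edges defined above
nJetWS = bkt.addChannel("nJet", ["SLWR"], nJetBinHighWR - nJetBinLowSoft, nJetBinLowSoft, nJetBinHighWR)
nJetTS = bkt.addChannel("nJet", ["SLTR"], nJetBinHighTR - nJetBinLowHard, nJetBinLowHard, nJetBinHighTR)
bkt.addBkgConstrainChannels([nJetWS, nJetTS])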
Example #4
0
                        ktScaleTopLowWeights, "weight", "overallSys")

#JES (tree-based)
jes = Systematic("JES", "_NoSys", "_JESup", "_JESdown", "tree", "overallSys")
configMgr.nomName = "_NoSys"

#-------------------------------------------
# List of samples and their plotting colours
#-------------------------------------------
topSample = Sample("Top", kGreen - 9)
#topSample.setNormFactor("mu_Top",1.,0.,5.)
wzSample = Sample("WZ", kAzure + 1)
#wzSample.setNormFactor("mu_WZ",1.,0.,5.)
dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([3.], "SR", "cuts", 0.5)

#**************
# Discovery fit
#**************

if myFitType == FitType.Discovery:

    #Fit config instance
    discoveryFitConfig = configMgr.addTopLevelXML("Discovery")
    meas = discoveryFitConfig.addMeasurement(name="NormalMeasurement",
                                             lumi=1.0,
                                             lumiErr=0.039)
    meas.addPOI("mu_Discovery")

    #Samples
Example #5
0
    #                   ZX SAMPLE                    #
    #------------------------------------------------#

    zxSample.setStatConfig(useStat)
    if userOpts.splitMCSys:
        zxSample.addSystematic(sysObj.AR_mcstat_ZX)
    zxSample.setNormByTheory()
    zxSample = addSys(zxSample, False, sysObj)

    #------------------------------------------------#
    #                     FAKES                      #
    #------------------------------------------------#
    # We don't yet have the final fake estimates, so use the preliminary results per region.
    # Negative yields are set to 0.1: we expect the fakes not to contribute, so we take this
    # small value and keep the relative stat+syst uncertainty to stay conservative.
    fakeSample.buildHisto([0.24],   "eeSuper0a",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0a",  "cuts")
    fakeSample.buildHisto([0.37],   "emSuper0a",  "cuts")
    fakeSample.buildHisto([0.1],   "eeSuper0b",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0b",  "cuts")
    fakeSample.buildHisto([0.1],   "emSuper0b",  "cuts")
    fakeSample.buildHisto([0.02],  "eeSuper0c",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0c",  "cuts")
    fakeSample.buildHisto([0.1],   "emSuper0c",  "cuts")
    fakeSample.buildHisto([3.85],  "eeSuper1a",  "cuts")
    fakeSample.buildHisto([8.09],  "mmSuper1a",  "cuts")
    fakeSample.buildHisto([5.76],  "emSuper1a",  "cuts")
    fakeSample.buildHisto([0.65],  "eeSuper1b",  "cuts")
    fakeSample.buildHisto([0.09],   "mmSuper1b",  "cuts")
    fakeSample.buildHisto([0.64],  "emSuper1b",  "cuts")
    fakeSample.buildHisto([1.88],  "eeSuper1c",  "cuts")
Example #7
0
##########################

# Give the analysis a name
configMgr.analysisName = "MyUpperLimitAnalysis_SS"
configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName

# Define cuts
configMgr.cutsDict["UserRegion"] = "1."

# Define weights
configMgr.weights = "1."

# Define samples
bkgSample = Sample("Bkg", kGreen - 9)
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg], "UserRegion", "cuts", 0.5)

bkgSample.addSystematic(ucb)

sigSample = Sample("Sig", kPink)
sigSample.setNormFactor("mu_SS", 1., 0., 10.)
#sigSample.setStatConfig(True)
sigSample.setNormByTheory()
sigSample.buildHisto([nsig], "UserRegion", "cuts", 0.5)

dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([ndata], "UserRegion", "cuts", 0.5)

# Define top-level
ana = configMgr.addFitConfig("SPlusB")
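The excerpt stops right after the fit configuration is created. A hedged sketch of the usual continuation (the single-bin channel and the luminosity error are illustrative assumptions):

ana.addSamples([bkgSample, sigSample, dataSample])
ana.setSignalSample(sigSample)

# single-bin counting channel in the user region
chan = ana.addChannel("cuts", ["UserRegion"], 1, 0.5, 1.5)
ana.addSignalChannels([chan])

# measurement and parameter of interest
meas = ana.addMeasurement(name="NormalMeasurement", lumi=1.0, lumiErr=0.039)
meas.addPOI("mu_SS")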
Example #8
0
##########################

# Give the analysis a name
configMgr.analysisName = "MyUserAnalysis"
configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName

# Define cuts
configMgr.cutsDict["UserRegion"] = "1."

# Define weights
configMgr.weights = "1."

# Define samples
bkgSample = Sample("Bkg", kGreen - 9)
bkgSample.setStatConfig(False)
bkgSample.buildHisto([nbkg], "UserRegion", "cuts")
# bkgSample.buildStatErrors([nbkgErr],"UserRegion","cuts")
# bkgSample.addSystematic(corb)
bkgSample.addSystematic(ucb)

sigSample = Sample("Sig", kPink)
sigSample.setNormFactor("mu_Sig", 1.0, 0.0, 100.0)
sigSample.setStatConfig(False)
sigSample.setNormByTheory(False)
sigSample.buildHisto([nsig], "UserRegion", "cuts")
# sigSample.buildStatErrors([nsigErr],"UserRegion","cuts")
# sigSample.addSystematic(cors)
# sigSample.addSystematic(ucs)

dataSample = Sample("Data", kBlack)
dataSample.setData()
Example #9
0
gammajets = Sample("gammajets",28) # brown
gammajets.setStatConfig(True)
gammajets.setNormByTheory(False)
#gammajets.addSystematic(qcdElNorm)
#gammajets.setQCD()

data = Sample("data",kBlack)
data.setData()


commonSamples = [ttbargamma, Wgamma, Wjets, ttbarDilep, singletop, Zgamma, Zjets, diboson, gammajets, data]

for lepton in ['El']:
    #for region in ("WCRlHT","WCRhHT", "HMEThHT","HMETmeff", "HMThHT","HMTmeff", "SRS", "SRW"):
    for region in ("WCRlHT","WCRhHT", "HMEThHT", "HMThHT", "SRW"):
        ttbargamma.buildHisto([backyields.GetYield(lepton, region, "ttbargamma")], region+lepton, "cuts")
        ttbargamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "ttbargamma")], region+lepton, "cuts")
        Wgamma.buildHisto([backyields.GetYield(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wgamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wjets.buildHisto([backyields.GetYield(lepton, region, "Wjets")], region+lepton, "cuts")
        Wjets.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Wjets")], region+lepton, "cuts")
        ttbarDilep.buildHisto([backyields.GetYield(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        ttbarDilep.buildStatErrors([backyields.GetYieldUnc(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        singletop.buildHisto([backyields.GetYield(lepton, region, "singletop")], region+lepton, "cuts")
        singletop.buildStatErrors([backyields.GetYieldUnc(lepton, region, "singletop")], region+lepton, "cuts")
        Zgamma.buildHisto([backyields.GetYield(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zgamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zjets.buildHisto([backyields.GetYield(lepton, region, "Zjets")], region+lepton, "cuts")
        Zjets.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Zjets")], region+lepton, "cuts")
        diboson.buildHisto([backyields.GetYield(lepton, region, "diboson")], region+lepton, "cuts")
        diboson.buildStatErrors([backyields.GetYieldUnc(lepton, region, "diboson")], region+lepton, "cuts")
Example #10
0
def common_setting(mass):
    from configManager import configMgr
    from ROOT import kBlack, kGray, kRed, kPink, kViolet, kBlue, kAzure, kGreen, \
        kOrange
    from configWriter import Sample
    from systematic import Systematic
    import os

    color_dict = {
        "Zbb": kAzure,
        "Zbc": kAzure,
        "Zbl": kAzure,
        "Zcc": kAzure,
        "Zcl": kBlue,
        "Zl": kBlue,
        "Wbb": kGreen,
        "Wbc": kGreen,
        "Wbl": kGreen,
        "Wcc": kGreen,
        "Wcl": kGreen,
        "Wl": kGreen,
        "ttbar": kOrange,
        "stop": kOrange,
        "stopWt": kOrange,
        "ZZPw": kGray,
        "WZPw": kGray,
        "WWPw": kGray,
        "fakes": kPink,
        "Zjets": kAzure,
        "Wjets": kGreen,
        "top": kOrange,
        "diboson": kGray,
        "$Z\\tau\\tau$+HF": kAzure,
        "$Z\\tau\\tau$+LF": kBlue,
        "$W$+jets": kGreen,
        "$Zee$": kViolet,
        "Zhf": kAzure,
        "Zlf": kBlue,
        "Zee": kViolet,
        "others": kViolet,
        signal_prefix + "1000": kRed,
        signal_prefix + "1100": kRed,
        signal_prefix + "1200": kRed,
        signal_prefix + "1400": kRed,
        signal_prefix + "1600": kRed,
        signal_prefix + "1800": kRed,
        signal_prefix + "2000": kRed,
        signal_prefix + "2500": kRed,
        signal_prefix + "3000": kRed,
        # Add your new processes here
        "VH": kGray + 2,
        "VHtautau": kGray + 2,
        "ttH": kGray + 2,
    }

    ##########################

    # Setting the parameters of the hypothesis test
    configMgr.doExclusion = True  # True=exclusion, False=discovery
    configMgr.nTOYs = 10000  # default=5000
    configMgr.calculatorType = 0  # 2=asymptotic calculator, 0=frequentist calculator
    configMgr.testStatType = 3  # 3=one-sided profile likelihood test statistic (LHC default)
    configMgr.nPoints = 30  # number of values scanned of signal-strength for upper-limit determination of signal strength.
    configMgr.writeXML = False
    configMgr.seed = 40
    configMgr.toySeedSet = True
    configMgr.toySeed = 400

    # Pruning: a systematic uncertainty is removed if its effect is negligible
    # - an overallSys is removed if both the difference between the up variation and the nominal and the
    #   difference between the down variation and the nominal are below a certain (user-given) threshold
    # - for histoSys types, the situation is more complex:
    #   - a first check tests whether the integral of the up histogram minus the integral of the nominal
    #     histogram is smaller than the integral of the nominal histogram, and the same for the down histogram
    #   - a second check tests whether the shapes of the up, down and nominal histograms are very similar;
    #     only when both conditions are fulfilled is the systematic removed
    # default is False, so the pruning is normally not enabled
    configMgr.prun = True
    # The threshold below which an uncertainty is considered small is set via configMgr.prunThreshold,
    # given as the fractional deviation with respect to the nominal histogram. The default is currently 0.01,
    # corresponding to 1% (this might be too aggressive for some analyses!).
    configMgr.prunThreshold = 0.005
    # method 1: a chi2 test (still somewhat experimental, so check whether it works for your case)
    # method 2 (default): check for every bin of the histograms that the difference between the up variation
    #                     and the nominal, and between the down variation and the nominal, is below a certain threshold
    configMgr.prunMethod = 2
    # Smoothing: HistFitter does not provide any smoothing tools.
    # More Details: https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/HistFitterAdvancedTutorial#Pruning_in_HistFitter

    ##########################

    # Keep SRs also in background fit configuration
    configMgr.keepSignalRegionType = True
    configMgr.blindSR = BLIND

    # Give the analysis a name
    configMgr.analysisName = "bbtautau" + "X" + mass
    configMgr.histCacheFile = "data/" + configMgr.analysisName + ".root"
    configMgr.outputFileName = "results/" + configMgr.analysisName + "_Output.root"

    # Define cuts
    configMgr.cutsDict["SR"] = "1."

    # Define weights
    configMgr.weights = "1."

    # Define samples
    list_samples = []

    yields_mass = yields[mass]
    for process, yields_process in yields_mass.items():
        if process == 'data' or signal_prefix in process: continue
        # print("-> {} / Colour: {}".format(process, color_dict[process]))
        bkg = Sample(str(process), color_dict[process])
        bkg.setStatConfig(stat_config)
        # OLD: add lumi uncertainty (bkg/sig correlated, not for data-driven fakes)
        # NOW: add lumi by hand
        bkg.setNormByTheory(False)
        noms = yields_process["nEvents"]
        errors = yields_process["nEventsErr"] if use_mcstat else [0.0]
        # print("  nEvents (StatError): {} ({})".format(noms, errors))
        bkg.buildHisto(noms, "SR", my_disc, 0.5)
        bkg.buildStatErrors(errors, "SR", my_disc)
        if not stat_only and not no_syst:
            if process == 'fakes':
                key_here = "ATLAS_FF_1BTAG_SIDEBAND_Syst_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.50, 0.50,
                                   "user", syst_type))
            else:
                key_here = "ATLAS_Lumi_Run2_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.017, 0.983,
                                   "user", syst_type))
            for key, values in yields_process.items():
                if 'ATLAS' not in key: continue
                if impact_check_continue(dict_syst_check, key): continue
                # this should not be applied to the Sherpa samples
                if process == 'Zhf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                if process == 'Zlf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                ups = values[0]
                downs = values[1]
                systUpRatio = [
                    u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
                ]
                systDoRatio = [
                    d / n if n != 0. else float(1.)
                    for d, n in zip(downs, noms)
                ]
                bkg.addSystematic(
                    Systematic(str(key), configMgr.weights, systUpRatio,
                               systDoRatio, "user", syst_type))
        list_samples.append(bkg)

    # FIXME: This is unusual!
    top = Sample('top', kOrange)
    top.setStatConfig(False)  # No stat error
    top.setNormByTheory(False)  # consider lumi for it
    top.buildHisto([0.00001], "SR", my_disc, 0.5)  # small enough
    # HistFitter can accept such a large up ratio
    # Systematic(name, weight, ratio_up, ratio_down, syst_type, syst_histfactory_type)
    if not stat_only and not no_syst:
        key_here = 'ATLAS_TTBAR_YIELD_UPPER_hadhad'
        if not impact_check_continue(dict_syst_check, key_here):
            top.addSystematic(
                Systematic(key_here, configMgr.weights, unc_ttbar[mass], 0.9,
                           "user", syst_type))
    list_samples.append(top)

    sigSample = Sample("Sig", kRed)
    sigSample.setNormFactor("mu_Sig", 1., 0., 100.)
    #sigSample.setStatConfig(stat_config)
    sigSample.setStatConfig(False)
    sigSample.setNormByTheory(False)
    noms = yields_mass[signal_prefix + mass]["nEvents"]
    errors = yields_mass[signal_prefix +
                         mass]["nEventsErr"] if use_mcstat else [0.0]
    sigSample.buildHisto([n * MY_SIGNAL_NORM * 1e-3 for n in noms], "SR",
                         my_disc, 0.5)
    #sigSample.buildStatErrors(errors, "SR", my_disc)
    for key, values in yields_mass[signal_prefix + mass].items():
        if 'ATLAS' not in key: continue
        if impact_check_continue(dict_syst_check, key):
            continue
        ups = values[0]
        downs = values[1]
        systUpRatio = [
            u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
        ]
        systDoRatio = [
            d / n if n != 0. else float(1.) for d, n in zip(downs, noms)
        ]
        if not stat_only and not no_syst:
            sigSample.addSystematic(
                Systematic(str(key), configMgr.weights, systUpRatio,
                           systDoRatio, "user", syst_type))
    if not stat_only and not no_syst:
        key_here = "ATLAS_SigAccUnc_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights,
                           [1 + unc_sig_acc[mass] for i in range(my_nbins)],
                           [1 - unc_sig_acc[mass]
                            for i in range(my_nbins)], "user", syst_type))
        key_here = "ATLAS_Lumi_Run2_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights, 1.017, 0.983, "user",
                           syst_type))

    list_samples.append(sigSample)

    # Set observed and expected number of events in counting experiment
    n_SPlusB = yields_mass[signal_prefix +
                           mass]["nEvents"][0] + sum_of_bkg(yields_mass)[0]
    n_BOnly = sum_of_bkg(yields_mass)[0]
    if BLIND:
        # configMgr.useAsimovSet = True # Use the Asimov dataset
        # configMgr.generateAsimovDataForObserved = True # Generate Asimov data as obsData for UL
        # configMgr.useSignalInBlindedData = False
        ndata = sum_of_bkg(yields_mass)
    else:
        try:
            ndata = yields_mass["data"]["nEvents"]
        except KeyError:
            ndata = [0. for _ in range(my_nbins)]

    lumiError = 0.017  # Relative luminosity uncertainty

    dataSample = Sample("Data", kBlack)
    dataSample.setData()
    dataSample.buildHisto(ndata, "SR", my_disc, 0.5)
    list_samples.append(dataSample)

    # Define top-level
    ana = configMgr.addFitConfig("SPlusB")
    ana.addSamples(list_samples)
    ana.setSignalSample(sigSample)

    # Define measurement
    meas = ana.addMeasurement(name="NormalMeasurement",
                              lumi=1.0,
                              lumiErr=lumiError / 100000.)
    # the lumi error is made very small so that it gets pruned away;
    # we use the luminosity systematic added by hand instead
    meas.addPOI("mu_Sig")
    #meas.statErrorType = "Poisson"
    # Fix the luminosity in HistFactory to constant
    meas.addParamSetting("Lumi", True, 1)

    # Add the channel
    chan = ana.addChannel(my_disc, ["SR"], my_nbins, my_xmin, my_xmax)
    chan.blind = BLIND
    #chan.statErrorType = "Poisson"
    ana.addSignalChannels([chan])

    # These lines are needed for the user analysis to run
    # Make sure file is re-made when executing HistFactory
    if configMgr.executeHistFactory:
        if os.path.isfile("data/%s.root" % configMgr.analysisName):
            os.remove("data/%s.root" % configMgr.analysisName)
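A hedged usage sketch for the function above; the mass point is illustrative, and the module-level globals it relies on (yields, signal_prefix, BLIND, my_disc, my_nbins, my_xmin, my_xmax, ...) must be defined elsewhere:

common_setting("1000")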
Example #11
0
singletop.setStatConfig(True)
singletop.addSystematic(singletopNorm)

gammajets = Sample("gammajets",28) # brown
gammajets.setStatConfig(True)
gammajets.addSystematic(qcdElNorm)
#gammajets.setQCD()

data = Sample("data",kBlack)
data.setData()

commonSamples = [ttbargamma, Wgamma, Wjets, ttbarDilep, singletop, Zgamma, Zjets, diboson, gammajets, data]

for lepton in ('El', 'Mu'):
    for region in ("WCRlHT","WCRhHT", "HMEThHT","HMETmeff", "HMThHT","HMTmeff", "SRS", "SRW"):
        ttbargamma.buildHisto([Tables.GetYield(lepton, region, "ttbargamma")], region+lepton, "cuts")
        ttbargamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "ttbargamma")], region+lepton, "cuts")
        Wgamma.buildHisto([Tables.GetYield(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wgamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wjets.buildHisto([Tables.GetYield(lepton, region, "Wjets")], region+lepton, "cuts")
        Wjets.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Wjets")], region+lepton, "cuts")
        ttbarDilep.buildHisto([Tables.GetYield(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        ttbarDilep.buildStatErrors([Tables.GetYieldUnc(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        singletop.buildHisto([Tables.GetYield(lepton, region, "singletop")], region+lepton, "cuts")
        singletop.buildStatErrors([Tables.GetYieldUnc(lepton, region, "singletop")], region+lepton, "cuts")
        Zgamma.buildHisto([Tables.GetYield(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zgamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zjets.buildHisto([Tables.GetYield(lepton, region, "Zjets")], region+lepton, "cuts")
        Zjets.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Zjets")], region+lepton, "cuts")
        diboson.buildHisto([Tables.GetYield(lepton, region, "diboson")], region+lepton, "cuts")
        diboson.buildStatErrors([Tables.GetYieldUnc(lepton, region, "diboson")], region+lepton, "cuts")
Example #12
0
    ##########################

    # Give the analysis a name
    configMgr.analysisName = "SimpleUL_%s" % SR
    configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName

    # Define cuts
    configMgr.cutsDict["UserRegion"] = "1."

    # Define weights
    configMgr.weights = "1."

    # Define samples
    bkgSample = Sample("Bkg", kGreen - 9)
    bkgSample.setStatConfig(False)
    bkgSample.buildHisto([nbkg], "UserRegion", "cuts")
    #bkgSample.buildStatErrors([nbkgErr],"UserRegion","cuts")
    #bkgSample.addSystematic(corb)
    bkgSample.addSystematic(ucb)

    dataSample = Sample("Data", kBlack)
    dataSample.setData()
    dataSample.buildHisto([ndata], "UserRegion", "cuts")

    # Define top-level
    ana = configMgr.addFitConfig("SPlusB")
    ana.addSamples([bkgSample, dataSample])
    #ana.setSignalSample(sigSample)

    # Define measurement
    meas = ana.addMeasurement(name="NormalMeasurement",
Example #13
0
topKtScale = Systematic("KtScaleTop",configMgr.weights,ktScaleTopHighWeights,ktScaleTopLowWeights,"weight","overallSys")

#JES (tree-based)
jes = Systematic("JES","_NoSys","_JESup","_JESdown","tree","overallSys")
configMgr.nomName = "_NoSys"

#-------------------------------------------
# List of samples and their plotting colours
#-------------------------------------------
topSample = Sample("Top",kGreen-9)
#topSample.setNormFactor("mu_Top",1.,0.,5.)
wzSample = Sample("WZ",kAzure+1)
#wzSample.setNormFactor("mu_WZ",1.,0.,5.)
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([3.],"SR","cuts",0.5)

#**************
# Discovery fit
#**************

if myFitType==FitType.Discovery:
 
   #Fit config instance
   discoveryFitConfig = configMgr.addTopLevelXML("Discovery")
   meas=discoveryFitConfig.addMeasurement(name="NormalMeasurement",lumi=1.0,lumiErr=0.039)
   meas.addPOI("mu_Discovery")
 
   #Samples
   discoveryFitConfig.addSamples([topSample,wzSample,dataSample])
Example #14
0
    
    # ----------------------------------------------------- #
    #                        Data                           # 
    # ----------------------------------------------------- #

    dataSample.setData()

    # ----------------------------------------------------- #
    #                        Fakes                          # 
    # ----------------------------------------------------- #
    
    # Set the fake estimates per region by hand.
    # In regions where we expect a negative yield, set the
    # yield to 0.1 but keep the overall relative uncertainty.

    fakeSample.buildHisto([0.1],   "eeSuper0a",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0a",  "cuts")
    fakeSample.buildHisto([0.1],   "emSuper0a",  "cuts")
    fakeSample.buildHisto([0.1],   "eeSuper0b",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0b",  "cuts")
    fakeSample.buildHisto([0.1],   "emSuper0b",  "cuts")
    fakeSample.buildHisto([0.02],  "eeSuper0c",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper0c",  "cuts")
    fakeSample.buildHisto([0.1],   "emSuper0c",  "cuts")
    fakeSample.buildHisto([3.46],  "eeSuper1a",  "cuts")
    fakeSample.buildHisto([4.18],  "mmSuper1a",  "cuts")
    fakeSample.buildHisto([3.62],  "emSuper1a",  "cuts")
    fakeSample.buildHisto([0.55],  "eeSuper1b",  "cuts")
    fakeSample.buildHisto([0.1],   "mmSuper1b",  "cuts")
    fakeSample.buildHisto([0.57],  "emSuper1b",  "cuts")
    fakeSample.buildHisto([1.70],  "eeSuper1c",  "cuts")
Example #15
0
topSample.setStatConfig(useStat)
topSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
wzSample = Sample("WZ",kAzure+1)
wzSample.setNormFactor("mu_WZ",1.,0.,5.)
wzSample.setStatConfig(useStat)
wzSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
bgSample = Sample("BG",kYellow-3)
bgSample.setNormFactor("mu_BG",1.,0.,5.)
bgSample.setStatConfig(useStat)
bgSample.setNormRegions([("SLWR","nJet"),("SLTR","nJet")])
qcdSample = Sample("QCD",kGray+1)
qcdSample.setQCD(True,"histoSys")
qcdSample.setStatConfig(useStat)
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([86.,66.,62.,35.,11.,7.,2.,0.],"SLTR","nJet",2)
dataSample.buildHisto([1092.,426.,170.,65.,27.,9.,4.,1.],"SLWR","nJet",2)

# set the file from which the samples should be taken
for sam in [topSample, wzSample, bgSample, qcdSample, dataSample]:
        sam.setFileList(bgdFiles)

#Binnings
nJetBinLowHard = 3
nJetBinLowSoft = 2
nJetBinHighTR = 10
nJetBinHighWR = 10


nBJetBinLow = 0
nBJetBinHigh = 4
Example #16
0
#topKtScale = Systematic("KtScaleTop",configMgr.weights,ktScaleTopHighWeights,ktScaleTopLowWeights,"weight","normHistoSys")

#JES (tree-based)
jes = Systematic("JES", "_NoSys", "_JESup", "_JESdown", "tree", "overallSys")
configMgr.nomName = "_NoSys"

#-------------------------------------------
# List of samples and their plotting colours
#-------------------------------------------
topSample = Sample("Top", kGreen - 9)
#topSample.setNormFactor("mu_Top",1.,0.,5.)
wzSample = Sample("WZ", kAzure + 1)
#wzSample.setNormFactor("mu_WZ",1.,0.,5.)
dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([0., 1., 5., 15., 4., 0.], "SR", "metmeff2Jet", 0.1, 0.1)
#dataSample.buildStatErrors([1.,1.,2.4,3.9,2.,0.],"SR","metmeff2Jet")

#**************
# Exclusion fit
#**************
if myFitType == FitType.Exclusion:

    # loop over all signal points
    for sig in sigSamples:
        # Fit config instance
        exclusionFitConfig = configMgr.addFitConfig("Exclusion_" + sig)
        meas = exclusionFitConfig.addMeasurement(name="NormalMeasurement",
                                                 lumi=1.0,
                                                 lumiErr=0.039)
        meas.addPOI("mu_SIG")
Example #17
0
    #                   ZX SAMPLE                    #
    #------------------------------------------------#

    zxSample.setStatConfig(useStat)
    if userOpts.splitMCSys:
        zxSample.addSystematic(sysObj.AR_mcstat_ZX)
    zxSample.setNormByTheory()
    zxSample = addSys(zxSample, False, sysObj)

    #------------------------------------------------#
    #                     FAKES                      #
    #------------------------------------------------#
    # We don't yet have the final fake estimates, so use the preliminary results per region.
    # Negative yields are set to 0.1: we expect the fakes not to contribute, so we take this
    # small value and keep the relative stat+syst uncertainty to stay conservative.
    fakeSample.buildHisto([0.99], "eeSuper0a", "cuts")
    fakeSample.buildHisto([0.1],  "mmSuper0a", "cuts")
    fakeSample.buildHisto([0.1],  "emSuper0a", "cuts")
    fakeSample.buildHisto([0.1],  "eeSuper0b", "cuts")
    fakeSample.buildHisto([0.1],  "mmSuper0b", "cuts")
    fakeSample.buildHisto([0.1],  "emSuper0b", "cuts")
    fakeSample.buildHisto([0.02], "eeSuper0c", "cuts")
    fakeSample.buildHisto([0.1],  "mmSuper0c", "cuts")
    fakeSample.buildHisto([0.1],  "emSuper0c", "cuts")
    fakeSample.buildHisto([1.07], "eeSuper1a", "cuts")
    fakeSample.buildHisto([2.12], "mmSuper1a", "cuts")
    fakeSample.buildHisto([0.37], "emSuper1a", "cuts")
    fakeSample.buildHisto([0.49], "eeSuper1b", "cuts")
    fakeSample.buildHisto([0.1],  "mmSuper1b", "cuts")
    fakeSample.buildHisto([0.1],  "emSuper1b", "cuts")
    fakeSample.buildHisto([0.31], "eeSuper1c", "cuts")
Example #18
0
##########################

# Give the analysis a name
configMgr.analysisName = "PhotonMetAnalysis_Simple"
configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName

# Define cuts
configMgr.cutsDict["SR"] = "1."

# Define weights
configMgr.weights = "1."

# Define samples
bkgSample = Sample("Bkg", ROOT.kGreen - 9)
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg], "SR", "cuts", 0.5)
bkgSample.addSystematic(ucb)

sigSample = Sample("GGM_GG_bhmix_%d_%d" % (args.m3, args.mu), ROOT.kOrange + 3)
sigSample.setNormFactor("mu_SIG", 1., 0., 10.)
#sigSample.setStatConfig(True)
sigSample.setNormByTheory()
sigSample.buildHisto([nsig], "SR", "cuts", 0.5)

dataSample = Sample("Data", ROOT.kBlack)
dataSample.setData()
dataSample.buildHisto([ndata], "SR", "cuts", 0.5)

# Define top-level
ana = configMgr.addFitConfig("Disc")
ana.addSamples([bkgSample, sigSample, dataSample])
Example #19
0
#topKtScale = Systematic("KtScaleTop",configMgr.weights,ktScaleTopHighWeights,ktScaleTopLowWeights,"weight","normHistoSys")

#JES (tree-based)
jes = Systematic("JES","_NoSys","_JESup","_JESdown","tree","overallSys")
configMgr.nomName = "_NoSys"

#-------------------------------------------
# List of samples and their plotting colours
#-------------------------------------------
topSample = Sample("Top",kGreen-9)
#topSample.setNormFactor("mu_Top",1.,0.,5.)
wzSample = Sample("WZ",kAzure+1)
#wzSample.setNormFactor("mu_WZ",1.,0.,5.)
dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([0.,1.,5.,15.,4.,0.],"SR","metmeff2Jet",0.1,0.1)
#dataSample.buildStatErrors([1.,1.,2.4,3.9,2.,0.],"SR","metmeff2Jet")

#**************
# Exclusion fit
#**************
if myFitType==FitType.Exclusion:
    
    # loop over all signal points
    for sig in sigSamples:
        # Fit config instance
        exclusionFitConfig = configMgr.addFitConfig("Exclusion_" + sig)
        meas = exclusionFitConfig.addMeasurement(name="NormalMeasurement",
                                                 lumi=1.0, lumiErr=0.039)
        meas.addPOI("mu_SIG")

        # Samples
Example #20
0
        normalization_name = "mu_" + sample_name
        cur_sample.setNormFactor(normalization_name, 1, 0, 100)

        if is_signal:
            POIs.append(normalization_name)
            signal_sample = cur_sample

    # ... for all regions
    for region_name, region_infile in zip(region_names, region_infiles):
        binvals, edges = HistogramImporter.import_histogram(
            os.path.join(indir, region_infile), template_name)
        bin_width = edges[1] - edges[0]

        cur_sample.buildHisto(binvals,
                              region_name,
                              "mBB",
                              binLow=edges[0],
                              binWidth=bin_width)

    samples.append(cur_sample)

# also make the (Asimov) data sample
data_sample = Sample("data", ROOT.kBlack)
data_sample.setData()

# in each region, the data histogram holds the total event content summed over all samples
for region_name, region_infile in zip(region_names, region_infiles):

    binvals = None
    for sample_name, template_name in zip(sample_names, template_names):
        sample_binvals, edges = HistogramImporter.import_histogram(
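            # NOTE: the original excerpt is cut off mid-call here; the completion of this call,
            # the accumulation and the final buildHisto below are a hedged sketch that mirrors
            # the per-sample pattern above, not the author's original code.
            os.path.join(indir, region_infile), template_name)
        if binvals is None:
            binvals = list(sample_binvals)
        else:
            binvals = [b + s for b, s in zip(binvals, sample_binvals)]

    # the data histogram in each region is the sum of all sample templates
    data_sample.buildHisto(binvals, region_name, "mBB",
                           binLow=edges[0], binWidth=edges[1] - edges[0])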
Example #21
0
#---------
#   Fit
#---------

# Background only fit
if myFitType == FitType.Background:
    fitconfig = configMgr.addFitConfig('BkgOnlyFit')

# Discovery fit
elif myFitType == FitType.Discovery:
    fitconfig = configMgr.addFitConfig('DiscoveryFit')

    unitary_sample = Sample('Unitary', ROOT.kViolet+5)
    unitary_sample.setNormFactor('mu_SIG', 1, 0, 10)
    unitary_sample.buildHisto([1,], 'SR', '0.5')

    fitconfig.addSamples(unitary_sample)
    fitconfig.setSignalSample(unitary_sample)
    
# Exclusion fit
elif myFitType == FitType.Exclusion:
    fitconfig = configMgr.addFitConfig('ExclusionFit')


fitconfig.addSamples(bkg_samples + data_samples)

# Measurement
measName = "BasicMeasurement"
measLumi = 1.0
measLumiError = 0.029 # Preliminary for ICHEP: 2.9% (3.7% for 2016 and 2.1% for 2015)
Example #22
0
# ------------------------------------------------------------------------------
# Configure exclusion fits
print('Setting up exclusion fit!')

exclusion_sr_config = configMgr.addFitConfigClone(background_config, "Sig_excl")
for region_name in configMgr.cutsDict.keys():
    if 'SR' not in region_name:
        continue

    print('this sr name: ', region_name)

    sig_sample = Sample('fake_signal', kViolet+5)
    sig_sample.setNormFactor('mu_SIG', 1, 0, 10)

    sig_sample.buildHisto([1], region_name, '0.5')

    exclusion_sr_config.addSamples(sig_sample)
    exclusion_sr_config.setSignalSample(sig_sample)

# ------------------------------------------------------------------------------
# Create TLegend for our plots
# TCanvas is needed for this, but it gets deleted afterwards
c = TCanvas()
compFillStyle = 1001 # see ROOT for Fill styles
leg = TLegend(0.6, 0.475, 0.9, 0.925, "")
leg.SetFillStyle(0)
leg.SetFillColor(0)
leg.SetBorderSize(0)

# Data entry
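# NOTE: the excerpt stops at the "Data entry" comment above; the lines below are a hedged
# sketch of how the data legend entry is typically filled in with plain ROOT calls
# (the label, marker colour and marker style are assumptions).
entry = leg.AddEntry("", "Data", "p")
entry.SetMarkerColor(kBlack)
entry.SetMarkerStyle(20)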
Example #23
0
# Define measurement
meas = ana.addMeasurement(name="NormalMeasurement",
                          lumi=1.0,
                          lumiErr=lumiError)
meas.addPOI("mu_A")
"""
meas.addParamSetting("mu_dummy_D",True,1)
meas.addParamSetting("mu_dummy_B",True,1)
meas.addParamSetting("mu_dummy_C",True,1)
"""
#meas.addParamSetting("Lumi",True,1)

#create test data
dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([ndataA], "A", "cuts", 0.5)
dataSample.buildHisto([ndataB], "B", "cuts", 0.5)
dataSample.buildHisto([ndataC], "C", "cuts", 0.5)
dataSample.buildHisto([ndataD], "D", "cuts", 0.5)

backgroundSample = Sample("NonQCDBackground", kBlack)
backgroundSample.buildHisto([nbkgA], "A", "cuts", 0.5)
backgroundSample.buildHisto([nbkgB], "B", "cuts", 0.5)
backgroundSample.buildHisto([nbkgC], "C", "cuts", 0.5)
backgroundSample.buildHisto([nbkgD], "D", "cuts", 0.5)

ana.addSamples([dataSample, backgroundSample])

#make dummy samples
bkgSampleA = Sample("dummy_BkgA", kBlue)
bkgSampleA.buildHisto([1], "A", "cuts", 0.5)