Example #1
    def create_samples(self):
        print "total samples:",len(sample_names)
        for index in range(len(sample_names)):
            sample_name = sample_names[index]
            sample = Sample(sample_name, sample_colors[index])
            
            file_name = input_dir+sample_name+"_combined.root"
            yields_dic = self.read_hist(file_name)
            print "Sample:", sample_name+";",
            if "data" in sample_name:
                sample.setData()
                self.data_sample = sample
                for region in regions:
                    nevts, nerror = yields_dic[region]
                    sample.buildHisto([nevts], region, "cuts", 0.5)
                    print(region, str(round(nevts, 3)) + ";", end=" ")
                print()
                continue

            sample.setNormByTheory()
            for region in regions:
                nevts, nerror = yields_dic[region]
                nevts *= weight
                nerror *= weight
                if "Dijets" in sample_name and "SR" in region:
                    if self.cut == 10:
                        nevts = 4.07
                        nerror = math.sqrt(nevts)
                    if self.cut == 14:
                        nevts = 2.42
                        nerror = math.sqrt(nevts)
                sample.buildHisto([nevts], region, "cuts", 0.5)
                sample.buildStatErrors([nerror], region, "cuts")
                print(region, str(round(nevts, 3)) + ";", end=" ")
            print()
            #sample.setStatConfig(True)
            sample.setFileList([in_file_path])
            ## add systematic??
            sample.addSystematic(Systematic(sample_name+"_stats",\
                        configMgr.weights, 1.2, 0.8, "user", "userOverallSys"))
            #for systematic in self.sys_common:
                #sample.addSystematic(systematic)
            self.set_norm_factor(sample)
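
    # NOTE: set_norm_factor() is called above but not shown in this snippet.
    # A minimal sketch, assuming it simply floats the sample normalisation through the
    # standard Sample.setNormFactor() call (the name and bounds below are illustrative only):
    #
    #     def set_norm_factor(self, sample):
    #         sample.setNormFactor("mu_" + sample.name, 1., 0., 10.)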
# Give the analysis a name
configMgr.analysisName = "MyUserAnalysis"
configMgr.outputFileName = "results/%s_Output.root"%configMgr.analysisName

# Define cuts
configMgr.cutsDict["UserRegion"] = "1."

# Define weights
configMgr.weights = "1."
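
# NOTE: the inputs used below (ndata, nbkg, nsig, their errors, and the systematics
# corb/ucb/cors/ucs) are defined elsewhere in the file. A purely illustrative sketch of
# such inputs (all names and numbers here are assumptions, not the original values):
#
#     from configManager import configMgr
#     from configWriter import Sample
#     from systematic import Systematic
#     from ROOT import kGreen, kPink, kBlack
#
#     ndata, nbkg, nsig = 7., 5., 1.    # observed and expected event counts (illustrative)
#     nbkgErr, nsigErr = 1., 2.         # statistical uncertainties (illustrative)
#     corb = Systematic("cor", configMgr.weights, 1.1, 0.9, "user", "userOverallSys")
#     cors = Systematic("cor", configMgr.weights, 1.1, 0.9, "user", "userOverallSys")
#     ucb = Systematic("ucb", configMgr.weights, 1.2, 0.8, "user", "userOverallSys")
#     ucs = Systematic("ucs", configMgr.weights, 1.1, 0.9, "user", "userOverallSys")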

# Define samples
bkgSample = Sample("Bkg",kGreen-9)
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg],"UserRegion","cuts",0.5)
bkgSample.buildStatErrors([nbkgErr],"UserRegion","cuts")
bkgSample.addSystematic(corb)
bkgSample.addSystematic(ucb)

sigSample = Sample("Sig",kPink)
sigSample.setNormFactor("mu_Sig",1.,0.,100.)
sigSample.setStatConfig(True)
sigSample.setNormByTheory()
sigSample.buildHisto([nsig],"UserRegion","cuts",0.5)
sigSample.buildStatErrors([nsigErr],"UserRegion","cuts")
sigSample.addSystematic(cors)
sigSample.addSystematic(ucs)

dataSample = Sample("Data",kBlack)
dataSample.setData()
dataSample.buildHisto([ndata],"UserRegion","cuts",0.5)
Example #3
def common_setting(mass):
    from configManager import configMgr
    from ROOT import kBlack, kGray, kRed, kPink, kViolet, kBlue, kAzure, kGreen, \
        kOrange
    from configWriter import Sample
    from systematic import Systematic
    import os

    color_dict = {
        "Zbb": kAzure,
        "Zbc": kAzure,
        "Zbl": kAzure,
        "Zcc": kAzure,
        "Zcl": kBlue,
        "Zl": kBlue,
        "Wbb": kGreen,
        "Wbc": kGreen,
        "Wbl": kGreen,
        "Wcc": kGreen,
        "Wcl": kGreen,
        "Wl": kGreen,
        "ttbar": kOrange,
        "stop": kOrange,
        "stopWt": kOrange,
        "ZZPw": kGray,
        "WZPw": kGray,
        "WWPw": kGray,
        "fakes": kPink,
        "Zjets": kAzure,
        "Wjets": kGreen,
        "top": kOrange,
        "diboson": kGray,
        "$Z\\tau\\tau$+HF": kAzure,
        "$Z\\tau\\tau$+LF": kBlue,
        "$W$+jets": kGreen,
        "$Zee$": kViolet,
        "Zhf": kAzure,
        "Zlf": kBlue,
        "Zee": kViolet,
        "others": kViolet,
        signal_prefix + "1000": kRed,
        signal_prefix + "1100": kRed,
        signal_prefix + "1200": kRed,
        signal_prefix + "1400": kRed,
        signal_prefix + "1600": kRed,
        signal_prefix + "1800": kRed,
        signal_prefix + "2000": kRed,
        signal_prefix + "2500": kRed,
        signal_prefix + "3000": kRed,
        # Add your new processes here
        "VH": kGray + 2,
        "VHtautau": kGray + 2,
        "ttH": kGray + 2,
    }

    ##########################

    # Setting the parameters of the hypothesis test
    configMgr.doExclusion = True  # True=exclusion, False=discovery
    configMgr.nTOYs = 10000  # default=5000
    configMgr.calculatorType = 0  # 2=asymptotic calculator, 0=frequentist calculator
    configMgr.testStatType = 3  # 3=one-sided profile likelihood test statistic (LHC default)
    configMgr.nPoints = 30  # number of signal-strength values scanned for the upper-limit determination
    configMgr.writeXML = False
    configMgr.seed = 40
    configMgr.toySeedSet = True
    configMgr.toySeed = 400

    # Pruning
    # - any overallSys systematic uncertainty is removed if both the difference between the up variation and the
    #   nominal and the difference between the down variation and the nominal are below a certain user-given threshold
    # - for histoSys types, the situation is more complex:
    #   - a first check tests whether the difference between the integral of the up histogram and the integral of
    #     the nominal histogram is smaller than the integral of the nominal histogram, and the same for the down histogram
    #   - a second check then tests whether the shapes of the up, down and nominal histograms are very similar.
    #     Only when both conditions are fulfilled is the systematic removed.
    # default is False, so the pruning is normally not enabled
    configMgr.prun = True
    # The threshold for deciding whether an uncertainty is small is set by configMgr.prunThreshold = 0.005,
    # where the number gives the fractional deviation with respect to the nominal histogram below which an
    # uncertainty is considered small. The default is currently 0.01, corresponding to 1% (this might be too
    # aggressive for some analyses!)
    configMgr.prunThreshold = 0.005
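    # For example, with prunThreshold = 0.005 an overallSys whose up/down variations are
    # 1.003/0.997 (a 0.3% deviation from nominal) would be pruned, while a 2% uncertainty would be kept.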
    # method 1: a chi2 test (this is still a bit experimental, so watch out whether it is working or not)
    # method 2 (default): checking for every bin of the histograms that the difference between the up variation
    #   and the nominal, and between the down variation and the nominal, is below a certain threshold
    configMgr.prunMethod = 2
    # Smoothing: HistFitter does not provide any smoothing tools.
    # More Details: https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/HistFitterAdvancedTutorial#Pruning_in_HistFitter

    ##########################

    # Keep SRs also in the background fit configuration
    configMgr.keepSignalRegionType = True
    configMgr.blindSR = BLIND

    # Give the analysis a name
    configMgr.analysisName = "bbtautau" + "X" + mass
    configMgr.histCacheFile = "data/" + configMgr.analysisName + ".root"
    configMgr.outputFileName = "results/" + configMgr.analysisName + "_Output.root"

    # Define cuts
    configMgr.cutsDict["SR"] = "1."

    # Define weights
    configMgr.weights = "1."

    # Define samples
    list_samples = []

    yields_mass = yields[mass]
    for process, yields_process in yields_mass.items():
        if process == 'data' or signal_prefix in process: continue
        # print("-> {} / Colour: {}".format(process, color_dict[process]))
        bkg = Sample(str(process), color_dict[process])
        bkg.setStatConfig(stat_config)
        # OLD: add lumi uncertainty (bkg/sig correlated, not for data-driven fakes)
        # NOW: add lumi by hand
        bkg.setNormByTheory(False)
        noms = yields_process["nEvents"]
        errors = yields_process["nEventsErr"] if use_mcstat else [0.0]
        # print("  nEvents (StatError): {} ({})".format(noms, errors))
        bkg.buildHisto(noms, "SR", my_disc, 0.5)
        bkg.buildStatErrors(errors, "SR", my_disc)
        if not stat_only and not no_syst:
            if process == 'fakes':
                key_here = "ATLAS_FF_1BTAG_SIDEBAND_Syst_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.50, 0.50,
                                   "user", syst_type))
            else:
                key_here = "ATLAS_Lumi_Run2_hadhad"
                if not impact_check_continue(dict_syst_check, key_here):
                    bkg.addSystematic(
                        Systematic(key_here, configMgr.weights, 1.017, 0.983,
                                   "user", syst_type))
            for key, values in yields_process.items():
                if 'ATLAS' not in key: continue
                if impact_check_continue(dict_syst_check, key): continue
                # this should not be applied to the Sherpa (Zhf/Zlf) samples
                if process == 'Zhf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                if process == 'Zlf' and key == 'ATLAS_DiTauSF_ZMODEL_hadhad':
                    continue
                ups = values[0]
                downs = values[1]
                systUpRatio = [
                    u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
                ]
                systDoRatio = [
                    d / n if n != 0. else float(1.)
                    for d, n in zip(downs, noms)
                ]
                bkg.addSystematic(
                    Systematic(str(key), configMgr.weights, systUpRatio,
                               systDoRatio, "user", syst_type))
        list_samples.append(bkg)

    # FIXME: This is unusual!
    top = Sample('top', kOrange)
    top.setStatConfig(False)  # No stat error
    top.setNormByTheory(False)  # consider lumi for it
    top.buildHisto([0.00001], "SR", my_disc, 0.5)  # small enough
    # HistFitter can accept such a large up ratio
    # Systematic(name, weight, ratio_up, ratio_down, syst_type, syst_histfactory_type)
    if not stat_only and not no_syst:
        key_here = 'ATLAS_TTBAR_YIELD_UPPER_hadhad'
        if not impact_check_continue(dict_syst_check, key_here):
            top.addSystematic(
                Systematic(key_here, configMgr.weights, unc_ttbar[mass], 0.9,
                           "user", syst_type))
    list_samples.append(top)

    sigSample = Sample("Sig", kRed)
    sigSample.setNormFactor("mu_Sig", 1., 0., 100.)
    #sigSample.setStatConfig(stat_config)
    sigSample.setStatConfig(False)
    sigSample.setNormByTheory(False)
    noms = yields_mass[signal_prefix + mass]["nEvents"]
    errors = yields_mass[signal_prefix +
                         mass]["nEventsErr"] if use_mcstat else [0.0]
    sigSample.buildHisto([n * MY_SIGNAL_NORM * 1e-3 for n in noms], "SR",
                         my_disc, 0.5)
    #sigSample.buildStatErrors(errors, "SR", my_disc)
    for key, values in yields_mass[signal_prefix + mass].items():
        if 'ATLAS' not in key: continue
        if impact_check_continue(dict_syst_check, key):
            continue
        ups = values[0]
        downs = values[1]
        systUpRatio = [
            u / n if n != 0. else float(1.) for u, n in zip(ups, noms)
        ]
        systDoRatio = [
            d / n if n != 0. else float(1.) for d, n in zip(downs, noms)
        ]
        if not stat_only and not no_syst:
            sigSample.addSystematic(
                Systematic(str(key), configMgr.weights, systUpRatio,
                           systDoRatio, "user", syst_type))
    if not stat_only and not no_syst:
        key_here = "ATLAS_SigAccUnc_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights,
                           [1 + unc_sig_acc[mass] for i in range(my_nbins)],
                           [1 - unc_sig_acc[mass]
                            for i in range(my_nbins)], "user", syst_type))
        key_here = "ATLAS_Lumi_Run2_hadhad"
        if not impact_check_continue(dict_syst_check, key_here):
            sigSample.addSystematic(
                Systematic(key_here, configMgr.weights, 1.017, 0.983, "user",
                           syst_type))

    list_samples.append(sigSample)

    # Set observed and expected number of events in counting experiment
    n_SPlusB = yields_mass[signal_prefix +
                           mass]["nEvents"][0] + sum_of_bkg(yields_mass)[0]
    n_BOnly = sum_of_bkg(yields_mass)[0]
    if BLIND:
        # configMgr.useAsimovSet = True # Use the Asimov dataset
        # configMgr.generateAsimovDataForObserved = True # Generate Asimov data as obsData for UL
        # configMgr.useSignalInBlindedData = False
        ndata = sum_of_bkg(yields_mass)
    else:
        try:
            ndata = yields_mass["data"]["nEvents"]
        except KeyError:
            ndata = [0. for _ in range(my_nbins)]

    lumiError = 0.017  # Relative luminosity uncertainty

    dataSample = Sample("Data", kBlack)
    dataSample.setData()
    dataSample.buildHisto(ndata, "SR", my_disc, 0.5)
    list_samples.append(dataSample)

    # Define top-level
    ana = configMgr.addFitConfig("SPlusB")
    ana.addSamples(list_samples)
    ana.setSignalSample(sigSample)

    # Define measurement
    meas = ana.addMeasurement(name="NormalMeasurement",
                              lumi=1.0,
                              lumiErr=lumiError / 100000.)
    # lumiErr is made very small so that it gets pruned away;
    # we use the luminosity uncertainty added by hand above instead
    meas.addPOI("mu_Sig")
    #meas.statErrorType = "Poisson"
    # Fix the luminosity in HistFactory to constant
    meas.addParamSetting("Lumi", True, 1)

    # Add the channel
    chan = ana.addChannel(my_disc, ["SR"], my_nbins, my_xmin, my_xmax)
    chan.blind = BLIND
    #chan.statErrorType = "Poisson"
    ana.addSignalChannels([chan])

    # These lines are needed for the user analysis to run
    # Make sure file is re-made when executing HistFactory
    if configMgr.executeHistFactory:
        if os.path.isfile("data/%s.root" % configMgr.analysisName):
            os.remove("data/%s.root" % configMgr.analysisName)
# Give the analysis a name
configMgr.analysisName = "MyUserAnalysis"
configMgr.outputFileName = "results/%s_Output.root" % configMgr.analysisName

# Define cuts
configMgr.cutsDict["UserRegion"] = "1."

# Define weights
configMgr.weights = "1."

# Define samples
bkgSample = Sample("Bkg", kGreen - 9)
bkgSample.setStatConfig(True)
bkgSample.buildHisto([nbkg], "UserRegion", "cuts", 0.5)
bkgSample.buildStatErrors([nbkgErr], "UserRegion", "cuts")
bkgSample.addSystematic(corb)
bkgSample.addSystematic(ucb)

sigSample = Sample("Sig", kPink)
sigSample.setNormFactor("mu_Sig", 1., 0., 100.)
sigSample.setStatConfig(True)
sigSample.setNormByTheory()
sigSample.buildHisto([nsig], "UserRegion", "cuts", 0.5)
sigSample.buildStatErrors([nsigErr], "UserRegion", "cuts")
sigSample.addSystematic(cors)
sigSample.addSystematic(ucs)

dataSample = Sample("Data", kBlack)
dataSample.setData()
dataSample.buildHisto([ndata], "UserRegion", "cuts", 0.5)
Example #5
gammajets.setStatConfig(True)
gammajets.setNormByTheory(False)
#gammajets.addSystematic(qcdElNorm)
#gammajets.setQCD()

data = Sample("data",kBlack)
data.setData()


commonSamples = [ttbargamma, Wgamma, Wjets, ttbarDilep, singletop, Zgamma, Zjets, diboson, gammajets, data]

for lepton in ['El']:
    #for region in ("WCRlHT","WCRhHT", "HMEThHT","HMETmeff", "HMThHT","HMTmeff", "SRS", "SRW"):
    for region in ("WCRlHT","WCRhHT", "HMEThHT", "HMThHT", "SRW"):
        ttbargamma.buildHisto([backyields.GetYield(lepton, region, "ttbargamma")], region+lepton, "cuts")
        ttbargamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "ttbargamma")], region+lepton, "cuts")
        Wgamma.buildHisto([backyields.GetYield(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wgamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wjets.buildHisto([backyields.GetYield(lepton, region, "Wjets")], region+lepton, "cuts")
        Wjets.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Wjets")], region+lepton, "cuts")
        ttbarDilep.buildHisto([backyields.GetYield(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        ttbarDilep.buildStatErrors([backyields.GetYieldUnc(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        singletop.buildHisto([backyields.GetYield(lepton, region, "singletop")], region+lepton, "cuts")
        singletop.buildStatErrors([backyields.GetYieldUnc(lepton, region, "singletop")], region+lepton, "cuts")
        Zgamma.buildHisto([backyields.GetYield(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zgamma.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zjets.buildHisto([backyields.GetYield(lepton, region, "Zjets")], region+lepton, "cuts")
        Zjets.buildStatErrors([backyields.GetYieldUnc(lepton, region, "Zjets")], region+lepton, "cuts")
        diboson.buildHisto([backyields.GetYield(lepton, region, "diboson")], region+lepton, "cuts")
        diboson.buildStatErrors([backyields.GetYieldUnc(lepton, region, "diboson")], region+lepton, "cuts")
        gammajets.buildHisto([backyields.GetYield(lepton, region, "gammajets")], region+lepton, "cuts")
Example #6
singletop.addSystematic(singletopNorm)

gammajets = Sample("gammajets",28) # brown
gammajets.setStatConfig(True)
gammajets.addSystematic(qcdElNorm)
#gammajets.setQCD()

data = Sample("data",kBlack)
data.setData()

commonSamples = [ttbargamma, Wgamma, Wjets, ttbarDilep, singletop, Zgamma, Zjets, diboson, gammajets, data]

for lepton in ('El', 'Mu'):
    for region in ("WCRlHT","WCRhHT", "HMEThHT","HMETmeff", "HMThHT","HMTmeff", "SRS", "SRW"):
        ttbargamma.buildHisto([Tables.GetYield(lepton, region, "ttbargamma")], region+lepton, "cuts")
        ttbargamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "ttbargamma")], region+lepton, "cuts")
        Wgamma.buildHisto([Tables.GetYield(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wgamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Wgamma")], region+lepton, "cuts")
        Wjets.buildHisto([Tables.GetYield(lepton, region, "Wjets")], region+lepton, "cuts")
        Wjets.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Wjets")], region+lepton, "cuts")
        ttbarDilep.buildHisto([Tables.GetYield(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        ttbarDilep.buildStatErrors([Tables.GetYieldUnc(lepton, region, "ttbarDilep")], region+lepton, "cuts")
        singletop.buildHisto([Tables.GetYield(lepton, region, "singletop")], region+lepton, "cuts")
        singletop.buildStatErrors([Tables.GetYieldUnc(lepton, region, "singletop")], region+lepton, "cuts")
        Zgamma.buildHisto([Tables.GetYield(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zgamma.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Zgamma")], region+lepton, "cuts")
        Zjets.buildHisto([Tables.GetYield(lepton, region, "Zjets")], region+lepton, "cuts")
        Zjets.buildStatErrors([Tables.GetYieldUnc(lepton, region, "Zjets")], region+lepton, "cuts")
        diboson.buildHisto([Tables.GetYield(lepton, region, "diboson")], region+lepton, "cuts")
        diboson.buildStatErrors([Tables.GetYieldUnc(lepton, region, "diboson")], region+lepton, "cuts")
        gammajets.buildHisto([Tables.GetYield(lepton, region, "gammajets")], region+lepton, "cuts")
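
# A more compact, equivalent way to fill the same yields (a sketch, assuming the same
# Tables helper and the sample objects defined above):
mc_samples = {
    "ttbargamma": ttbargamma, "Wgamma": Wgamma, "Wjets": Wjets,
    "ttbarDilep": ttbarDilep, "singletop": singletop, "Zgamma": Zgamma,
    "Zjets": Zjets, "diboson": diboson, "gammajets": gammajets,
}
for lepton in ('El', 'Mu'):
    for region in ("WCRlHT", "WCRhHT", "HMEThHT", "HMETmeff", "HMThHT", "HMTmeff", "SRS", "SRW"):
        for name, sample in mc_samples.items():
            # one-bin "cuts" histogram per (region, lepton) channel, as in the explicit calls above
            sample.buildHisto([Tables.GetYield(lepton, region, name)], region + lepton, "cuts")
            sample.buildStatErrors([Tables.GetYieldUnc(lepton, region, name)], region + lepton, "cuts")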