Example #1
    def test_write_images(self):
        """Test the write_images function."""

        test_table = Table("Some Table")

        # Get test PDF
        some_pdf = "%s/minimal.pdf" % os.path.dirname(__file__)

        # This should work fine
        test_table.add_image(some_pdf)
        testdir = tmp_directory_name()
        self.addCleanup(shutil.rmtree, testdir)
        try:
            test_table.write_images(testdir)
        except TypeError:
            self.fail("Table.write_images raised an unexpected TypeError.")

        # Check that output file exists
        expected_file = os.path.join(testdir, "minimal.png")
        self.assertTrue(os.path.exists(expected_file))

        # Try wrong type of input argument
        bad_arguments = [None, 5, {}, []]
        for argument in bad_arguments:
            with self.assertRaises(TypeError):
                test_table.write_images(argument)
        self.doCleanups()
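These test methods are excerpts from a larger unittest suite and will not run on their own. A minimal sketch of the missing scaffolding follows; the class name and the stand-in tmp_directory_name helper are assumptions for illustration, not the library's actual test utilities.

import os
import random
import shutil
import string
import unittest

from hepdata_lib import Table


def tmp_directory_name():
    """Stand-in helper (assumption): a random directory name that does not exist yet."""
    return "tmp_" + "".join(random.choices(string.ascii_lowercase, k=12))


class TestTable(unittest.TestCase):
    """Hypothetical wrapper class for the test methods shown in these examples."""

    def test_write_images(self):
        ...  # method body as in Example #1 above


if __name__ == "__main__":
    unittest.main()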
Example #2
def make_table():
    params = ['r_ggH', 'r_VBF', 'r_VH', 'r_top']

    # Load results + xsbr data
    inputMode = "mu"
    translatePOIs = LoadTranslations("translate/pois_%s.json" % inputMode)
    with open(
            "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/correlations_mu.json",
            "r") as jf:
        correlations = json.load(jf)
    with open(
            "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/correlations_expected_mu.json",
            "r") as jf:
        correlations_exp = json.load(jf)

    # Make table of results
    table = Table("Correlations: production mode signal strength")
    table.description = "Observed and expected correlations between the parameters in the production mode signal strength fit."
    table.location = "Results from additional material"
    table.keywords["reactions"] = ["P P --> H ( --> GAMMA GAMMA ) X"]

    pois_x = Variable("Parameter (x)", is_independent=True, is_binned=False)
    pois_y = Variable("Parameter (y)", is_independent=True, is_binned=False)
    c = Variable("Observed correlation", is_independent=False, is_binned=False)
    c.add_qualifier("SQRT(S)", 13, "TeV")
    c.add_qualifier("MH", '125.38', "GeV")
    c_exp = Variable("Expected correlation",
                     is_independent=False,
                     is_binned=False)
    c_exp.add_qualifier("SQRT(S)", 13, "TeV")
    c_exp.add_qualifier("MH", '125.38', "GeV")

    poiNames_x = []
    poiNames_y = []
    corr = []
    corr_exp = []
    for ipoi in params:
        for jpoi in params:
            poiNames_x.append(str(Translate(ipoi, translatePOIs)))
            poiNames_y.append(str(Translate(jpoi, translatePOIs)))
            # Extract correlation coefficient
            corr.append(correlations["%s__%s" % (ipoi, jpoi)])
            corr_exp.append(correlations_exp["%s__%s" % (ipoi, jpoi)])
    pois_x.values = poiNames_x
    pois_y.values = poiNames_y
    c.values = np.round(np.array(corr), 3)
    c_exp.values = np.round(np.array(corr_exp), 3)

    # Add variables to table
    table.add_variable(pois_x)
    table.add_variable(pois_y)
    table.add_variable(c)
    table.add_variable(c_exp)

    # Add figure
    table.add_image(
        "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/perproc_mu_corr.pdf"
    )

    return table
Example #3
def make_table():

    xparam = 'kappa_V'
    yparam = 'kappa_F'

    # Load results + xsbr data
    inputMode = "kappas"
    translatePOIs = LoadTranslations("translate/pois_%s.json" % inputMode)

    # Extract observed results
    fobs = "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Combine/runFits_UL_redo_kVkF/output_scan2D_syst_fixedMH_v2_obs_kappa_V_vs_kappa_F.root"
    f_in = ROOT.TFile(fobs)
    t_in = f_in.Get("limit")
    xvals, yvals, deltaNLL = [], [], []
    ev_idx = 0
    for ev in t_in:
        xvals.append(getattr(ev, xparam))
        yvals.append(getattr(ev, yparam))
        deltaNLL.append(getattr(ev, "deltaNLL"))

    # Convert to numpy arrays as required for interpolation
    x = np.asarray(xvals)
    y = np.asarray(yvals)
    dnll = np.asarray(deltaNLL)
    v = 2 * (dnll - np.min(dnll))  # -2*deltaNLL relative to the best-fit point

    # Make table of results
    table = Table("Kappas 2D: vector boson and fermion")
    table.description = "Observed likelihood surface."
    table.location = "Results from Figure 22"
    table.keywords["reactions"] = ["P P --> H ( --> GAMMA GAMMA ) X"]

    pois_x = Variable(str(Translate(xparam, translatePOIs)),
                      is_independent=True,
                      is_binned=False)
    pois_y = Variable(str(Translate(yparam, translatePOIs)),
                      is_independent=True,
                      is_binned=False)
    q = Variable("Observed -2$\\Delta$NLL",
                 is_independent=False,
                 is_binned=False)
    q.add_qualifier("SQRT(S)", 13, "TeV")
    q.add_qualifier("MH", '125.38', "GeV")

    pois_x.values = x
    pois_y.values = y
    q.values = np.round(np.array(v), 2)

    # Add variables to table
    table.add_variable(pois_x)
    table.add_variable(pois_y)
    table.add_variable(q)

    # Add figure
    table.add_image(
        "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/scan2D_syst_obs_kappa_V_vs_kappa_F.pdf"
    )

    return table
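The make_table() helpers above return a Table but do not show how it is used. A hedged driver sketch, assuming the helper is in scope and using a placeholder output directory, would register each returned table with a Submission and write the HEPData YAML files:

from hepdata_lib import Submission

submission = Submission()
submission.add_table(make_table())           # one of the table builders defined above
submission.create_files("hepdata_output")    # writes submission.yaml plus per-table YAML files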
Example #4
    def test_add_image(self):
        """Get test PDF"""
        # Get test PDF
        some_pdf = "%s/minimal.pdf" % os.path.dirname(__file__)

        test_table = Table("Some Table")

        # This should work fine
        try:
            test_table.add_image(some_pdf)
        except (RuntimeError, TypeError) as err:
            self.fail("Table.add_image raised an unexpected %s." % type(err).__name__)

        # Try wrong argument types
        wrong_type = [None, 5, {}, []]
        for argument in wrong_type:
            with self.assertRaises(TypeError):
                test_table.add_image(argument)

        # Try non-existing paths:
        nonexisting = ["/a/b/c/d/e", "./daskjl/aksj/asdasd.pdf"]
        for argument in nonexisting:
            with self.assertRaises(RuntimeError):
                test_table.add_image(argument)
Example #5
    def test_write_images_multiple_executions(self):
        """
        write_images is supposed to only recreate output PNG
        files if the output file does not yet exist or is outdated
        relative to the input file.
        """

        test_table = Table("Some Table")
        some_pdf = "%s/minimal.pdf" % os.path.dirname(__file__)
        test_table.add_image(some_pdf)
        testdir = "test_output"
        self.addCleanup(shutil.rmtree, testdir)

        expected_main_file = os.path.join(testdir, "minimal.png")
        expected_thumbnail_file = os.path.join(testdir, "thumb_minimal.png")

        # Output files should not yet exist
        self.assertFalse(os.path.exists(expected_main_file))
        self.assertFalse(os.path.exists(expected_thumbnail_file))

        # Run the function
        test_table.write_images(testdir)

        # Output files now exist
        self.assertTrue(os.path.exists(expected_main_file))
        self.assertTrue(os.path.exists(expected_thumbnail_file))

        # Make sure that output is not recreated if input file is unchanged
        modified_time_main = os.path.getmtime(expected_main_file)
        modified_time_thumbnail = os.path.getmtime(expected_thumbnail_file)
        test_table.write_images(testdir)
        self.assertEqual(modified_time_main, os.path.getmtime(expected_main_file))
        self.assertEqual(modified_time_thumbnail, os.path.getmtime(expected_thumbnail_file))


        # Make sure that a change in input file triggers recreation
        os.utime(some_pdf, None)
        test_table.write_images(testdir)
        self.assertTrue(modified_time_main < os.path.getmtime(expected_main_file))
        self.assertTrue(modified_time_thumbnail < os.path.getmtime(expected_thumbnail_file))
Example #6
def Plot_pp_pPb_Avg_FF_and_Ratio(Comb_Dict):
    
    label_size=22
    axis_size=34
    plot_power = False
    Colors = ["red","blue"]
    Markers = ["s","o"]
    fig = plt.figure(figsize=(8,8))
    pp_sys_Error = 0
    p_Pb_sys_Error = 0
    fig.add_axes((0.1,0.3,0.88,0.6))
    for SYS,sys_col,marker in zip(reversed(Systems),reversed(Colors),reversed(Markers)):

        #Systematics
        Efficiency_Uncertainty = 0.056*Comb_Dict["%s_Combined_FF"%(SYS)]
        
        Eta_Cor = Eta_Correction #see default_value.py for value
        Eta_Cor_Uncertainty = Eta_Correction_Uncertainty*Comb_Dict["%s_Combined_FF"%(SYS)]
        if not(Apply_Eta_Correction and SYS=="p-Pb"):
            Eta_Cor_Uncertainty = 0  #2% otherwise
        
        FF_Central = Comb_Dict["%s_Combined_FF"%(SYS)] #Eta Correction is applied when creating Dictionary!
        Sys_Uncertainty = np.sqrt(Efficiency_Uncertainty**2 + Comb_Dict["%s_purity_Uncertainty"%(SYS)]**2 + Eta_Cor_Uncertainty**2)
        
        if (SYS=="pp"):
            pp_sys_Error = Sys_Uncertainty
        elif (SYS=="p-Pb"):
            p_Pb_sys_Error=Sys_Uncertainty
        #Plots
        if (SYS=="pp"):
            leg_string = SYS
        if (SYS=="p-Pb"):
            leg_string = "p$-$Pb"
        plt.errorbar(zT_centers[:NzT-ZT_OFF_PLOT], Comb_Dict["%s_Combined_FF"%(SYS)][:NzT-ZT_OFF_PLOT],xerr=zT_widths[:NzT-ZT_OFF_PLOT]*0,
        yerr=Comb_Dict["%s_Combined_FF_Errors"%(SYS)][:NzT-ZT_OFF_PLOT],linewidth=1, fmt=marker,color=sys_col,capsize=0)#for lines

        plt.plot(zT_centers[:NzT-ZT_OFF_PLOT], Comb_Dict["%s_Combined_FF"%(SYS)][:NzT-ZT_OFF_PLOT],marker,linewidth=0,color=sys_col,
        label=leg_string)#for legend without lines
        
        if (SYS == "pp"):
            Sys_Plot_pp = plt.bar(zT_centers[:NzT-ZT_OFF_PLOT], Sys_Uncertainty[:NzT-ZT_OFF_PLOT]+Sys_Uncertainty[:NzT-ZT_OFF_PLOT],
            bottom=Comb_Dict["%s_Combined_FF"%(SYS)][:NzT-ZT_OFF_PLOT]-Sys_Uncertainty[:NzT-ZT_OFF_PLOT],width=zT_widths[:NzT-ZT_OFF_PLOT]*2, align='center',color=sys_col,alpha=0.3,edgecolor=sys_col)
        else:
            Sys_Plot_pp = plt.bar(zT_centers[:NzT-ZT_OFF_PLOT], Sys_Uncertainty[:NzT-ZT_OFF_PLOT]+Sys_Uncertainty[:NzT-ZT_OFF_PLOT],
            bottom=Comb_Dict["%s_Combined_FF"%(SYS)][:NzT-ZT_OFF_PLOT]-Sys_Uncertainty[:NzT-ZT_OFF_PLOT],width=zT_widths[:NzT-ZT_OFF_PLOT]*2,align='center',color=sys_col,fill=False,edgecolor="blue")
        
        if (plot_power):
            model,p,chi2dof = Fit_FF_PowerLaw(Comb_Dict,SYS)
            plt.plot(zT_centers[:NzT-ZT_OFF_PLOT], model, sys_col,label=r"%s $\alpha = %1.2f\pm 0.1 \chi^2 = %1.2f$"%(SYS,p,chi2dof))
    
    if (Use_MC):
        plt.plot(zT_centers[:NzT-ZT_OFF_PLOT],pythia_FF,'--',color="forestgreen",label="PYTHIA 8.2 Monash")
        plt.errorbar(zT_centers[:NzT-ZT_OFF_PLOT],pythia_FF,yerr=pythia_FF_Errors,fmt='--',color="forestgreen",capsize=0) 
    
    
    plt.yscale('log')                             
    plt.ylabel(r"$\frac{1}{N_{\mathrm{\gamma}}}\frac{\mathrm{d}^3N}{\mathrm{d}z_{\mathrm{T}}\mathrm{d}|\Delta\varphi|\mathrm{d}\Delta\eta}$",fontsize=axis_size,y=0.76)
    plt.ylim(0.037,15)
    plt.yticks(fontsize=20)
    plt.xticks(fontsize=0)
    plt.xlim(0,0.65)
    plt.tick_params(which='both',direction='in',right=True,top=True,bottom=False,length=10)
    plt.tick_params(which='minor',length=5)

    #pp_sys_Error = (Comb_Dict["pp_Combined_FF"][:NzT-ZT_OFF_PLOT])*math.sqrt(Rel_pUncert["pp"]**2+0.056**2)
    #p_Pb_sys_Error = (Comb_Dict["p-Pb_Combined_FF"][:NzT-ZT_OFF_PLOT])*math.sqrt(Rel_pUncert["p-Pb"]**2+0.056**2+Eta_Cor**2)
    
    Chi2,NDF,Pval = Get_pp_pPb_List_Chi2(Comb_Dict["pp_Combined_FF"][:NzT-ZT_OFF_PLOT],
                                         Comb_Dict["pp_Combined_FF_Errors"][:NzT-ZT_OFF_PLOT],
                                         pp_sys_Error,
                                         Comb_Dict["p-Pb_Combined_FF"][:NzT-ZT_OFF_PLOT],
                                         Comb_Dict["p-Pb_Combined_FF_Errors"][:NzT-ZT_OFF_PLOT],
                                         p_Pb_sys_Error)

    leg = plt.legend(numpoints=1,frameon=True,edgecolor='white', framealpha=0.0, fontsize=label_size,handlelength=1,labelspacing=0.2,loc='lower left',bbox_to_anchor=(0.001, 0.05))


    plt.annotate(r"ALICE, $\sqrt{s_{\mathrm{_{NN}}}}=5.02$ TeV",xy=(0.115,0.008),xycoords='axes fraction', ha='left',va='bottom',fontsize=label_size)
    plt.annotate(r"%1.0f < $p_\mathrm{T}^{\gamma}$ < %1.0f GeV/$c$"%(pTbins[0],pTbins[N_pT_Bins]),xy=(0.97, 0.81), xycoords='axes fraction', ha='right', va='top', fontsize=label_size)
    plt.annotate(r"%1.1f < $p_\mathrm{T}^\mathrm{h}$ < %1.1f GeV/$c$"%(Min_Hadron_pT,Max_Hadron_pT),xy=(0.97, 0.89), xycoords='axes fraction', ha='right', va='top', fontsize=label_size)
    plt.annotate("$\chi^2/\mathrm{ndf}$ = %1.1f/%i, $p$ = %1.2f"%(Chi2*NDF,NDF,Pval), xy=(0.97, 0.97), xycoords='axes fraction', ha='right', va='top', fontsize=label_size)



#HEP FF
    Fig5 = Table("Figure 5 Top Panel")
    Fig5.description = r"$\gamma^\mathrm{iso}$-tagged fragmentation function for pp (red) and p$-$Pb data (blue) at $\sqrt{s_\mathrm{NN}}$ = 5.02 TeV as measured by the ALICE detector. The boxes represent the systematic uncertainties while the vertical bars indicate the statistical uncertainties. The dashed green line corresponds to PYTHIA 8.2 Monash Tune. The $\chi^2$ test for the comparison of pp and p$-$Pb data incorporates correlations among different $z_\mathrm{T}$ intervals. A constant that was fit to the ratio is shown as grey band, with the width indicating the uncertainty on the fit."
    Fig5.location = "Data from Figure 5 Top Panel, Page 15"
    Fig5.keywords["observables"] = [r"$\frac{1}{N_{\mathrm{\gamma}}}\frac{\mathrm{d}^3N}{\mathrm{d}z_{\mathrm{T}}\mathrm{d}\Delta\varphi\mathrm{d}\Delta\eta}$"]
    Fig5.add_image("./pics/LO/zT_Rebin_8_006zT06zT13fnew/Final_FFunction_and_Ratio.pdf")
    
    # x-axis: zT
    zt = Variable(r"$z_\mathrm{T}$", is_independent=True, is_binned=True, units="")
    zt.values = zT_edges
    Fig5.add_variable(zt)

    # y-axis: p-Pb Yields
    pPb_data = Variable("p$-$Pb conditional yield of associated hadrons", is_independent=False, is_binned=False, units="")
    pPb_data.values = Comb_Dict["p-Pb_Combined_FF"]
    
    pPb_sys = Uncertainty("p-Pb Systematic", is_symmetric=True)
    pPb_sys.values = p_Pb_sys_Error
    pPb_stat = Uncertainty("p-Pb Statistical", is_symmetric=True)
    pPb_stat.values = Comb_Dict["p-Pb_Combined_FF_Errors"]
    pPb_data.add_uncertainty(pPb_sys)
    pPb_data.add_uncertainty(pPb_stat)    

    # y-axis: pp Yields
    pp_data = Variable("pp conditional yield of associated hadrons", is_independent=False, is_binned=False, units="")
    pp_data.values = Comb_Dict["pp_Combined_FF"]
    
    pp_sys = Uncertainty("pp Systematic", is_symmetric=True)
    pp_sys.values = pp_sys_Error
    pp_stat = Uncertainty("pp Statistical", is_symmetric=True)
    pp_stat.values = Comb_Dict["pp_Combined_FF_Errors"]
    pp_data.add_uncertainty(pp_sys)
    pp_data.add_uncertainty(pp_stat)

    # y-axis: PYTHIA Yields
    pythia_data = Variable("PYTHIA conditional yield of associated hadrons", is_independent=False, is_binned=False, units="")
    pythia_data.values = pythia_FF
    
    pythia_stat = Uncertainty("PYTHIA Statistical", is_symmetric=True)
    pythia_stat.values = pythia_FF_Errors
    pythia_data.add_uncertainty(pythia_stat)

    #Add everything to the HEP Table
    Fig5.add_variable(pPb_data)
    Fig5.add_variable(pp_data)
    Fig5.add_variable(pythia_data)

    submission.add_table(Fig5)

    #RATIO SECOND Y_AXIS
    fig.add_axes((0.1,0.1,0.88,0.2))

    pPb_Combined = Comb_Dict["p-Pb_Combined_FF"]
    pPb_Combined_Errors = Comb_Dict["p-Pb_Combined_FF_Errors"]
    pPb_purity_Uncertainty = Comb_Dict["p-Pb_purity_Uncertainty"]
    
    pp_Combined = Comb_Dict["pp_Combined_FF"]
    pp_Combined_Errors = Comb_Dict["pp_Combined_FF_Errors"]
    pp_purity_Uncertainty = Comb_Dict["pp_purity_Uncertainty"]
    
    Ratio = pPb_Combined/pp_Combined
    Ratio_Error = np.sqrt((pPb_Combined_Errors/pPb_Combined)**2 + (pp_Combined_Errors/pp_Combined)**2)*Ratio
    Ratio_Plot = plt.errorbar(zT_centers[:NzT-ZT_OFF_PLOT], Ratio[:NzT-ZT_OFF_PLOT], yerr=Ratio_Error[:NzT-ZT_OFF_PLOT],xerr=zT_widths[:NzT-ZT_OFF_PLOT]*0, fmt='ko',capsize=0, ms=6,lw=1)
    
        #Save
    np.save("npy_files/%s_Averaged_FF_Ratio_%s.npy"%(Shower,description_string),Ratio)
    np.save("npy_files/%s_Averaged_FF_Ratio_Errors_%s.npy"%(Shower,description_string),Ratio_Error)
    
    Purity_Uncertainty = np.sqrt((pp_purity_Uncertainty/pp_Combined)**2 + (pPb_purity_Uncertainty/pPb_Combined)**2)*Ratio
    Efficiency_Uncertainty = np.ones(len(pPb_Combined))*0.056*math.sqrt(2)*Ratio 
    Eta_Cor_Uncertainty = Eta_Correction_Uncertainty/Comb_Dict["p-Pb_Combined_FF"]*Ratio
    if (CorrectedP):
        Ratio_Systematic = np.sqrt(Purity_Uncertainty**2 + Efficiency_Uncertainty**2 + Eta_Cor_Uncertainty**2)
        
    Sys_Plot = plt.bar(zT_centers[:NzT-ZT_OFF_PLOT], Ratio_Systematic[:NzT-ZT_OFF_PLOT]+Ratio_Systematic[:NzT-ZT_OFF_PLOT],
            bottom=Ratio[:NzT-ZT_OFF_PLOT]-Ratio_Systematic[:NzT-ZT_OFF_PLOT], width=zT_widths[:NzT-ZT_OFF_PLOT]*2, align='center',color='black',alpha=0.25)
    
    ### ROOT LINEAR and CONSTANT FITS ###
    Ratio_TGraph = TGraphErrors()
    for izt in range (len(Ratio)-ZT_OFF_PLOT):
        Ratio_TGraph.SetPoint(izt,zT_centers[izt],Ratio[izt])
        Ratio_TGraph.SetPointError(izt,0,Ratio_Error[izt])

    Ratio_TGraph.Fit("pol0","S")
    f = Ratio_TGraph.GetFunction("pol0")
    chi2_red  = f.GetChisquare()/f.GetNDF()
    pval = f.GetProb()
    p0 = f.GetParameter(0)
    p0e = f.GetParError(0)
    p0col = "grey"
    Show_Fits = True
    if (Show_Fits):
        sys_const = 0.19 #23% relative from purity + tracking
        #sys_const = 0.504245 #IRC
        plt.annotate("$c = {0:.2f} \pm {1:.2f} \pm {2:.2f}$".format(p0,p0e,sys_const), xy=(0.98, 0.9), xycoords='axes fraction', ha='right', va='top', color="black",fontsize=label_size,alpha=.9)
        plt.annotate(r"$p = %1.2f$"%(pval), xy=(0.98, 0.75), xycoords='axes fraction', ha='right', va='top', color="black",fontsize=label_size,alpha=.9)

        c_error = math.sqrt(p0e**2 + sys_const**2)
        plt.fill_between(np.arange(0,1.1,0.1), p0+c_error, p0-c_error,color=p0col,alpha=.3)
    
    ###LABELS/AXES###
    plt.axhline(y=1, color='k', linestyle='--')
    
    plt.xlabel("${z_\mathrm{T}} = p_\mathrm{T}^{\mathrm{h}}/p_\mathrm{T}^\gamma$",fontsize=axis_size-8,x=0.9)
    plt.ylabel(r"$\frac{\mathrm{p-Pb}}{\mathrm{pp}}$",fontsize=axis_size,y=0.5)
    plt.ylim((-0.0, 2.8))
    plt.xticks(fontsize=20)
    plt.yticks([0.5,1.0,1.5,2.0,2.5],fontsize=20)
    plt.xlim(0,0.65)
    plt.tick_params(which='both',direction='in',right=True,bottom=True,top=True,length=10)
    plt.tick_params(which='both',direction='in',top=True,length=5)

    plt.savefig("pics/%s/%s/Final_FFunction_and_Ratio.pdf"%(Shower,description_string), bbox_inches = "tight")
    plt.show()

#RATIO HEP
    FigRatio = Table("Figure 5 Bottom Panel")
    FigRatio.description = r"$\gamma^\mathrm{iso}$-tagged fragmentation function for pp (red) and p$-$Pb data (blue) at $\sqrt{s_\mathrm{NN}}$ = 5.02 TeV as measured by the ALICE detector. The boxes represent the systematic uncertainties while the vertical bars indicate the statistical uncertainties. The dashed green line corresponds to PYTHIA 8.2 Monash Tune. The $\chi^2$ test for the comparison of pp and p$-$Pb data incorporates correlations among different $z_\mathrm{T}$ intervals. A constant that was fit to the ratio is shown as grey band, with the width indicating the uncertainty on the fit."
    FigRatio.location = "Data from Figure 5, Bottom Panel, Page 15"
    FigRatio.keywords["observables"] = [r"$\frac{1}{N_{\mathrm{\gamma}}}\frac{\mathrm{d}^3N}{\mathrm{d}z_{\mathrm{T}}\mathrm{d}\Delta\varphi\mathrm{d}\Delta\eta}$"]
    FigRatio.add_image("./pics/LO/zT_Rebin_8_006zT06zT13fnew/Final_FFunction_and_Ratio.pdf")

    # x-axis: zT     
    zt_ratio = Variable(r"$z_\mathrm{T}$", is_independent=True, is_binned=True, units="")
    zt_ratio.values = zT_edges
    FigRatio.add_variable(zt_ratio)

    # y-axis: p-Pb Yields
    Ratio_HEP = Variable("Ratio conditional yield of associated hadrons in pp and p$-$Pb", is_independent=False, is_binned=False, units="")
    Ratio_HEP.values = Ratio
    Ratio_sys = Uncertainty("Ratio Systematic", is_symmetric=True)
    Ratio_sys.values = Ratio_Systematic
    Ratio_stat = Uncertainty("Ratio Statistical", is_symmetric=True)
    Ratio_stat.values = Ratio_Error
    Ratio_HEP.add_uncertainty(Ratio_stat)
    Ratio_HEP.add_uncertainty(Ratio_sys)
    FigRatio.add_variable(Ratio_HEP)
    submission.add_table(FigRatio)
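The plotting code above relies on module-level globals (Comb_Dict, zT_edges, zT_centers, ...) defined elsewhere. Since zT_edges is opaque here, note that a binned independent variable in hepdata_lib takes (low, high) bin-edge pairs; a minimal sketch with made-up edges:

from hepdata_lib import Variable

zt_demo = Variable(r"$z_\mathrm{T}$", is_independent=True, is_binned=True, units="")
zt_demo.values = [(0.06, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.6)]  # illustrative bin edges only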
Example #7
def make_table():
    params = [
        'r_ggH_0J_low', 'r_ggH_0J_high', 'r_ggH_1J_low', 'r_ggH_1J_med',
        'r_ggH_1J_high', 'r_ggH_2J_low', 'r_ggH_2J_med', 'r_ggH_2J_high',
        'r_ggH_BSM_low', 'r_ggH_BSM_med', 'r_ggH_BSM_high',
        'r_qqH_low_mjj_low_pthjj', 'r_qqH_low_mjj_high_pthjj',
        'r_qqH_high_mjj_low_pthjj', 'r_qqH_high_mjj_high_pthjj', 'r_qqH_VHhad',
        'r_qqH_BSM', 'r_WH_lep_low', 'r_WH_lep_med', 'r_WH_lep_high',
        'r_ZH_lep', 'r_ttH_low', 'r_ttH_medlow', 'r_ttH_medhigh', 'r_ttH_high',
        'r_ttH_veryhigh', 'r_tH'
    ]

    # Load results + xsbr data
    inputXSBRjson = "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Plots/jsons/xsbr_theory_stage1p2_extended_125p38.json"
    inputExpResultsJson = '/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Plots/expected_UL_redo.json'
    inputObsResultsJson = '/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Plots/observed_UL_redo.json'
    inputMode = "stage1p2_extended"

    translatePOIs = LoadTranslations("translate/pois_%s.json" % inputMode)
    with open(inputXSBRjson, "r") as jsonfile:
        xsbr_theory = json.load(jsonfile)
    observed = CopyDataFromJsonFile(inputObsResultsJson, inputMode, params)
    expected = CopyDataFromJsonFile(inputExpResultsJson, inputMode, params)
    mh = float(re.sub("p", ".",
                      inputXSBRjson.split("_")[-1].split(".json")[0]))

    # Make table of results
    table = Table("STXS stage 1.2 minimal merging scheme")
    table.description = "Results of the minimal merging scheme STXS fit. The best fit cross sections are shown together with the respective 68% C.L. intervals. The uncertainty is decomposed into the systematic and statistical components. The expected uncertainties on the fitted parameters are given in brackets. Also listed are the SM predictions for the cross sections and the theoretical uncertainty in those predictions."
    table.location = "Results from Figure 20 and Table 13"
    table.keywords["reactions"] = ["P P --> H ( --> GAMMA GAMMA ) X"]

    pois = Variable("STXS region", is_independent=True, is_binned=False)
    poiNames = []
    for poi in params:
        poiNames.append(str(Translate(poi, translatePOIs)))
    pois.values = poiNames

    # Dependent variables

    # SM predict
    xsbr_sm = Variable("SM predicted cross section times branching ratio",
                       is_independent=False,
                       is_binned=False,
                       units='fb')
    xsbr_sm.add_qualifier("SQRT(S)", 13, "TeV")
    xsbr_sm.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    xsbr_sm.add_qualifier("MH", '125.38', "GeV")
    theory = Uncertainty("Theory", is_symmetric=False)
    xsbr_vals = []
    xsbr_hi_th, xsbr_lo_th = [], []
    for poi in params:
        xsbr_vals.append(xsbr_theory[poi]['nominal'])
        xsbr_hi_th.append(xsbr_theory[poi]['High01Sigma'])
        xsbr_lo_th.append(-1 * abs(xsbr_theory[poi]['Low01Sigma']))
    xsbr_sm.values = np.round(np.array(xsbr_vals), 3)
    theory.values = zip(np.round(np.array(xsbr_lo_th), 3),
                        np.round(np.array(xsbr_hi_th), 3))
    xsbr_sm.add_uncertainty(theory)

    # Observed cross section
    xsbr = Variable("Observed cross section times branching ratio",
                    is_independent=False,
                    is_binned=False,
                    units='fb')
    xsbr.add_qualifier("SQRT(S)", 13, "TeV")
    xsbr.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    xsbr.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    tot = Uncertainty("Total", is_symmetric=False)
    stat = Uncertainty("Stat only", is_symmetric=False)
    syst = Uncertainty("Syst", is_symmetric=False)

    xsbr_vals = []
    xsbr_hi_tot, xsbr_lo_tot = [], []
    xsbr_hi_stat, xsbr_lo_stat = [], []
    xsbr_hi_syst, xsbr_lo_syst = [], []
    for poi in params:
        xsbr_vals.append(xsbr_theory[poi]['nominal'] * observed[poi]['Val'])
        xsbr_hi_tot.append(
            abs(xsbr_theory[poi]['nominal'] * observed[poi]['ErrorHi']))
        xsbr_lo_tot.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * observed[poi]['ErrorLo']))
        xsbr_hi_stat.append(
            abs(xsbr_theory[poi]['nominal'] * observed[poi]['StatHi']))
        xsbr_lo_stat.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * observed[poi]['StatLo']))
        xsbr_hi_syst.append(
            abs(xsbr_theory[poi]['nominal'] * observed[poi]['SystHi']))
        xsbr_lo_syst.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * observed[poi]['SystLo']))

    tot.values = zip(np.round(np.array(xsbr_lo_tot), 3),
                     np.round(np.array(xsbr_hi_tot), 3))
    stat.values = zip(np.round(np.array(xsbr_lo_stat), 3),
                      np.round(np.array(xsbr_hi_stat), 3))
    syst.values = zip(np.round(np.array(xsbr_lo_syst), 3),
                      np.round(np.array(xsbr_hi_syst), 3))

    xsbr.values = np.round(np.array(xsbr_vals), 3)
    xsbr.add_uncertainty(tot)
    xsbr.add_uncertainty(stat)
    xsbr.add_uncertainty(syst)

    # Observed ratio to SM
    xsbrr = Variable("Observed ratio to SM",
                     is_independent=False,
                     is_binned=False,
                     units='')
    xsbrr.add_qualifier("SQRT(S)", 13, "TeV")
    xsbrr.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    xsbrr.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    totr = Uncertainty("Total", is_symmetric=False)
    statr = Uncertainty("Stat only", is_symmetric=False)
    systr = Uncertainty("Syst", is_symmetric=False)

    xsbr_vals = []
    xsbr_hi_tot, xsbr_lo_tot = [], []
    xsbr_hi_stat, xsbr_lo_stat = [], []
    xsbr_hi_syst, xsbr_lo_syst = [], []
    for poi in params:
        xsbr_vals.append(observed[poi]['Val'])
        xsbr_hi_tot.append(abs(observed[poi]['ErrorHi']))
        xsbr_lo_tot.append(-1 * abs(observed[poi]['ErrorLo']))
        xsbr_hi_stat.append(abs(observed[poi]['StatHi']))
        xsbr_lo_stat.append(-1 * abs(observed[poi]['StatLo']))
        xsbr_hi_syst.append(abs(observed[poi]['SystHi']))
        xsbr_lo_syst.append(-1 * abs(observed[poi]['SystLo']))

    totr.values = zip(np.round(np.array(xsbr_lo_tot), 3),
                      np.round(np.array(xsbr_hi_tot), 3))
    statr.values = zip(np.round(np.array(xsbr_lo_stat), 3),
                       np.round(np.array(xsbr_hi_stat), 3))
    systr.values = zip(np.round(np.array(xsbr_lo_syst), 3),
                       np.round(np.array(xsbr_hi_syst), 3))

    xsbrr.values = np.round(np.array(xsbr_vals), 3)
    xsbrr.add_uncertainty(totr)
    xsbrr.add_uncertainty(statr)
    xsbrr.add_uncertainty(systr)

    # Expected cross section
    xsbr_exp = Variable("Expected cross section times branching ratio",
                        is_independent=False,
                        is_binned=False,
                        units='fb')
    xsbr_exp.add_qualifier("SQRT(S)", 13, "TeV")
    xsbr_exp.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    xsbr_exp.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    tot_exp = Uncertainty("Total", is_symmetric=False)
    stat_exp = Uncertainty("Stat only", is_symmetric=False)
    syst_exp = Uncertainty("Syst", is_symmetric=False)

    xsbr_vals = []
    xsbr_hi_tot, xsbr_lo_tot = [], []
    xsbr_hi_stat, xsbr_lo_stat = [], []
    xsbr_hi_syst, xsbr_lo_syst = [], []
    for poi in params:
        xsbr_vals.append(xsbr_theory[poi]['nominal'])
        xsbr_hi_tot.append(
            abs(xsbr_theory[poi]['nominal'] * expected[poi]['ErrorHi']))
        xsbr_lo_tot.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * expected[poi]['ErrorLo']))
        xsbr_hi_stat.append(
            abs(xsbr_theory[poi]['nominal'] * expected[poi]['StatHi']))
        xsbr_lo_stat.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * expected[poi]['StatLo']))
        xsbr_hi_syst.append(
            abs(xsbr_theory[poi]['nominal'] * expected[poi]['SystHi']))
        xsbr_lo_syst.append(
            -1 * abs(xsbr_theory[poi]['nominal'] * expected[poi]['SystLo']))

    tot_exp.values = zip(np.round(np.array(xsbr_lo_tot), 3),
                         np.round(np.array(xsbr_hi_tot), 3))
    stat_exp.values = zip(np.round(np.array(xsbr_lo_stat), 3),
                          np.round(np.array(xsbr_hi_stat), 3))
    syst_exp.values = zip(np.round(np.array(xsbr_lo_syst), 3),
                          np.round(np.array(xsbr_hi_syst), 3))

    xsbr_exp.values = np.round(np.array(xsbr_vals), 3)
    xsbr_exp.add_uncertainty(tot_exp)
    xsbr_exp.add_uncertainty(stat_exp)
    xsbr_exp.add_uncertainty(syst_exp)

    # Expected ratio to SM
    xsbrr_exp = Variable("Expected ratio to SM",
                         is_independent=False,
                         is_binned=False,
                         units='')
    xsbrr_exp.add_qualifier("SQRT(S)", 13, "TeV")
    xsbrr_exp.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    xsbrr_exp.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    totr_exp = Uncertainty("Total", is_symmetric=False)
    statr_exp = Uncertainty("Stat only", is_symmetric=False)
    systr_exp = Uncertainty("Syst", is_symmetric=False)

    xsbr_vals = []
    xsbr_hi_tot, xsbr_lo_tot = [], []
    xsbr_hi_stat, xsbr_lo_stat = [], []
    xsbr_hi_syst, xsbr_lo_syst = [], []
    for poi in params:
        xsbr_vals.append(1.00)
        xsbr_hi_tot.append(abs(expected[poi]['ErrorHi']))
        xsbr_lo_tot.append(-1 * abs(expected[poi]['ErrorLo']))
        xsbr_hi_stat.append(abs(expected[poi]['StatHi']))
        xsbr_lo_stat.append(-1 * abs(expected[poi]['StatLo']))
        xsbr_hi_syst.append(abs(expected[poi]['SystHi']))
        xsbr_lo_syst.append(-1 * abs(expected[poi]['SystLo']))

    totr_exp.values = zip(np.round(np.array(xsbr_lo_tot), 3),
                          np.round(np.array(xsbr_hi_tot), 3))
    statr_exp.values = zip(np.round(np.array(xsbr_lo_stat), 3),
                           np.round(np.array(xsbr_hi_stat), 3))
    systr_exp.values = zip(np.round(np.array(xsbr_lo_syst), 3),
                           np.round(np.array(xsbr_hi_syst), 3))

    xsbrr_exp.values = np.round(np.array(xsbr_vals), 3)
    xsbrr_exp.add_uncertainty(totr_exp)
    xsbrr_exp.add_uncertainty(statr_exp)
    xsbrr_exp.add_uncertainty(systr_exp)

    # Add variables to table
    table.add_variable(pois)
    table.add_variable(xsbr_sm)
    table.add_variable(xsbr)
    table.add_variable(xsbrr)
    table.add_variable(xsbr_exp)
    table.add_variable(xsbrr_exp)

    # Add figure
    table.add_image(
        "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/stxs_dist_stage1p2_minimal.pdf"
    )

    return table
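A hedged aside on the asymmetric uncertainties built with zip() above: hepdata_lib stores them as (down, up) pairs per data point, and under Python 3 zip() returns an iterator, so materialising it as a list is the safer pattern. Illustrative numbers only:

from hepdata_lib import Uncertainty

tot_demo = Uncertainty("Total", is_symmetric=False)
tot_demo.values = list(zip([-0.12, -0.08], [0.11, 0.09]))  # one (down, up) pair per data point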
Example #8
fig2_ul_Mjets = Variable("Misid. jets",
                         is_independent=False,
                         is_binned=False,
                         units="Events per bin")
fig2_ul_Mjets.values = fig2_ul_in[:, 9]
fig2_ul_Mjets_stat = Uncertainty("stat", is_symmetric=True)
fig2_ul_Mjets_stat.values = fig2_ul_in[:, 10]
fig2_ul_Mjets.add_uncertainty(fig2_ul_Mjets_stat)

fig2_ul.add_variable(fig2_ul_pt)
fig2_ul.add_variable(fig2_ul_Data)
fig2_ul.add_variable(fig2_ul_Wgg)
fig2_ul.add_variable(fig2_ul_Mele)
fig2_ul.add_variable(fig2_ul_Others)
fig2_ul.add_variable(fig2_ul_Mjets)
fig2_ul.add_image("input/Figure_002-a.pdf")

submission.add_table(fig2_ul)

#FIGURE 2 UPPER RIGHT
fig2_ur = Table("Figure 2 (upper right)")
fig2_ur.description = "Distribution of the transverse momentum of the diphoton system for the $\mathrm{W}\gamma\gamma$ muon channel. The predicted yields are shown with their pre-fit normalisations. The observed data, the expected signal contribution and the background estimates are presented with error bars showing the corresponding statistical uncertainties."
fig2_ur.location = "Data from Figure 2 on Page 6 of the preprint"
fig2_ur.keywords["observables"] = ["Diphoton pT"]
fig2_ur.keywords["reactions"] = [
    "P P --> W GAMMA GAMMA --> MUON NU GAMMA GAMMA"
]

fig2_ur_in = np.loadtxt("input/fig2_ur.txt", skiprows=1)

#diphoton pT
Example #9
    "$\pm$%.3f" % (datassm[4][3]),
    "$\pm$%.3f" % (datassm[5][3]),
    "$\pm$%.3f" % (datassm[6][3]),
    "$\pm$%.3f" % (datassm[7][3]),
    "$\pm$%.3f" % (datassm[8][3])
]
syst.add_qualifier("SQRT(S)", "13", "TeV")
syst.add_qualifier("LUMINOSITY", "137", "fb$^{-1}$")

tabssm.add_variable(ssmType)
tabssm.add_variable(ssm)
tabssm.add_variable(tot)
tabssm.add_variable(stat)
tabssm.add_variable(syst)

tabssm.add_image("../figures/summaryResult.png")
#tablessm.keywords()

tabssm.keywords["reactions"] = [
    "P P --> TOP TOPBAR X", "P P --> TOP TOPBAR GAMMA"
]
tabssm.keywords["cmenergies"] = [13000.0]
tabssm.keywords["observables"] = ["SIG/SIG"]
tabssm.keywords["phrases"] = [
    "Top", "Quark", "Photon", "lepton+jets", "semileptonic", "Cross Section",
    "Proton-Proton Scattering", "Inclusive", "Differential"
]
submission.add_table(tabssm)
Example #10

###
### Fig 9a
BulkG.add_qualifier("Efficiency times acceptance", "Bulk graviton --> WW")
BulkG.add_qualifier("SQRT(S)", 13, "TeV")

Wprime = Variable("Efficiency times acceptance",
                  is_independent=False,
                  is_binned=False,
                  units="")
Wprime.values = data[:, 2]
Wprime.add_qualifier("Efficiency times acceptance", "Wprime --> WZ")
Wprime.add_qualifier("SQRT(S)", 13, "TeV")

table.add_variable(d)
table.add_variable(BulkG)
table.add_variable(Wprime)

table.add_image("hepdata_lib/examples/example_inputs/signalEffVsMass.pdf")

submission.add_table(table)

for table in submission.tables:
    table.keywords["cmenergies"] = [13000]

### Histogram
from hepdata_lib import Table
table2 = Table("Figure 4a")
table2.description = "Distribution in the reconstructed B quark mass, after applying all selections to events with no forward jet, compared to the background distributions estimated before fitting. The plot refers to the low-mass mB analysis. The expectations for signal MC events are given by the blue histogram lines. Different contributions to background are indicated by the colour-filled histograms. The grey-hatched error band shows total uncertainties in the background expectation. The ratio of observations to background expectations is given in the lower panel, together with the total uncertainties prior to fitting, indicated by the grey-hatched band."
table2.location = "Data from Figure 4 (upper left), located on page 12."
table2.keywords["observables"] = ["N"]
table2.add_image(
    "hepdata_lib/examples/example_inputs/CMS-B2G-17-009_Figure_004-a.pdf")
def make_table():
    params = [
        'r_ggH_0J_low', 'r_ggH_0J_high', 'r_ggH_1J_low', 'r_ggH_1J_med',
        'r_ggH_1J_high', 'r_ggH_2J_low', 'r_ggH_2J_med', 'r_ggH_2J_high',
        'r_ggH_VBFlike', 'r_ggH_BSM', 'r_qqH_VBFlike', 'r_qqH_VHhad',
        'r_qqH_BSM', 'r_WH_lep', 'r_ZH_lep', 'r_ttH', 'r_tH'
    ]

    # Load results + xsbr data
    inputMode = "stage1p2_maximal"
    translatePOIs = LoadTranslations("translate/pois_%s.json" % inputMode)
    with open(
            "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/correlations_stage1p2_maximal.json",
            "r") as jf:
        correlations = json.load(jf)
    with open(
            "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/correlations_expected_stage1p2_maximal.json",
            "r") as jf:
        correlations_exp = json.load(jf)

    # Make table of results
    table = Table("Correlations: STXS stage 1.2 maximal merging scheme")
    table.description = "Observed and expected correlations between the parameters in the STXS stage 1.2 maximal merging fit."
    table.location = "Results from Figure 19"
    table.keywords["reactions"] = ["P P --> H ( --> GAMMA GAMMA ) X"]

    pois_x = Variable("STXS region (x)", is_independent=True, is_binned=False)
    pois_y = Variable("STXS region (y)", is_independent=True, is_binned=False)
    c = Variable("Observed correlation", is_independent=False, is_binned=False)
    c.add_qualifier("SQRT(S)", 13, "TeV")
    c.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    c.add_qualifier("MH", '125.38', "GeV")

    c_exp = Variable("Expected correlation",
                     is_independent=False,
                     is_binned=False)
    c_exp.add_qualifier("SQRT(S)", 13, "TeV")
    c_exp.add_qualifier("ABS(YRAP(HIGGS))", '<2.5')
    c_exp.add_qualifier("MH", '125.38', "GeV")

    poiNames_x = []
    poiNames_y = []
    corr = []
    corr_exp = []
    for ipoi in params:
        for jpoi in params:
            poiNames_x.append(str(Translate(ipoi, translatePOIs)))
            poiNames_y.append(str(Translate(jpoi, translatePOIs)))
            # Extract correlation coefficient
            corr.append(correlations["%s__%s" % (ipoi, jpoi)])
            corr_exp.append(correlations_exp["%s__%s" % (ipoi, jpoi)])
    pois_x.values = poiNames_x
    pois_y.values = poiNames_y
    c.values = np.round(np.array(corr), 3)
    c_exp.values = np.round(np.array(corr_exp), 3)

    # Add variables to table
    table.add_variable(pois_x)
    table.add_variable(pois_y)
    table.add_variable(c)
    table.add_variable(c_exp)

    # Add figure
    table.add_image(
        "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/corrMatrix_stage1p2_maximal.pdf"
    )

    return table
Example #12
def make_table():
    params = ['r_ggH', 'r_VBF', 'r_VH', 'r_top', 'r_inclusive']

    # Load results + xsbr data
    inputExpResultsJson = '/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Plots/expected_UL_redo.json'
    inputObsResultsJson = '/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/flashggFinalFit/Plots/observed_UL_redo.json'
    inputMode = "mu"

    translatePOIs = LoadTranslations("translate/pois_%s.json" % inputMode)
    observed = CopyDataFromJsonFile(inputObsResultsJson, inputMode, params)
    expected = CopyDataFromJsonFile(inputExpResultsJson, inputMode, params)

    # Make table of results
    table = Table("Signal strengths")
    table.description = "Best-fit values and 68% confidence intervals for the signal strength modifiers. The uncertainty is decomposed ino the theoretical systematic, experimental systematic and statistical components. Additionally, the expected uncertainties derived using an asimov dataset are provided."
    table.location = "Results from Figure 16"
    table.keywords["reactions"] = ["P P --> H ( --> GAMMA GAMMA ) X"]

    pois = Variable("Parameter", is_independent=True, is_binned=False)
    poiNames = []
    for poi in params:
        poiNames.append(str(Translate(poi, translatePOIs)))
    pois.values = poiNames

    # Dependent variables

    # Observed values
    obs = Variable("Observed", is_independent=False, is_binned=False, units='')
    obs.add_qualifier("SQRT(S)", 13, "TeV")
    obs.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    tot = Uncertainty("Total", is_symmetric=False)
    th = Uncertainty("Th. syst", is_symmetric=False)
    exp = Uncertainty("Exp. syst", is_symmetric=False)
    stat = Uncertainty("Stat only", is_symmetric=False)

    vals = []
    hi_tot, lo_tot = [], []
    hi_th, lo_th = [], []
    hi_exp, lo_exp = [], []
    hi_stat, lo_stat = [], []
    for poi in params:
        vals.append(observed[poi]['Val'])
        hi_tot.append(abs(observed[poi]['ErrorHi']))
        lo_tot.append(-1 * abs(observed[poi]['ErrorLo']))
        hi_th.append(abs(observed[poi]['TheoryHi']))
        lo_th.append(-1 * abs(observed[poi]['TheoryLo']))
        hi_exp.append(abs(observed[poi]['SystHi']))
        lo_exp.append(-1 * abs(observed[poi]['SystLo']))
        hi_stat.append(abs(observed[poi]['StatHi']))
        lo_stat.append(-1 * abs(observed[poi]['StatLo']))

    tot.values = zip(np.round(np.array(lo_tot), 3),
                     np.round(np.array(hi_tot), 3))
    th.values = zip(np.round(np.array(lo_th), 3), np.round(np.array(hi_th), 3))
    exp.values = zip(np.round(np.array(lo_exp), 3),
                     np.round(np.array(hi_exp), 3))
    stat.values = zip(np.round(np.array(lo_stat), 3),
                      np.round(np.array(hi_stat), 3))

    obs.values = np.round(np.array(vals), 3)
    obs.add_uncertainty(tot)
    obs.add_uncertainty(th)
    obs.add_uncertainty(exp)
    obs.add_uncertainty(stat)

    # Expected values
    ex = Variable("Expected", is_independent=False, is_binned=False, units='')
    ex.add_qualifier("SQRT(S)", 13, "TeV")
    ex.add_qualifier("MH", '125.38', "GeV")
    # Add uncertainties
    etot = Uncertainty("Total", is_symmetric=False)
    eth = Uncertainty("Th. syst", is_symmetric=False)
    eexp = Uncertainty("Exp. syst", is_symmetric=False)
    estat = Uncertainty("Stat only", is_symmetric=False)

    vals = []
    hi_tot, lo_tot = [], []
    hi_th, lo_th = [], []
    hi_exp, lo_exp = [], []
    hi_stat, lo_stat = [], []
    for poi in params:
        vals.append(1.00)
        hi_tot.append(abs(expected[poi]['ErrorHi']))
        lo_tot.append(-1 * abs(expected[poi]['ErrorLo']))
        hi_th.append(abs(expected[poi]['TheoryHi']))
        lo_th.append(-1 * abs(expected[poi]['TheoryLo']))
        hi_exp.append(abs(expected[poi]['SystHi']))
        lo_exp.append(-1 * abs(expected[poi]['SystLo']))
        hi_stat.append(abs(expected[poi]['StatHi']))
        lo_stat.append(-1 * abs(expected[poi]['StatLo']))

    etot.values = zip(np.round(np.array(lo_tot), 3),
                      np.round(np.array(hi_tot), 3))
    eth.values = zip(np.round(np.array(lo_th), 3),
                     np.round(np.array(hi_th), 3))
    eexp.values = zip(np.round(np.array(lo_exp), 3),
                      np.round(np.array(hi_exp), 3))
    estat.values = zip(np.round(np.array(lo_stat), 3),
                       np.round(np.array(hi_stat), 3))

    ex.values = np.round(np.array(vals), 3)
    ex.add_uncertainty(etot)
    ex.add_uncertainty(eth)
    ex.add_uncertainty(eexp)
    ex.add_uncertainty(estat)

    # Add variables to table
    table.add_variable(pois)
    table.add_variable(obs)
    table.add_variable(ex)

    # Add figure
    table.add_image(
        "/afs/cern.ch/work/j/jlangfor/hgg/legacy/FinalFits/UL/Dec20/CMSSW_10_2_13/src/OtherScripts/HEPdata/hepdata_lib/hig-19-015/inputs/perproc_mu_coloured.pdf"
    )

    return table
Example #13
def addLimitPlot(submission, config):
    table = Table(config["name"])
    table.description = config["description"]
    table.location = config["location"]
    table.keywords["observables"] = ["SIG"]
    table.keywords["reactions"] = ["P P --> TOP --> tt + 6j"]
    table.add_image(config["image"])

    reader = RootFileReader(config["inputData"])
    data = reader.read_limit_tree()
    stop_pair_Br = np.array([
        10.00, 4.43, 2.15, 1.11, 0.609, 0.347, 0.205, 0.125, 0.0783, 0.0500,
        0.0326, 0.0216, 0.0145, 0.00991, 0.00683, 0.00476, 0.00335, 0.00238,
        0.00170, 0.00122, 0.000887, 0.000646, 0.000473
    ])
    stop_pair_Br1SPpercent = np.array([
        6.65, 6.79, 6.99, 7.25, 7.530, 7.810, 8.120, 8.450, 8.8000, 9.1600,
        9.5300, 9.9300, 10.3300, 10.76, 11.2, 11.65, 12.12, 12.62, 13.13,
        13.66, 14.21, 14.78, 15.37
    ])
    stop_pair_unc = stop_pair_Br * stop_pair_Br1SPpercent / 100.0
    stop_pair_up = stop_pair_Br + stop_pair_unc
    stop_pair_down = stop_pair_Br - stop_pair_unc

    nData = len(data)
    for mass_id in range(0, nData):
        data[mass_id][1:] = stop_pair_Br[mass_id] * data[mass_id][1:]

    #####################################################################################
    d = Variable("Top squark mass",
                 is_independent=True,
                 is_binned=False,
                 units="GeV")
    d.values = data[:, 0]

    sig = Variable("Top squark cross section",
                   is_independent=False,
                   is_binned=False,
                   units="pb")
    sig.values = np.array(stop_pair_Br[:nData])
    sig.add_qualifier("Limit", "")
    sig.add_qualifier("SQRT(S)", 13, "TeV")
    sig.add_qualifier("LUMINOSITY", 137, "fb$^{-1}$")

    obs = Variable("Observed cross section upper limit at 95% CL",
                   is_independent=False,
                   is_binned=False,
                   units="pb")
    obs.values = data[:, 6]
    obs.add_qualifier("Limit", "Observed")
    obs.add_qualifier("SQRT(S)", 13, "TeV")
    obs.add_qualifier("LUMINOSITY", 137, "fb$^{-1}$")

    exp = Variable("Expected cross section upper limit at 95% CL",
                   is_independent=False,
                   is_binned=False,
                   units="pb")
    exp.values = data[:, 3]
    exp.add_qualifier("Limit", "Expected")
    exp.add_qualifier("SQRT(S)", 13, "TeV")
    exp.add_qualifier("LUMINOSITY", 137, "fb$^{-1}$")

    unc_sig = Uncertainty("1 s.d.", is_symmetric=False)
    # Theory cross-section band: intervals are given as (low, high), matching the expected-limit bands below
    unc_sig.set_values_from_intervals(zip(stop_pair_down[:nData],
                                          stop_pair_up[:nData]),
                                      nominal=sig.values)
    sig.add_uncertainty(unc_sig)

    # +/- 1 sigma
    unc_1s = Uncertainty("1 s.d.", is_symmetric=False)
    unc_1s.set_values_from_intervals(zip(data[:, 2], data[:, 4]),
                                     nominal=exp.values)
    exp.add_uncertainty(unc_1s)

    # +/- 2 sigma
    unc_2s = Uncertainty("2 s.d.", is_symmetric=False)
    unc_2s.set_values_from_intervals(zip(data[:, 1], data[:, 5]),
                                     nominal=exp.values)
    exp.add_uncertainty(unc_2s)

    table.add_variable(d)
    table.add_variable(sig)
    table.add_variable(obs)
    table.add_variable(exp)
    submission.add_table(table)
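addLimitPlot expects a Submission and a config dict; a hedged usage sketch follows. The dict keys are exactly those read by the function, while the table name, paths, and output directory are placeholders.

from hepdata_lib import Submission

submission = Submission()
config = {
    "name": "Expected and observed limits",                       # placeholder table name
    "description": "95% CL upper limits on the top squark pair production cross section.",
    "location": "Data from the limit figure",
    "image": "inputs/limit_plot.pdf",                              # placeholder figure path
    "inputData": "inputs/limit_scan.root",                         # ROOT file containing a 'limit' tree
}
addLimitPlot(submission, config)
submission.create_files("hepdata_output")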