Example #1
def byb_fom(hist_reco, hist_true, hist_respon):
    """ Calculate figures of merit for comparasion with bin-by-bin method.
        
        Args:
        hist_reco : (TH1D) measured distrbution to be unfolded
        hist_true : (TH1D) true distrbution
        hist_respon : (RooUnfoldResponse) response matrix (x=reco, y=true)
        
        Returns:
        unf_cen : 1D-array of central values after unfolding, i.e. unf_cen[bin]
        unf_err : 1D-array of stat. error after unfolding, i.e. unf_err[bin]
        err_a : sum of absolute biases
        err_b : sum of normalised absolute biases
        err_c : sum of biases
        err_d : total stat error after unfolding
        err_e : ratio of sumed absoluted biases and total stat error after unfolding
        err_f : sum of two terms in type e
        """

    unfbyb = ROOT.RooUnfoldBinByBin(hist_respon, hist_reco)
    unfbyb.IncludeSystematics()
    unfres = unfbyb.Hreco()
    witherror = ROOT.RooUnfold.kCovariance
    unfcov = unfbyb.Ereco(witherror)

    unf_cen, unf_err = th1_to_arr(unfres)
    mc_cen, _ = th1_to_arr(hist_true)

    byb_err_a, byb_err_b, byb_err_c, byb_err_d, byb_err_e, byb_err_f, byb_err_g = study_complex_errors(
        unf_cen, mc_cen, unfcov)

    return unf_cen, unf_err, byb_err_a, byb_err_b, byb_err_c, byb_err_d, byb_err_e, byb_err_f, byb_err_g
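
A minimal usage sketch (the names h_reco, h_true and resp are placeholders for objects assumed to have been built elsewhere with ROOT/RooUnfold; they are not part of the original code):

# Hypothetical usage: h_reco, h_true are TH1D and resp is a RooUnfoldResponse.
unf_cen, unf_err, a, b, c, d, e, f, g = byb_fom(h_reco, h_true, resp)
print('sum of absolute biases:', a, ' total stat. error:', d)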
Example #2
def bay_fom(hist_reco, hist_true, hist_respon, ntry=None):
    """ Interally run through a list iteration parameters (n=1..ntry, default ntry=nbins) and do unfolding with Bayes method for each n. Calculate figures of merit for comparasion.
        
        Args:
        hist_reco : (TH1D) measured distrbution to be unfolded
        hist_true : (TH1D) true distrbution
        hist_respon : (RooUnfoldResponse) response matrix (x=reco, y=true)
        ntry : the max iteration parameter, default is #bins
        
        Returns:
        unf_cen_all : 2D-array of central values after unfolding, i.e. unf_cen_all[n, bin]
        unf_err_all : 2D-array of stat. error after unfolding, i.e. unf_err_all[n, bin]
        err_a : sum of absolute biases
        err_b : sum of normalised absolute biases
        err_c : sum of biases
        err_d : total stat error after unfolding
        err_e : ratio of sumed absoluted biases and total stat error after unfolding
        err_f : sum of two terms in type e
        n_arr : array of tested n
        """
    nbins = hist_reco.GetNbinsX()
    if ntry is None: ntry = nbins
    unf_cen_all = np.zeros([ntry, nbins])
    unf_err_all = np.zeros([ntry, nbins])
    err_a = []
    err_b = []
    err_c = []
    err_d = []
    err_e = []
    err_f = []
    err_g = []
    n_arr = []

    for x in range(1, ntry + 1):

        unfids = ROOT.RooUnfoldBayes(hist_respon, hist_reco, x)
        unfids.IncludeSystematics()
        unfres = unfids.Hreco()
        witherror = ROOT.RooUnfold.kCovariance
        unfcov = unfids.Ereco(witherror)

        unf_cen, unf_err = th1_to_arr(unfres)
        mc_cen, _ = th1_to_arr(hist_true)

        ids_err_a, ids_err_b, ids_err_c, ids_err_d, ids_err_e, ids_err_f, ids_err_g = study_complex_errors(
            unf_cen, mc_cen, unfcov)
        err_a.append(ids_err_a)
        err_b.append(ids_err_b)
        err_c.append(ids_err_c)
        err_d.append(ids_err_d)
        err_e.append(ids_err_e)
        err_f.append(ids_err_f)
        err_g.append(ids_err_g)
        n_arr.append(x)

        for y in range(nbins):
            unf_cen_all[x - 1, y] = unf_cen[y]
            unf_err_all[x - 1, y] = unf_err[y]

    return unf_cen_all, unf_err_all, err_a, err_b, err_c, err_d, err_e, err_f, err_g, n_arr
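
A sketch of how the scan output could be used to choose the iteration number, assuming numpy is imported as np and the same placeholder inputs as above; minimising err_f is only one possible criterion:

# Hypothetical usage: scan n = 1..nbins and report the n with the smallest
# combined bias + stat. error figure of merit (err_f).
cen_all, err_all, a, b, c, d, e, f, g, n_arr = bay_fom(h_reco, h_true, resp)
best_n = n_arr[int(np.argmin(f))]
print('suggested Bayes iteration number:', best_n)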
Example #3
def unf_result_df(self):
    """ Fill result_df with the truth, measured and unfolded central values and errors. """
    self.result_df['truth_central'], self.result_df[
        'truth_stat_error'] = th1_to_arr(self.hist_test_true)
    self.result_df['measured_central'], self.result_df[
        'measured_error'] = th1_to_arr(self.hist_test_measure)
    self.result_df['unfolded_central'], self.result_df[
        'unfolded_error'] = th1_to_arr(self.unfres)
Example #4
def svd_fom(hist_reco, hist_true, hist_respon):
    """ Interally run through all regularisation parameters (k=2..nbins, k=0 using default nbins/2, k=1 not good to use) and do unfolding with SVD method for each k. Calculate figures of merit for comparasion.
        
        Args:
        hist_reco : (TH1D) measured distrbution to be unfolded
        hist_true : (TH1D) true distrbution
        hist_respon : (RooUnfoldResponse) response matrix (x=reco, y=true)
        
        Returns:
        unf_cen_all : 2D-array of central values after unfolding, i.e. unf_cen_all[k, bin]
        unf_err_all : 2D-array of stat. error after unfolding, i.e. unf_err_all[k, bin]
        err_a : sum of absolute biases
        err_b : sum of normalised absolute biases
        err_c : sum of biases
        err_d : total stat error after unfolding
        err_e : ratio of sumed absoluted biases and total stat error after unfolding
        err_f : sum of two terms in type e
        k_arr : array of tested k
        """

    nbins = hist_reco.GetNbinsX()
    unf_cen_all = np.zeros([nbins - 1, nbins])
    unf_err_all = np.zeros([nbins - 1, nbins])
    err_a = []
    err_b = []
    err_c = []
    err_d = []
    err_e = []
    err_f = []
    err_g = []
    k_arr = []

    for x in range(2, nbins + 1):

        unfsvd = ROOT.RooUnfoldSvd(hist_respon, hist_reco, x)
        unfsvd.IncludeSystematics()
        unfres = unfsvd.Hreco()
        witherror = ROOT.RooUnfold.kCovariance
        unfcov = unfsvd.Ereco(witherror)

        unf_cen, unf_err = th1_to_arr(unfres)
        mc_cen, _ = th1_to_arr(hist_true)

        svd_err_a, svd_err_b, svd_err_c, svd_err_d, svd_err_e, svd_err_f, svd_err_g = study_complex_errors(
            unf_cen, mc_cen, unfcov)
        err_a.append(svd_err_a)
        err_b.append(svd_err_b)
        err_c.append(svd_err_c)
        err_d.append(svd_err_d)
        err_e.append(svd_err_e)
        err_f.append(svd_err_f)
        err_g.append(svd_err_g)
        k_arr.append(x)

        for y in range(nbins):
            unf_cen_all[x - 2, y] = unf_cen[y]
            unf_err_all[x - 2, y] = unf_err[y]

    return unf_cen_all, unf_err_all, err_a, err_b, err_c, err_d, err_e, err_f, err_g, k_arr
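
The same pattern applies to the SVD scan, again with the placeholder inputs from above; the regularisation parameter with the smallest err_f is reported:

# Hypothetical usage: scan k = 2..nbins and report the k with the smallest
# combined bias + stat. error figure of merit (err_f).
cen_all, err_all, a, b, c, d, e, f, g, k_arr = svd_fom(h_reco, h_true, resp)
best_k = k_arr[int(np.argmin(f))]
print('suggested SVD regularisation parameter:', best_k)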
Example #5
def check_bias(self):
    """ Define and calculate figures of merit.
    Figures of merit (stored as self.bias_a ... self.bias_g):
    a : sum of absolute biases
    b : sum of normalised absolute biases
    c : sum of biases
    d : total stat. error after unfolding, taking into account bin-to-bin correlations
    e : ratio of summed absolute biases to total stat. error after unfolding
    f : sum of the two terms in type e
    g : sum of bin-wise ratios between biases and unfolding errors

    """
    self.bias_a, self.bias_b, self.bias_c, self.bias_d, self.bias_e, self.bias_f, self.bias_g = study_complex_errors(
        th1_to_arr(self.unfres)[0],
        th1_to_arr(self.hist_test_true)[0], self.result_cov)
Example #6
            ax2.text(bin_layout[j]+txt_offset, bin_layout[i]+txt_offset, int(n_matrix[i, j]), fontsize=txt_fontsize,
                     color="gray", ha="center", va="center", fontweight="bold")
    if fname_n:
        plt.savefig(fname_n)
    plt.show()
    plt.close()

    return mig_matrix, n_matrix, mig_fig, n_fig


# compare the unfolded results obtained with the SVD and matrix-inversion methods
@ps.WG1_decorator
def plot_unf_scan(hist_test_reco, hist_test_true, respon_matrix, unf_cen_svd,
                  unf_err_svd, unf_cen_inv, unf_err_inv, unfold_bin, xlab, leg, case, yup=None, fname=None):
    
    mea_cen, mea_err = th1_to_arr(hist_test_reco)
    mc_cen, mc_err = th1_to_arr(hist_test_true)


    fig = plt.figure(figsize=(9.0, 5.5))
    
    plt.hist(get_bin_centers(unfold_bin), bins=unfold_bin, weights=mea_cen, color=p.p_blue,
             density=False, histtype='step', lw=1.5, label=r'Reco MC '+case+r' with $\sigma^{tot}$')
    plt.bar(get_bin_centers(unfold_bin),
            height=2*mea_err,
            width=get_bin_widths(unfold_bin),
            bottom=mea_cen - mea_err,  # label='stat. uncertainty',
            alpha=0.5, color=p.p_light_blue
            )
    
    plt.bar(get_bin_centers(unfold_bin),
Example #7
def do_unfold(hist_true, hist_measure, hist_respon, method=None, para=None, mea_cov='False', kcovtoy=False, mc_stat_err=0):
    """ do unfolding on a measured distribution with a response matrix.
        
        Args:
        hist_respon (RooUnfoldResponse) : response matrix (x=reco, y=true)
        hist_measure (TH1D):  measured distrbution and to be unfolded
        method  : string for unfold method: 'Ids', 'Svd', 'Bayes', 'TUnfold', 'Invert', 'BinByBin'
        para    : parameters for 'Ids', 'Svd' and 'Bayes' methods.
        mea_cov (optional) : measured covariance matrix, default is statistical covariance
        kcovtoy (optional) : flag provided by ROOUNFOLD. Default is False and the full covariance matrix 'reco_cov' propagated through unfolding. If True, the error propagation is based on toys generated internally by RooUnfold.
        mc_stat_err (optional) : relate to ROOUNFOLD::includeSystematics().  Default "0" is to leave out the effect of statistical uncertainties on the migration matrix. "1" is to include the effect. "2" is for only counting the statistical uncertainties of measured distribtuon and migration matrix. The effect is valueated by internal toys.
        
        
        Returns:
        df_unf :  dataframe including bin_index, truth, measured and unfolded result
        cov_array :  2D array, covariance matrix after unfolding
        """
    unfres = None
    unfcov = None
    
    if(kcovtoy):
        witherror = ROOT.RooUnfold.kCovToy  #  error propagation based on toys generated internally by RooUnfold
    else:
        witherror = ROOT.RooUnfold.kCovariance   #  error propagation based on full covariance matrix 'mea_cov'
    
        
    if method is None : print('Please indicate one method for unfolding: \'Ids\', \'Svd\', \'Bayes\', \'TUnfold\', \'Invert\', \'BinByBin\'.'
                              + "\n" + 'e.g. do_unfold(hist_true, hist_measure, hist_respon, \'Svd\', 5)')
        
    elif method=='Ids':
        #print('Use IDS method with iteration number = '+ str(para) + '.')
        if para is None or para < 0: print('Ids method requires an iteration number (>=0).')
        elif para >= 0:
            unf = ROOT.RooUnfoldIds(hist_respon, hist_measure, para)
            if(mc_stat_err > 0):
                unf.IncludeSystematics(mc_stat_err)
            if(mea_cov!='False'):
                unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
            unfres = unf.Hreco()
            unfcov = unf.Ereco(witherror)


    elif method=='Svd':
        #print('Use SVD method with regularisation number = '+ str(para) + '.')
        if para is None or para < 0: print('Svd method requires a regularisation number (<= nbins; 0 uses the default nbins/2).')
        elif para > hist_measure.GetNbinsX(): print('Svd method does not work when the regularisation number > nbins.')
        elif 0 <= para <= hist_measure.GetNbinsX():
            unf = ROOT.RooUnfoldSvd(hist_respon, hist_measure, para)
            if(mc_stat_err > 0):
                unf.IncludeSystematics(mc_stat_err)
            if(mea_cov!='False'):
                unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
            unfres = unf.Hreco()
            unfcov = unf.Ereco(witherror)

    
    elif method=='Bayes':
        #print('Use iterative Bayes method with iteration number = '+ str(para) + '.')
        if para is None:
            print('Bayes method requires an iteration number; using the default of 4.')
            para = 4
        unf = ROOT.RooUnfoldBayes(hist_respon, hist_measure, para)
        if(mc_stat_err > 0):
            unf.IncludeSystematics(mc_stat_err)
        if(mea_cov!='False'):
            unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
        unfres = unf.Hreco()
        unfcov = unf.Ereco(witherror)

    elif method=='Invert':
        #print('Use matrix invert method.')
        if para is not None: print('The unregularised matrix-inversion method does not need a parameter. The input parameter was ignored.')
        unf = ROOT.RooUnfoldInvert(hist_respon, hist_measure)
        if(mc_stat_err > 0):
            unf.IncludeSystematics(mc_stat_err)
        if(mea_cov!='False'):
            unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
        unfres = unf.Hreco()
        unfcov = unf.Ereco(witherror)

    
    elif method=='TUnfold':
        #print('Use TUnfold method.')
        if para is not None: print('The TUnfold method does not need an input parameter. The input parameter was ignored.')
        unf = ROOT.RooUnfoldTUnfold(hist_respon, hist_measure)
        if(mc_stat_err > 0):
            unf.IncludeSystematics(mc_stat_err)
        if(mea_cov!='False'):
            unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
        unfres = unf.Hreco()
        unfcov = unf.Ereco(witherror)
    
    
    elif method=='BinByBin':
        #print('Use bin-by-bin correction method.')
        if para is not None: print('The bin-by-bin correction method does not need a parameter. The input parameter was ignored.')
        unf = ROOT.RooUnfoldBinByBin(hist_respon, hist_measure)
        if(mc_stat_err > 0):
            unf.IncludeSystematics(mc_stat_err)
        if(mea_cov!='False'):
            unf.SetMeasuredCov(ndarr_to_tmatrix(mea_cov))
        unfres = unf.Hreco()
        unfcov = unf.Ereco(witherror)

    else : print('Method not found! Please select one from \'Ids\', \'Svd\', \'Bayes\', \'TUnfold\', \'Invert\', \'BinByBin\'.')

    # if the method or parameter was invalid, there is nothing to convert
    if unfres is None or unfcov is None:
        return None, None

    # convert the results to numpy format
    nbins = hist_measure.GetNbinsX()
    cov_array = np.zeros((nbins, nbins))

    for x in range(nbins):
        for y in range(nbins):
            cov_array[x][y] = unfcov[x][y]

    df_unf = pd.DataFrame(range(0, nbins), columns=['bin_index'])
    df_unf['true_central'], df_unf['true_error'] =th1_to_arr(hist_true)
    df_unf['measured_central'], df_unf['measured_error'] = th1_to_arr(hist_measure)
    df_unf['unfolded_central'], df_unf['unfolded_error'] = th1_to_arr(unfres)

  
    return df_unf, cov_array
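
A minimal usage sketch for do_unfold, with the placeholder inputs from above; 'Svd' with para=4 is just an illustrative choice, not a recommendation:

# Hypothetical usage: unfold the measured histogram with the SVD method and
# regularisation parameter 4, then inspect the unfolded spectrum.
df_unf, cov = do_unfold(h_true, h_reco, resp, method='Svd', para=4)
print(df_unf[['bin_index', 'unfolded_central', 'unfolded_error']])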