    def __init__(self, csv=None, wp_key=None, eff_file=None, pattern=None):
        '''SF computation according to method 1a of
        https://twiki.cern.ch/twiki/bin/view/CMS/BTagSFMethods
        Inputs: csv, wp_key, eff_file, pattern
        csv: path to a b-tagging CSV file
        wp_key: tuple of three elements: (algorithm name, SF method, working point name)
        eff_file: ROOT file containing the efficiency histograms
        pattern: format string with a single placeholder for the flavour name ('bottom', 'charm', 'light')'''
        parsed_csv = reshuffle_sf_dict(convert_btag_csv_file(csv))
        self.sf_ = recursive_compile(
            parsed_csv[wp_key[0]][wp_key[1]][wp_key[2]])
        # FIXME: move to correlated/uncorrelated
        # Define the proper correlation among taggers;
        # unfortunately this has to be hardcoded by hand somewhere.
        # Each entry is a tuple of names for (UDSG, C, B)
        self.schema_ = {
            'central': ('UDSG_central', 'C_central', 'B_central'),
            'bc_up': ('UDSG_central', 'C_up', 'B_up'),
            'bc_down': ('UDSG_central', 'C_down', 'B_down'),
            'udsg_up': ('UDSG_up', 'C_central', 'B_central'),
            'udsg_down': ('UDSG_down', 'C_central', 'B_central'),
        }

        effs = convert_histo_root_file(eff_file)
        self.eff_ = {
            'B': dense_lookup(*effs[(pattern.format('bottom'), 'dense_lookup')]),
            'C': dense_lookup(*effs[(pattern.format('charm'), 'dense_lookup')]),
            'UDSG': dense_lookup(*effs[(pattern.format('light'), 'dense_lookup')]),
        }
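For orientation, a minimal instantiation sketch; the class name, file paths, working-point key, and histogram-name pattern below are placeholders (the real values depend on the CSV contents and on how reshuffle_sf_dict arranges its keys), not anything taken from this example:

# every name and path here is hypothetical
btag_sf = BTagSFComputer(          # BTagSFComputer: stand-in for the enclosing class
    csv='data/DeepCSV_SF.csv',     # b-tagging CSV file
    wp_key=('DeepCSV', 'comb', 'medium'),   # (algorithm, SF method, working point)
    eff_file='data/btag_efficiencies.root',
    pattern='{}_eff',              # formatted with 'bottom', 'charm', 'light'
)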
Example #2
    def __init__(self):

        self.lepSFs_ = {}
        # loop over the leptons defined for this year and build a lookup for each available SF type
        for lepton in lep_info[year].keys():
            SFfile = convert_histo_root_file(
                '%s/inputs/data/%s' %
                (proj_dir, lep_info[year][lepton]['filename']))
            for sf_type in lep_info[year][lepton]['SFs'].keys():
                if lep_info[year][lepton]['SFs'][sf_type]['available']:
                    self.lepSFs_['%s_%s' % (lepton, sf_type)] = dense_lookup(
                        *SFfile[(sf_type, 'dense_lookup')])
        print('Lepton SF constructed')
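Once constructed, each entry in lepSFs_ is a dense_lookup that can be evaluated directly on arrays; a minimal sketch, where the 'Muons_ID' key and the (eta, pt) axis order are assumptions about lep_info and the histograms, not taken from this example:

import numpy as np

eta = np.array([0.5, -1.2])
pt = np.array([35.0, 60.0])
# sf_tool: an instance of the class above; key name and axis order are hypothetical
muon_id_sf = sf_tool.lepSFs_['Muons_ID'](eta, pt)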
Example #3
def test_554():
    import uproot
    from coffea.lookup_tools.root_converters import convert_histo_root_file

    f_in = "tests/samples/PR554_SkipReadOnlyDirectory.root"
    rf = uproot.open(f_in)

    # check that input file contains uproot.ReadOnlyDirectory
    assert any(isinstance(v, uproot.ReadOnlyDirectory) for v in rf.values())
    # check that we can do the conversion now and get histograms out of uproot.ReadOnlyDirectories
    out = convert_histo_root_file(f_in)
    assert out
    # check that output does not contain any Directory-like keys
    rfkeys = set(k.rsplit(";")[0] for k in rf.keys())
    assert all(not isinstance(rf[k], uproot.ReadOnlyDirectory)
               for k, _ in out.keys() if k in rfkeys)
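As in the other examples here, each converted histogram sits under a (name, 'dense_lookup') key and can be fed straight into a dense_lookup; the histogram name below is a placeholder:

from coffea.lookup_tools.dense_lookup import dense_lookup

payload = out[("some_histogram", "dense_lookup")]  # hypothetical histogram name
lookup = dense_lookup(*payload)                    # same unpacking pattern as above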
Example #4
          25]  # available widths (%)
scales = [
    "nominal", "uF_up", "uF_down", "uR_up", "uR_down", "uF_up_uR_up",
    "uF_down_uR_down"
]  # available scale variations
channels = {"ll": "DiLep", "lj": "SL"}
procs = {"res": "Res", "int": "Int"}
# <type> is either "xsec" for the actual cross section or "xabs" for the absolute
# cross section, i.e. the sum of the magnitudes of the positive and negative parts
types = ["xsec", "xabs"]

# <A/H>_int_mg5_pdf_325500_scale_dyn_0p5mtt_positive_event_fraction contains the
# fraction of positive events for each signal point
pos_evt_fraction_name = "int_mg5_pdf_325500_scale_dyn_0p5mtt_positive_event_fraction"
# <A/H>_<res/int>_sushi_nnlo_mg5_lo_kfactor_pdf_325500_<scale> contains the NNLO to LO k-factors
kfactors_name = "sushi_nnlo_mg5_lo_kfactor_pdf_325500"
# <A/H>_<res/int>_mg5_pdf_325500_scale_dyn_0p5mtt_<scale>_<type>_<channel> contains the
# MG5 LO cross section for the relevant process and channel
mg5_LO_xsecs_name = "mg5_pdf_325500_scale_dyn_0p5mtt"

fdict = convert_histo_root_file(rname)

# e.g. widthTOname(2.5) -> "2p5"
widthTOname = lambda width: str(float(width)).replace('.', 'p')
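A hedged sketch of how one of these histogram names could be assembled and read back from fdict, following the naming templates in the comments above and the commented-out block below; the placeholder values and the exact join order are assumptions, and dense_lookup is assumed to be imported as in the rest of the script:

# hypothetical placeholder values; the real loops iterate over bosons, procs, scales, ...
boson, proc, scale, xsec_type, channel = "A", "res", "nominal", "xsec", "DiLep"
dname = "_".join([boson, proc, mg5_LO_xsecs_name, scale, xsec_type, channel])
# -> "A_res_mg5_pdf_325500_scale_dyn_0p5mtt_nominal_xsec_DiLep"
vals = dense_lookup(*fdict[(dname, "dense_lookup")])
errs = dense_lookup(*fdict[(f"{dname}_error", "dense_lookup")])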

# create dict of the fraction of positive events for each signal point
#outdict = {}
#for boson in bosons:
#    sig_dname = "_".join([boson, pos_evt_fraction_name]) # name of signal dist
#    vals = dense_lookup(*fdict[(sig_dname, "dense_lookup")])
#    errs = dense_lookup(*fdict[(f"{sig_dname}_error", "dense_lookup")])

# create dict of LO xsection values and errors
#set_trace()
LO_outdict = {}
for boson in bosons:
    for proc, proc_name in procs.items():
args = parser.parse_args()

proj_dir = os.environ['PROJECT_DIR']
base_jobid = os.environ['base_jobid']
analyzer = 'NNLOqcd_dists'

f_ext = 'TOT.coffea'
outdir = os.path.join(proj_dir, 'plots', base_jobid, analyzer)
if not os.path.isdir(outdir):
    os.makedirs(outdir)

## get values from NNLO root file
nnlo_fname = 'MATRIX_ttmVStheta.root'  # 'xsec_central' dist has only statistical uncs
#nnlo_fname = 'matrixhists_NNPDF.root' # 'cen' dist has only statistical uncs
#nnlo_fname = 'MATRIX_17_abs.root' # has scale and statistical uncs
nnlo_file = convert_histo_root_file(
    os.path.join(proj_dir, 'NNLO_files', nnlo_fname))
nnlo_var = 'xsec_central'
nnlo_dict = Plotter.root_converters_dict_to_hist(
    nnlo_file,
    vars=[nnlo_var],
    sparse_axes_list=[{
        'name': 'dataset',
        'label': "Event Process",
        'fill': 'nnlo'
    }],
    #dense_axes_list=[{'name': 'mtt', 'idx' : 1}, {'name' : 'ctstar', 'idx' : 0}],
    #transpose_da=True,
    dense_axes_list=[{
        'name': 'ctstar',
        'idx': 0
    }, {
                'Central': {},
                'Error': {},
            },
            'Trig': {
                'Central': {},
                'Error': {},
            },
        },
    }
    for year in years_to_run
}

for lep in leptons.keys():
    for year, fname in leptons[lep]['fnames'].items():
        sf_file = convert_histo_root_file(
            os.path.join(proj_dir, 'inputs', 'data', base_jobid, 'lepSFs',
                         fname))
        eta_binning = sf_file[(leptons[lep]['eta'], 'dense_lookup')][1][0]

        #if lep == 'Muons': set_trace()
        sf_output[year][lep]['eta_ranges'] = [
            (eta_binning[idx], eta_binning[idx + 1])
            for idx in range(len(eta_binning) - 1)
        ]
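        # illustrative example (binning not taken from the file): eta_binning = [-2.5, -1.5, 0.0, 1.5, 2.5]
        # would give eta_ranges = [(-2.5, -1.5), (-1.5, 0.0), (0.0, 1.5), (1.5, 2.5)]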
        for idx in range(len(eta_binning) - 1):
            # reco/ID SFs
            sf_output[year][lep]['Reco_ID']['Central'][
                'eta_bin%i' % idx] = dense_lookup(
                    *(sf_file[(leptons[lep]['pt']['reco_id'][idx],
                               'dense_lookup')][0],
                      sf_file[(leptons[lep]['pt']['reco_id'][idx],
mc_input_dir = os.path.join(proj_dir, 'results',
                            '%s_%s' % (args.year, base_jobid), analyzer)
f_ext = 'TOT.coffea'
mc_fnames = sorted([
    os.path.join(mc_input_dir, fname) for fname in os.listdir(mc_input_dir)
    if fname.endswith(f_ext)
])
mc_hdict = plt_tools.add_coffea_files(
    mc_fnames) if len(mc_fnames) > 1 else load(mc_fnames[0])

# get hists
mc_nTrueInt_histo = mc_hdict['PU_nTrueInt']

data_input_dir = os.path.join(proj_dir, 'inputs', 'data', base_jobid, 'Pileup')
# central
data_pu_central = convert_histo_root_file(
    os.path.join(data_input_dir, '%s_data.meta.pu.root' % args.year))
data_pu_dict = Plotter.root_converters_dict_to_hist(
    data_pu_central,
    vars=['pileup'],
    sparse_axes_list=[{
        'name': 'dataset',
        'label': "Event Process",
        'fill': 'data'
    }])
# up
data_pu_up = convert_histo_root_file(
    os.path.join(data_input_dir, '%s_data.meta.pu_up.root' % args.year))
data_pu_up_dict = Plotter.root_converters_dict_to_hist(
    data_pu_up,
    vars=['pileup'],