def test_jec_txt_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testJEC * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.jec.txt",
        "* * tests/samples/Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs.jec.txt.gz",
        "* * tests/samples/Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi.junc.txt",
        "* * tests/samples/Autumn18_V8_MC_UncertaintySources_AK4PFchs.junc.txt",
        "* * tests/samples/Spring16_25nsV10_MC_SF_AK4PFPuppi.jersf.txt"
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    
    jec_out = evaluator['testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi'](test_eta,test_pt)

    print(evaluator['testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi'])
    
    jec_out = evaluator['Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs'](test_eta,test_pt)
    
    print(evaluator['Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs'])

    jersf = evaluator['Spring16_25nsV10_MC_SF_AK4PFPuppi']
    
    print(evaluator['Spring16_25nsV10_MC_SF_AK4PFPuppi'])
    
    junc_out = evaluator['Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi'](test_eta,test_pt)

    print(evaluator['Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi'])
    
    assert('Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale' in evaluator.keys())
    junc_out = evaluator['Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale'](test_eta,test_pt)
    print(evaluator['Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale'])
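
All of these examples assume a few module-level names that the snippets themselves do not define: numpy as np, awkward as ak, coffea.lookup_tools, and a dummy_jagged_eta_pt() helper that fabricates jagged jet kinematics. A minimal sketch of what such a helper might look like (the seed, sizes, and distributions here are illustrative assumptions, not the exact fixture behind these tests):

import numpy as np
import awkward as ak
from coffea import lookup_tools

def dummy_jagged_eta_pt():
    # Illustrative stand-in: random per-event jet counts plus flat eta/pt arrays,
    # so the tests can exercise both flat and jagged (ak.unflatten) evaluation.
    np.random.seed(42)
    counts = np.random.exponential(2, size=50).astype(int)
    entries = int(np.sum(counts))
    test_eta = np.random.uniform(-3.0, 3.0, size=entries)
    test_pt = np.random.exponential(10.0, size=entries)
    return counts, test_eta, test_pt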
Example #2
def test_jet_correction_regrouped_uncertainty_sources():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = []
    levels = []
    for name in dir(evaluator):
        if 'Regrouped_Fall17_17Nov2017_V32_MC_UncertaintySources_AK4PFchs' in name:
            junc_names.append(name)
            if len(name.split('_')) == 9:
                levels.append("_".join(name.split('_')[-2:]))
            else:
                levels.append(name.split('_')[-1])
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, tpl in enumerate(
            list(junc.getUncertainty(JetEta=test_eta, JetPt=test_pt))):
        assert (tpl[0] in levels)
        assert (tpl[1].shape[0] == test_eta.shape[0])
        assert (ak.all(tpl[1] == ak.flatten(juncs_jag[i][1])))
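
The jetmet_tools examples on this page read corrections out of a module-level evaluator rather than building one inline. Following the extractor pattern of Example #1, a sketch of how such a shared evaluator could be prepared; the file names and extensions below are assumptions, so substitute the correction text files actually present in your samples directory:

from coffea import lookup_tools

extractor = lookup_tools.extractor()
extractor.add_weight_sets([
    # "* *" keeps the original correction names as the evaluator keys
    "* * tests/samples/Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.jec.txt",
    "* * tests/samples/Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi.junc.txt",
    "* * tests/samples/Spring16_25nsV10_MC_PtResolution_AK4PFPuppi.jr.txt",
    "* * tests/samples/Spring16_25nsV10_MC_SF_AK4PFPuppi.jersf.txt",
])
extractor.finalize()
evaluator = extractor.make_evaluator()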
Example #3
def test_root_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])

    extractor.finalize(reduce_list=["testSF2d"])

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test flat eval
    test_out = evaluator["testSF2d"](test_eta, test_pt)

    # print it
    print(evaluator["testSF2d"])

    # test structured eval
    test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)
    test_out_jagged = evaluator["testSF2d"](test_eta_jagged, test_pt_jagged)

    assert ak.all(ak.num(test_out_jagged) == counts)
    assert ak.all(ak.flatten(test_out_jagged) == test_out)

    print(test_out)

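    # _testSF2d_expected_output is assumed to be a module-level reference array,
    # presumably the same values listed in Example #18 below
    # (generated by make_expected_lookup.py).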
    diff = np.abs(test_out - _testSF2d_expected_output)
    print("Max diff: %.16f" % diff.max())
    print("Median diff: %.16f" % np.median(diff))
    print("Diff over threshold rate: %.1f %%" %
          (100 * (diff >= 1.0e-8).sum() / diff.size))
    assert (diff < 1.0e-8).all()
Example #4
def test_jet_resolution_sf_2d():
    from coffea.jetmet_tools import JetResolutionScaleFactor
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in ["Autumn18_V7_MC_SF_AK4PFchs"]})
    resosfs = resosf.getScaleFactor(JetPt=test_pt, JetEta=test_eta)
Example #5
def test_jet_correction_uncertainty_sources():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    junc_names = []
    levels = []
    for name in dir(evaluator):
        if 'Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
        #test for underscore in dataera
        if 'Fall17_17Nov2017_V6_MC_UncertaintySources_AK4PFchs_AbsoluteFlavMap' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    for level, corrs in juncs:
        assert (level in levels)
        assert (corrs.shape[0] == test_eta.shape[0])
Example #6
def test_factorized_jet_corrector():
    from coffea.jetmet_tools import FactorizedJetCorrector

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_Rho = np.full_like(test_eta, 100.)
    test_A = np.full_like(test_eta, 5.)

    jec_names = [
        'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi'
    ]
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names})

    print(corrector)

    pt_copy = np.copy(test_pt)

    corrs = corrector.getCorrection(JetEta=test_eta,
                                    Rho=test_Rho,
                                    JetPt=test_pt,
                                    JetA=test_A)

    assert ((np.abs(pt_copy - test_pt) < 1e-6).all())
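
getCorrection returns the combined multiplicative correction factor per jet (the product of the configured levels); the assert above only verifies that the input pt array was not modified in place. A short sketch of how the factors would typically be applied, with illustrative variable names:

corrected_pt = corrs * test_pt  # total JEC factor: L1FastJet * L2Relative * L2L3Residual * L3Absolute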
Example #7
def test_export1d():
    import uproot
    import os
    from coffea.hist import export1d

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    h_regular_bins = hist.Hist("regular_joe", hist.Bin("x", "x", 20, 0, 200))
    h_regular_bins.fill(x=test_pt)

    hout = export1d(h_regular_bins)

    filename = 'test_export1d.root'

    with uproot.create(filename) as fout:
        fout['regular_joe'] = hout
        fout.close()

    with uproot.open(filename) as fin:
        hin = fin['regular_joe']

    assert (np.all(hin.edges == hout.edges))
    assert (np.all(hin.values == hout.values))

    del hin
    del fin

    if os.path.exists(filename):
        os.remove(filename)
Example #8
def test_jet_correction_uncertainty_sources():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = []
    levels = []
    for name in dir(evaluator):
        if 'Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
        #test for underscore in dataera
        if 'Fall17_17Nov2017_V6_MC_UncertaintySources_AK4PFchs_AbsoluteFlavMap' in name:
            junc_names.append(name)
            levels.append(name.split('_')[-1])
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs):
        assert (level in levels)
        assert (corrs.shape[0] == test_eta.shape[0])
        assert (ak.all(corrs == ak.flatten(juncs_jag[i][1])))
Example #9
def test_weights_partial():
    from coffea.analysis_tools import Weights

    counts, _, _ = dummy_jagged_eta_pt()
    w1 = np.random.normal(loc=1.0, scale=0.01, size=counts.size)
    w2 = np.random.normal(loc=1.3, scale=0.05, size=counts.size)

    weights = Weights(counts.size, storeIndividual=True)
    weights.add("w1", w1)
    weights.add("w2", w2)

    test_exclude_none = weights.weight()
    assert np.all(np.abs(test_exclude_none - w1 * w2) < 1e-6)

    test_exclude1 = weights.partial_weight(exclude=["w1"])
    assert np.all(np.abs(test_exclude1 - w2) < 1e-6)

    test_include1 = weights.partial_weight(include=["w1"])
    assert np.all(np.abs(test_include1 - w1) < 1e-6)

    test_exclude2 = weights.partial_weight(exclude=["w2"])
    assert np.all(np.abs(test_exclude2 - w1) < 1e-6)

    test_include2 = weights.partial_weight(include=["w2"])
    assert np.all(np.abs(test_include2 - w2) < 1e-6)

    test_include_both = weights.partial_weight(include=["w1", "w2"])
    assert np.all(np.abs(test_include_both - w1 * w2) < 1e-6)

    # Check that exception is thrown if arguments are incompatible
    error_raised = False
    try:
        weights.partial_weight(exclude=["w1"], include=["w2"])
    except ValueError:
        error_raised = True
    assert error_raised

    error_raised = False
    try:
        weights.partial_weight()
    except ValueError:
        error_raised = True
    assert error_raised

    # Check that exception is thrown if individual weights
    # are not saved from the start
    weights = Weights(counts.size, storeIndividual=False)
    weights.add("w1", w1)
    weights.add("w2", w2)

    error_raised = False
    try:
        weights.partial_weight(exclude=["test"], include=["test"])
    except ValueError:
        error_raised = True
    assert error_raised
Example #10
def test_jet_correction_uncertainty():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = ["Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi"]
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs):
        assert corrs.shape[0] == test_eta.shape[0]
        assert ak.all(corrs == ak.flatten(juncs_jag[i][1]))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag.tolist())
    print("eta:", test_eta_jag.tolist(), "\n")

    juncs_jag_ref = ak.unflatten(
        np.array([
            [1.053504214, 0.946495786],
            [1.033343349, 0.966656651],
            [1.065159157, 0.934840843],
            [1.033140127, 0.966859873],
            [1.016858652, 0.983141348],
            [1.130199999, 0.869800001],
            [1.039968468, 0.960031532],
            [1.033100002, 0.966899998],
        ]),
        counts,
    )
    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs_jag):
        print("Index:", i)
        print("Correction level:", level)
        print("Reference Uncertainties (jagged):", juncs_jag_ref)
        print("Uncertainties (jagged):", corrs)
        assert ak.all(
            np.abs(ak.flatten(juncs_jag_ref) - ak.flatten(corrs)) < 1e-6)
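
Each item yielded by getUncertainty is a (level, values) pair, where values carries two entries per jet, the up and down variation factors (in the reference table above, the first column sits above 1 and the second below). A hedged sketch of applying them to the jagged pt, with illustrative names:

for level, corrs in junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag):
    pt_up = test_pt_jag * corrs[:, :, 0]    # pt shifted up by this uncertainty source
    pt_down = test_pt_jag * corrs[:, :, 1]  # pt shifted down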
Example #11
def test_jet_resolution():
    from coffea.jetmet_tools import JetResolution

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    test_Rho = np.full_like(test_eta, 10.0)

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)
    test_Rho_jag = ak.unflatten(test_Rho, counts)

    jer_names = ["Spring16_25nsV10_MC_PtResolution_AK4PFPuppi"]
    reso = JetResolution(**{name: evaluator[name] for name in jer_names})

    print(reso)

    resos = reso.getResolution(JetEta=test_eta, Rho=test_Rho, JetPt=test_pt)
    resos_jag = reso.getResolution(JetEta=test_eta_jag,
                                   Rho=test_Rho_jag,
                                   JetPt=test_pt_jag)
    assert ak.all(np.abs(resos - ak.flatten(resos_jag)) < 1e-6)

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    test_Rho_jag = test_Rho_jag[0:3]
    test_Rho_jag = ak.concatenate(
        [test_Rho_jag[:-1], [ak.concatenate([test_Rho_jag[-1, :-1], 100.0])]])
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag)
    print("rho:", test_Rho_jag, "\n")

    resos_jag_ref = ak.unflatten(
        np.array([
            0.21974642,
            0.32421591,
            0.33702479,
            0.27420327,
            0.13940689,
            0.48134521,
            0.26564994,
            1.0,
        ]),
        counts,
    )
    resos_jag = reso.getResolution(JetEta=test_eta_jag,
                                   Rho=test_Rho_jag,
                                   JetPt=test_pt_jag)
    print("Reference Resolution (jagged):", resos_jag_ref)
    print("Resolution (jagged):", resos_jag)
    # NB: 5e-4 tolerance was agreed upon by lgray and aperloff, if the differences get bigger over time
    #     we need to agree upon how these numbers are evaluated (double/float conversion is kinda random)
    assert ak.all(
        np.abs(ak.flatten(resos_jag_ref) - ak.flatten(resos_jag)) < 5e-4)
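
The value returned by getResolution is the relative pT resolution as a function of eta, rho, and pT. One common downstream use, together with the scale factors from the JERSF examples, is stochastic smearing of simulated jets; a rough sketch of that recipe under the usual CMS prescription (sf and reso here are assumed per-jet arrays of the nominal scale factor and the relative resolution, and this is not something the test itself performs):

rand = np.random.normal(loc=0.0, scale=reso)                   # one Gaussian draw per jet
smear = 1.0 + rand * np.sqrt(np.maximum(sf ** 2 - 1.0, 0.0))   # stochastic smearing factor
pt_smeared = test_pt * smear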
Example #12
def test_jet_resolution_sf():
    from coffea.jetmet_tools import JetResolutionScaleFactor

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    
    jersf_names = ['Spring16_25nsV10_MC_SF_AK4PFPuppi']
    resosf = JetResolutionScaleFactor(**{name: evaluator[name] for name in jersf_names})
    
    print(resosf)
    
    resosfs = resosf.getScaleFactor(JetEta=test_eta)
Example #13
def test_factorized_jet_corrector():
    from coffea.jetmet_tools import FactorizedJetCorrector

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_Rho = np.full_like(test_eta, 100.)
    test_A = np.full_like(test_eta, 5.)

    jec_names = [
        'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi'
    ]
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names})

    print(corrector)

    pt_copy = np.copy(test_pt)

    corrs = corrector.getCorrection(JetEta=test_eta,
                                    Rho=test_Rho,
                                    JetPt=test_pt,
                                    JetA=test_A)

    assert ((np.abs(pt_copy - test_pt) < 1e-6).all())

    # Test for bug #320
    def make_starts_stops(counts, data, padding):
        cumcounts = np.cumsum(counts)
        first_nonempty_event = cumcounts[np.nonzero(counts > 1)[0][0]]
        data_pad = np.empty(data.size + padding)
        data_pad[:first_nonempty_event] = data[:first_nonempty_event]
        data_pad[first_nonempty_event + padding:] = data[first_nonempty_event:]
        starts = np.r_[0, cumcounts[:-1]]
        starts[starts >= first_nonempty_event] += padding
        return awkward.JaggedArray(starts, starts + counts, data_pad)

    test_pt_jag = make_starts_stops(counts, test_pt, 5)
    test_eta_jag = make_starts_stops(counts, test_eta, 3)

    test_Rho_jag = awkward.JaggedArray.fromcounts(counts, test_Rho)
    test_A_jag = awkward.JaggedArray.fromcounts(counts, test_A)

    corrs_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                        Rho=test_Rho_jag,
                                        JetPt=test_pt_jag,
                                        JetA=test_A_jag)

    assert ((np.abs(pt_copy - test_pt_jag.flatten()) < 1e-6).all())
    assert ((np.abs(corrs - corrs_jag.flatten()) < 1e-6).all())
Example #14
def test_histo_json_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(["testJson * tests/samples/EIDISO_WH_out.histo.json"])
    extractor.finalize()
    
    evaluator = extractor.make_evaluator()
    
    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    
    sf_out = evaluator['testJsonEIDISO_WH/eta_pt_ratio_value'](test_eta, test_pt)
    sf_err_out = evaluator['testJsonEIDISO_WH/eta_pt_ratio_error'](test_eta, test_pt)
    print(sf_out)
    print(sf_err_out)
Example #15
def test_jet_resolution():
    from coffea.jetmet_tools import JetResolution

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_Rho = np.full_like(test_eta, 100.)

    jer_names = ['Spring16_25nsV10_MC_PtResolution_AK4PFPuppi']
    reso = JetResolution(**{name: evaluator[name] for name in jer_names})

    print(reso)

    resos = reso.getResolution(JetEta=test_eta, Rho=test_Rho, JetPt=test_pt)
Example #16
def test_btag_csv_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(["testBTag * tests/samples/testBTagSF.btag.csv"])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    # discriminant used for reshaping, zero otherwise
    test_discr = np.zeros_like(test_eta)

    sf_out = evaluator['testBTagCSVv2_1_comb_up_0'](test_eta, test_pt,
                                                    test_discr)
    print(sf_out)
Example #17
def test_jet_correction_uncertainty():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    
    junc_names = ['Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi']
    junc = JetCorrectionUncertainty(**{name: evaluator[name] for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    for level, corrs in juncs:
        assert(corrs.shape[0] == test_eta.shape[0])
Example #18
def test_root_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])

    extractor.finalize(reduce_list=['testSF2d'])

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test flat eval
    test_out = evaluator["testSF2d"](test_eta, test_pt)

    # print it
    print(evaluator["testSF2d"])

    # test structured eval
    test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)
    test_out_jagged = evaluator["testSF2d"](test_eta_jagged, test_pt_jagged)

    assert ak.all(ak.num(test_out_jagged) == counts)
    assert ak.all(ak.flatten(test_out_jagged) == test_out)

    # From make_expected_lookup.py
    expected_output = np.array([
        0.90780139, 0.82748538, 0.86332178, 0.86332178, 0.97981155, 0.79701495,
        0.88245934, 0.82857144, 0.91884059, 0.97466666, 0.94072163, 1.00775194,
        0.82748538, 1.00775194, 0.97203946, 0.98199672, 0.80655736, 0.90893763,
        0.88245934, 0.79701495, 0.82748538, 0.82857144, 0.91884059, 0.90893763,
        0.97520661, 0.97520661, 0.82748538, 0.91884059, 0.97203946, 0.88245934,
        0.79701495, 0.9458763, 1.00775194, 0.80655736, 1.00775194, 1.00775194,
        0.98976982, 0.98976982, 0.86332178, 0.94072163, 0.80655736, 0.98976982,
        0.96638656, 0.9458763, 0.90893763, 0.9529984, 0.9458763, 0.9529984,
        0.80655736, 0.80655736, 0.80655736, 0.98976982, 0.97466666, 0.98199672,
        0.86332178, 1.03286386, 0.94072163, 1.03398061, 0.82857144, 0.80655736,
        1.00775194, 0.80655736
    ])

    print(test_out)

    diff = np.abs(test_out - expected_output)
    print("Max diff: %.16f" % diff.max())
    print("Median diff: %.16f" % np.median(diff))
    print("Diff over threshold rate: %.1f %%" %
          (100 * (diff >= 1.e-8).sum() / diff.size))
    assert (diff < 1.e-8).all()
Example #19
def test_evaluator_exceptions():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(["testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"])

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    test_eta_jagged = awkward.JaggedArray.fromcounts(counts, test_eta)
    test_pt_jagged = awkward.JaggedArray.fromcounts(counts, test_pt)

    extractor.finalize()
    evaluator = extractor.make_evaluator()
    
    try:
        test_out = evaluator["testSF2d"](test_pt_jagged, test_eta)
    except Exception as e:
        assert(e.args[0] == 'do not mix JaggedArrays and numpy arrays when calling a derived class of lookup_base')
Example #20
def test_jec_txt_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testJEC * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.jec.txt"
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    jec_out = evaluator[
        'testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi'](test_eta,
                                                                test_pt)

    print(jec_out)
Example #21
def test_weights():
    from coffea.analysis_tools import Weights

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    scale_central = np.random.normal(loc=1.0, scale=0.01, size=counts.size)
    scale_up = scale_central * 1.10
    scale_down = scale_central * 0.95
    scale_up_shift = 0.10 * scale_central
    scale_down_shift = 0.05 * scale_central

    weight = Weights(counts.size)
    weight.add("test", scale_central, weightUp=scale_up, weightDown=scale_down)
    weight.add(
        "testShift",
        scale_central,
        weightUp=scale_up_shift,
        weightDown=scale_down_shift,
        shift=True,
    )

    var_names = weight.variations
    expected_names = ["testShiftUp", "testShiftDown", "testUp", "testDown"]
    for name in expected_names:
        assert name in var_names

    test_central = weight.weight()
    exp_weight = scale_central * scale_central

    assert np.all(np.abs(test_central - (exp_weight)) < 1e-6)

    test_up = weight.weight("testUp")
    exp_up = scale_central * scale_central * 1.10

    assert np.all(np.abs(test_up - (exp_up)) < 1e-6)

    test_down = weight.weight("testDown")
    exp_down = scale_central * scale_central * 0.95

    assert np.all(np.abs(test_down - (exp_down)) < 1e-6)

    test_shift_up = weight.weight("testUp")

    assert np.all(np.abs(test_shift_up - (exp_up)) < 1e-6)

    test_shift_down = weight.weight("testDown")

    assert np.all(np.abs(test_shift_down - (exp_down)) < 1e-6)
Example #22
def test_jet_resolution_sf():
    from coffea.jetmet_tools import JetResolutionScaleFactor

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    jersf_names = ["Spring16_25nsV10_MC_SF_AK4PFPuppi"]
    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in jersf_names})

    print(resosf)

    # 0-jet compatibility
    assert resosf.getScaleFactor(JetEta=test_eta[:0]).shape == (0, 3)

    resosfs = resosf.getScaleFactor(JetEta=test_eta)
    resosfs_jag = resosf.getScaleFactor(JetEta=test_eta_jag)
    assert ak.all(resosfs == ak.flatten(resosfs_jag))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag, "\n")

    resosfs_jag_ref = ak.unflatten(
        np.array([
            [1.857, 1.928, 1.786],
            [1.084, 1.095, 1.073],
            [1.364, 1.403, 1.325],
            [1.177, 1.218, 1.136],
            [1.138, 1.151, 1.125],
            [1.364, 1.403, 1.325],
            [1.177, 1.218, 1.136],
            [1.082, 1.117, 1.047],
        ]),
        counts,
    )
    resosfs_jag = resosf.getScaleFactor(JetEta=test_eta_jag)
    print("Reference Resolution SF (jagged):", resosfs_jag_ref)
    print("Resolution SF (jagged):", resosfs_jag)
    assert ak.all(
        np.abs(ak.flatten(resosfs_jag_ref) - ak.flatten(resosfs_jag)) < 1e-6)
Example #23
def test_jet_resolution_sf_2d():
    from coffea.jetmet_tools import JetResolutionScaleFactor

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in ["Autumn18_V7_MC_SF_AK4PFchs"]})

    print(resosf)

    # 0-jet compatibility
    assert resosf.getScaleFactor(JetPt=test_pt[:0],
                                 JetEta=test_eta[:0]).shape == (0, 3)

    resosfs = resosf.getScaleFactor(JetPt=test_pt, JetEta=test_eta)
    resosfs_jag = resosf.getScaleFactor(JetPt=test_pt_jag, JetEta=test_eta_jag)
    assert ak.all(resosfs == ak.flatten(resosfs_jag))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag, "\n")

    resosfs_jag_ref = ak.unflatten(
        np.array([
            [1.11904, 1.31904, 1.0],
            [1.1432, 1.2093, 1.0771],
            [1.16633, 1.36633, 1.0],
            [1.17642, 1.37642, 1.0],
            [1.1808, 1.1977, 1.1640],
            [1.15965, 1.35965, 1.0],
            [1.17661, 1.37661, 1.0],
            [1.1175, 1.1571, 1.0778],
        ]),
        counts,
    )
    resosfs_jag = resosf.getScaleFactor(JetPt=test_pt_jag, JetEta=test_eta_jag)
    print("Reference Resolution SF (jagged):", resosfs_jag_ref)
    print("Resolution SF (jagged):", resosfs_jag)
    assert ak.all(
        np.abs(ak.flatten(resosfs_jag_ref) - ak.flatten(resosfs_jag)) < 1e-6)
Example #24
def test_evaluator_exceptions():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    # test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)

    extractor.finalize()
    evaluator = extractor.make_evaluator()

    try:
        evaluator["testSF2d"](test_pt_jagged, test_eta)
    except Exception as e:
        assert isinstance(e, TypeError)
Example #25
def test_jec_txt_effareas():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(['* * tests/samples/photon_id.ea.txt'])
    extractor.finalize()

    evaluator = extractor.make_evaluator()
    
    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    ch_out = evaluator['photon_id_EA_CHad'](test_eta)
    print(evaluator['photon_id_EA_CHad'])

    nh_out = evaluator['photon_id_EA_NHad'](test_eta)
    print(evaluator['photon_id_EA_NHad'])

    ph_out = evaluator['photon_id_EA_Pho'](test_eta)
    print(evaluator['photon_id_EA_Pho'])
Example #26
def test_weights():
    from coffea.processor import Weights

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    scale_central = np.random.normal(loc=1.0, scale=0.01, size=counts.size)
    scale_up = scale_central * 1.10
    scale_down = scale_central * 0.95
    scale_up_shift = 0.10 * scale_central
    scale_down_shift = 0.05 * scale_central

    weight = Weights(counts.size)
    weight.add('test', scale_central, weightUp=scale_up, weightDown=scale_down)
    weight.add('testShift',
               scale_central,
               weightUp=scale_up_shift,
               weightDown=scale_down_shift,
               shift=True)

    var_names = weight.variations
    expected_names = ['testShiftUp', 'testShiftDown', 'testUp', 'testDown']
    for name in expected_names:
        assert (name in var_names)

    test_central = weight.weight()
    exp_weight = scale_central * scale_central

    assert (np.all(np.abs(test_central - (exp_weight)) < 1e-6))

    test_up = weight.weight('testUp')
    exp_up = scale_central * scale_central * 1.10

    assert (np.all(np.abs(test_up - (exp_up)) < 1e-6))

    test_down = weight.weight('testDown')
    exp_down = scale_central * scale_central * 0.95

    assert (np.all(np.abs(test_down - (exp_down)) < 1e-6))

    test_shift_up = weight.weight('testUp')

    assert (np.all(np.abs(test_shift_up - (exp_up)) < 1e-6))

    test_shift_down = weight.weight('testDown')

    assert (np.all(np.abs(test_shift_down - (exp_down)) < 1e-6))
Example #27
def test_packed_selection():
    from coffea.processor import PackedSelection

    sel = PackedSelection()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    all_true = np.full(shape=counts.shape, fill_value=True, dtype=bool)
    all_false = np.full(shape=counts.shape, fill_value=False, dtype=bool)
    ones = np.ones(shape=counts.shape, dtype=np.uint64)
    wrong_shape = np.ones(shape=(counts.shape[0] - 5, ), dtype=bool)

    sel.add('all_true', all_true)
    sel.add('all_false', all_false)

    assert (np.all(sel.require(all_true=True, all_false=False) == all_true))
    assert (np.all(sel.all('all_true', 'all_false') == all_false))

    try:
        sel.require(all_true=1, all_false=0)
    except ValueError:
        pass

    try:
        sel.add('wrong_shape', wrong_shape)
    except ValueError:
        pass

    try:
        sel.add('ones', ones)
    except ValueError:
        pass

    try:
        overpack = PackedSelection()
        for i in range(65):
            overpack.add('sel_%d' % i, all_true)
    except RuntimeError:
        pass
Example #28
def test_packed_selection():
    from coffea.analysis_tools import PackedSelection

    sel = PackedSelection()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    all_true = np.full(shape=counts.shape, fill_value=True, dtype=bool)
    all_false = np.full(shape=counts.shape, fill_value=False, dtype=bool)
    ones = np.ones(shape=counts.shape, dtype=np.uint64)
    wrong_shape = np.ones(shape=(counts.shape[0] - 5, ), dtype=bool)

    sel.add("all_true", all_true)
    sel.add("all_false", all_false)

    assert np.all(sel.require(all_true=True, all_false=False) == all_true)
    assert np.all(sel.all("all_true", "all_false") == all_false)

    try:
        sel.require(all_true=1, all_false=0)
    except ValueError:
        pass

    try:
        sel.add("wrong_shape", wrong_shape)
    except ValueError:
        pass

    try:
        sel.add("ones", ones)
    except ValueError:
        pass

    try:
        overpack = PackedSelection()
        for i in range(65):
            overpack.add("sel_%d", all_true)
    except RuntimeError:
        pass
Example #29
def test_jet_correction_uncertainty():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = ['Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi']
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs):
        assert (corrs.shape[0] == test_eta.shape[0])
        assert (ak.all(corrs == ak.flatten(juncs_jag[i][1])))
Example #30
def test_jec_txt_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testJEC * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.jec.txt",
        "* * tests/samples/Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs.jec.txt.gz",
        "* * tests/samples/Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi.junc.txt",
        "* * tests/samples/Autumn18_V8_MC_UncertaintySources_AK4PFchs.junc.txt",
        "* * tests/samples/Spring16_25nsV10_MC_SF_AK4PFPuppi.jersf.txt",
        "* * tests/samples/Autumn18_V7b_MC_SF_AK8PFchs.jersf.txt.gz",
        "* * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4Calo.jec.txt.gz",
        "* * tests/samples/Fall17_17Nov2017_V32_MC_L1JPTOffset_AK4JPT.jec.txt.gz",
        "* * tests/samples/Fall17_17Nov2017B_V32_DATA_L2Relative_AK4Calo.txt.gz",
        "* * tests/samples/Autumn18_V7b_DATA_SF_AK4PF.jersf.txt",
        "* * tests/samples/Autumn18_RunC_V19_DATA_L2Relative_AK8PFchs.jec.txt.gz",
        "* * tests/samples/Autumn18_RunA_V19_DATA_L2Relative_AK4Calo.jec.txt",
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test structured eval
    test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)

    jec_out = evaluator[
        "testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi"](test_eta,
                                                                test_pt)
    jec_out_jagged = evaluator[
        "testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi"](
            test_eta_jagged, test_pt_jagged)

    print(evaluator["testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi"])

    jec_out = evaluator["Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs"](
        test_eta, test_pt)
    jec_out_jagged = evaluator[
        "Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs"](test_eta_jagged,
                                                               test_pt_jagged)
    print(jec_out)
    print(jec_out_jagged)
    print(evaluator["Summer16_07Aug2017_V11_L1fix_MC_L2Relative_AK4PFchs"])

    jersf_out = evaluator["Spring16_25nsV10_MC_SF_AK4PFPuppi"](test_eta,
                                                               test_pt)
    jersf_out_jagged = evaluator["Spring16_25nsV10_MC_SF_AK4PFPuppi"](
        test_eta_jagged, test_pt_jagged)
    print(jersf_out)
    print(jersf_out_jagged)
    print(evaluator["Spring16_25nsV10_MC_SF_AK4PFPuppi"])

    junc_out = evaluator["Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi"](
        test_eta, test_pt)
    junc_out_jagged = evaluator[
        "Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi"](test_eta_jagged,
                                                          test_pt_jagged)
    print(junc_out)
    print(junc_out_jagged)
    print(evaluator["Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi"])

    assert ("Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale"
            in evaluator.keys())
    junc_out = evaluator[
        "Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale"](test_eta,
                                                                    test_pt)
    junc_out_jagged = evaluator[
        "Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale"](
            test_eta_jagged, test_pt_jagged)
    print(junc_out)
    print(junc_out_jagged)
    print(
        evaluator["Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale"])