Example No. 1
def test_jet_resolution():
    from coffea.jetmet_tools import JetResolution

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    test_Rho = np.full_like(test_eta, 10.0)

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)
    test_Rho_jag = ak.unflatten(test_Rho, counts)

    jer_names = ["Spring16_25nsV10_MC_PtResolution_AK4PFPuppi"]
    reso = JetResolution(**{name: evaluator[name] for name in jer_names})

    print(reso)

    resos = reso.getResolution(JetEta=test_eta, Rho=test_Rho, JetPt=test_pt)
    resos_jag = reso.getResolution(JetEta=test_eta_jag,
                                   Rho=test_Rho_jag,
                                   JetPt=test_pt_jag)
    assert ak.all(np.abs(resos - ak.flatten(resos_jag)) < 1e-6)

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    test_Rho_jag = test_Rho_jag[0:3]
    test_Rho_jag = ak.concatenate(
        [test_Rho_jag[:-1], [ak.concatenate([test_Rho_jag[-1, :-1], 100.0])]])
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag)
    print("rho:", test_Rho_jag, "\n")

    resos_jag_ref = ak.unflatten(
        np.array([
            0.21974642,
            0.32421591,
            0.33702479,
            0.27420327,
            0.13940689,
            0.48134521,
            0.26564994,
            1.0,
        ]),
        counts,
    )
    resos_jag = reso.getResolution(JetEta=test_eta_jag,
                                   Rho=test_Rho_jag,
                                   JetPt=test_pt_jag)
    print("Reference Resolution (jagged):", resos_jag_ref)
    print("Resolution (jagged):", resos_jag)
    # NB: 5e-4 tolerance was agreed upon by lgray and aperloff, if the differences get bigger over time
    #     we need to agree upon how these numbers are evaluated (double/float conversion is kinda random)
    assert ak.all(
        np.abs(ak.flatten(resos_jag_ref) - ak.flatten(resos_jag)) < 5e-4)
Example No. 2
def test_normal_interval():
    from coffea.hist.plot import normal_interval

    # Reference weighted efficiency and error from ROOT's TEfficiency

    denom = np.array([
        89.01457591590004,
        2177.066076428943,
        6122.5256890981855,
        0.0,
        100.27757990710668,
    ])
    num = np.array([
        75.14287743709515,
        2177.066076428943,
        5193.454723043864,
        0.0,
        84.97723540536361,
    ])
    denom_sumw2 = np.array([
        94.37919737476827, 10000.0, 6463.46795877633, 0.0, 105.90898005417333
    ])
    num_sumw2 = np.array(
        [67.2202147680005, 10000.0, 4647.983931785646, 0.0, 76.01275761253757])
    ref_hi = np.array([
        0.0514643476600107, 0.0, 0.0061403263960343, np.nan, 0.0480731185500146
    ])
    ref_lo = np.array([
        0.0514643476600107, 0.0, 0.0061403263960343, np.nan, 0.0480731185500146
    ])

    interval = normal_interval(num, denom, num_sumw2, denom_sumw2)
    threshold = 1e-6

    lo, hi = interval

    assert len(ref_hi) == len(hi)
    assert len(ref_lo) == len(lo)

    for i in range(len(ref_hi)):
        if np.isnan(ref_hi[i]):
            assert np.isnan(hi[i])
        elif ref_hi[i] == 0.0:
            assert hi[i] == 0.0
        else:
            assert np.abs(hi[i] / ref_hi[i] - 1) < threshold

        if np.isnan(ref_lo[i]):
            assert np.isnan(lo[i])
        elif ref_lo[i] == 0.0:
            assert lo[i] == 0.0
        else:
            assert np.abs(lo[i] / ref_lo[i] - 1) < threshold
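
Note: the per-element loop above can also be written with NumPy's testing helpers. A minimal sketch, assuming the same NaN/zero/relative-tolerance semantics are acceptable:

    # Sketch (not part of the original test): vectorized equivalent of the
    # element-wise checks. equal_nan=True makes matching NaNs compare equal,
    # and atol=0 forces exact agreement wherever the reference value is 0.0.
    np.testing.assert_allclose(hi, ref_hi, rtol=threshold, atol=0, equal_nan=True)
    np.testing.assert_allclose(lo, ref_lo, rtol=threshold, atol=0, equal_nan=True)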
Example No. 3
def test_factorized_jet_corrector():
    from coffea.jetmet_tools import FactorizedJetCorrector

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_Rho = np.full_like(test_eta, 100.)
    test_A = np.full_like(test_eta, 5.)

    jec_names = [
        'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi'
    ]
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names})

    print(corrector)

    pt_copy = np.copy(test_pt)

    corrs = corrector.getCorrection(JetEta=test_eta,
                                    Rho=test_Rho,
                                    JetPt=test_pt,
                                    JetA=test_A)

    assert ((np.abs(pt_copy - test_pt) < 1e-6).all())

    # Test for bug #320
    def make_starts_stops(counts, data, padding):
        cumcounts = np.cumsum(counts)
        first_nonempty_event = cumcounts[np.nonzero(counts > 1)[0][0]]
        data_pad = np.empty(data.size + padding)
        data_pad[:first_nonempty_event] = data[:first_nonempty_event]
        data_pad[first_nonempty_event + padding:] = data[first_nonempty_event:]
        starts = np.r_[0, cumcounts[:-1]]
        starts[starts >= first_nonempty_event] += padding
        return awkward.JaggedArray(starts, starts + counts, data_pad)

    test_pt_jag = make_starts_stops(counts, test_pt, 5)
    test_eta_jag = make_starts_stops(counts, test_eta, 3)

    test_Rho_jag = awkward.JaggedArray.fromcounts(counts, test_Rho)
    test_A_jag = awkward.JaggedArray.fromcounts(counts, test_A)

    corrs_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                        Rho=test_Rho_jag,
                                        JetPt=test_pt_jag,
                                        JetA=test_A_jag)

    assert ((np.abs(pt_copy - test_pt_jag.flatten()) < 1e-6).all())
    assert ((np.abs(corrs - corrs_jag.flatten()) < 1e-6).all())
Example No. 4
def test_factorized_jet_corrector():
    from coffea.jetmet_tools import FactorizedJetCorrector

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_Rho = np.full_like(test_eta, 100.)
    test_A = np.full_like(test_eta, 5.)

    jec_names = [
        'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi',
        'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi'
    ]
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names})

    print(corrector)

    pt_copy = np.copy(test_pt)

    corrs = corrector.getCorrection(JetEta=test_eta,
                                    Rho=test_Rho,
                                    JetPt=test_pt,
                                    JetA=test_A)

    assert ((np.abs(pt_copy - test_pt) < 1e-6).all())
Example No. 5
def test_root_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])

    extractor.finalize(reduce_list=["testSF2d"])

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test flat eval
    test_out = evaluator["testSF2d"](test_eta, test_pt)

    # print it
    print(evaluator["testSF2d"])

    # test structured eval
    test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)
    test_out_jagged = evaluator["testSF2d"](test_eta_jagged, test_pt_jagged)

    assert ak.all(ak.num(test_out_jagged) == counts)
    assert ak.all(ak.flatten(test_out_jagged) == test_out)

    print(test_out)

    diff = np.abs(test_out - _testSF2d_expected_output)
    print("Max diff: %.16f" % diff.max())
    print("Median diff: %.16f" % np.median(diff))
    print("Diff over threshold rate: %.1f %%" %
          (100 * (diff >= 1.0e-8).sum() / diff.size))
    assert (diff < 1.0e-8).all()
Example No. 6
def test_weights():
    from coffea.processor import Weights

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    scale_central = np.random.normal(loc=1.0, scale=0.01, size=counts.size)
    scale_up = scale_central * 1.10
    scale_down = scale_central * 0.95
    scale_up_shift = 0.10 * scale_central
    scale_down_shift = 0.05 * scale_central

    weight = Weights(counts.size)
    weight.add('test', scale_central, weightUp=scale_up, weightDown=scale_down)
    weight.add('testShift',
               scale_central,
               weightUp=scale_up_shift,
               weightDown=scale_down_shift,
               shift=True)

    var_names = weight.variations
    expected_names = ['testShiftUp', 'testShiftDown', 'testUp', 'testDown']
    for name in expected_names:
        assert (name in var_names)

    test_central = weight.weight()
    exp_weight = scale_central * scale_central

    assert (np.all(np.abs(test_central - (exp_weight)) < 1e-6))

    test_up = weight.weight('testUp')
    exp_up = scale_central * scale_central * 1.10

    assert (np.all(np.abs(test_up - (exp_up)) < 1e-6))

    test_down = weight.weight('testDown')
    exp_down = scale_central * scale_central * 0.95

    assert (np.all(np.abs(test_down - (exp_down)) < 1e-6))

    test_shift_up = weight.weight('testShiftUp')

    assert (np.all(np.abs(test_shift_up - (exp_up)) < 1e-6))

    test_shift_down = weight.weight('testShiftDown')

    assert (np.all(np.abs(test_shift_down - (exp_down)) < 1e-6))
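
For context, shift=True is intended to book weightUp/weightDown as additive shifts about the central weight, which is why the shifted variations reproduce the multiplicative ones above. A small sketch of that relation, using the arrays already defined in this test:

    # Sketch (not part of the original test): the additive shifts recover the
    # same up/down scales used for the multiplicative 'test' variation.
    assert np.all(np.abs((scale_central + scale_up_shift) - scale_up) < 1e-6)
    assert np.all(np.abs((scale_central - scale_down_shift) - scale_down) < 1e-6)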
Example No. 7
def test_jet_correction_uncertainty():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = ["Summer16_23Sep2016V3_MC_Uncertainty_AK4PFPuppi"]
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs):
        assert corrs.shape[0] == test_eta.shape[0]
        assert ak.all(corrs == ak.flatten(juncs_jag[i][1]))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag.tolist())
    print("eta:", test_eta_jag.tolist(), "\n")

    juncs_jag_ref = ak.unflatten(
        np.array([
            [1.053504214, 0.946495786],
            [1.033343349, 0.966656651],
            [1.065159157, 0.934840843],
            [1.033140127, 0.966859873],
            [1.016858652, 0.983141348],
            [1.130199999, 0.869800001],
            [1.039968468, 0.960031532],
            [1.033100002, 0.966899998],
        ]),
        counts,
    )
    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs_jag):
        print("Index:", i)
        print("Correction level:", level)
        print("Reference Uncertainties (jagged):", juncs_jag_ref)
        print("Uncertainties (jagged):", corrs)
        assert ak.all(
            np.abs(ak.flatten(juncs_jag_ref) - ak.flatten(corrs)) < 1e-6)
Example No. 8
# Constructor fragment from a dummy candidate/event wrapper used in the
# analysis-object tests; thep4, px, py, pz, energy and counts are assumed
# to be defined in the enclosing scope.
def __init__(self):
    self.p4 = thep4
    self.px = px
    self.py = py
    self.pz = pz
    self.en = energy
    self.pt = np.hypot(px, py)
    self.phi = np.arctan2(py, px)
    self.eta = np.arctanh(pz / np.sqrt(px * px + py * py + pz * pz))
    self.mass = np.sqrt(
        np.abs(energy * energy - (px * px + py * py + pz * pz)))
    self.blah = energy * px
    self.count = counts
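
The constructor above only derives standard kinematic quantities from Cartesian four-momentum components. A self-contained sketch of the same formulas with plain NumPy arrays (the input values here are purely illustrative):

import numpy as np

# Hypothetical four-vectors (px, py, pz, E); values chosen only for illustration
px = np.array([10.0, -5.0])
py = np.array([0.0, 12.0])
pz = np.array([3.0, -7.0])
energy = np.array([11.0, 16.0])

pt = np.hypot(px, py)                     # transverse momentum
phi = np.arctan2(py, px)                  # azimuthal angle
p = np.sqrt(px**2 + py**2 + pz**2)        # |p|
eta = np.arctanh(pz / p)                  # pseudorapidity
mass = np.sqrt(np.abs(energy**2 - p**2))  # invariant mass (abs guards rounding)

print(pt, phi, eta, mass)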
Example No. 9
def test_root_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])

    extractor.finalize(reduce_list=['testSF2d'])

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test flat eval
    test_out = evaluator["testSF2d"](test_eta, test_pt)

    # print it
    print(evaluator["testSF2d"])

    # test structured eval
    test_eta_jagged = ak.unflatten(test_eta, counts)
    test_pt_jagged = ak.unflatten(test_pt, counts)
    test_out_jagged = evaluator["testSF2d"](test_eta_jagged, test_pt_jagged)

    assert ak.all(ak.num(test_out_jagged) == counts)
    assert ak.all(ak.flatten(test_out_jagged) == test_out)

    # From make_expected_lookup.py
    expected_output = np.array([
        0.90780139, 0.82748538, 0.86332178, 0.86332178, 0.97981155, 0.79701495,
        0.88245934, 0.82857144, 0.91884059, 0.97466666, 0.94072163, 1.00775194,
        0.82748538, 1.00775194, 0.97203946, 0.98199672, 0.80655736, 0.90893763,
        0.88245934, 0.79701495, 0.82748538, 0.82857144, 0.91884059, 0.90893763,
        0.97520661, 0.97520661, 0.82748538, 0.91884059, 0.97203946, 0.88245934,
        0.79701495, 0.9458763, 1.00775194, 0.80655736, 1.00775194, 1.00775194,
        0.98976982, 0.98976982, 0.86332178, 0.94072163, 0.80655736, 0.98976982,
        0.96638656, 0.9458763, 0.90893763, 0.9529984, 0.9458763, 0.9529984,
        0.80655736, 0.80655736, 0.80655736, 0.98976982, 0.97466666, 0.98199672,
        0.86332178, 1.03286386, 0.94072163, 1.03398061, 0.82857144, 0.80655736,
        1.00775194, 0.80655736
    ])

    print(test_out)

    diff = np.abs(test_out - expected_output)
    print("Max diff: %.16f" % diff.max())
    print("Median diff: %.16f" % np.median(diff))
    print("Diff over threshold rate: %.1f %%" %
          (100 * (diff >= 1.e-8).sum() / diff.size))
    assert (diff < 1.e-8).all()
Example No. 10
def test_weights_partial():
    from coffea.processor import Weights
    counts, _, _ = dummy_jagged_eta_pt()
    w1 = np.random.normal(loc=1.0, scale=0.01, size=counts.size)
    w2 = np.random.normal(loc=1.3, scale=0.05, size=counts.size)

    weights = Weights(counts.size, storeIndividual=True)
    weights.add('w1', w1)
    weights.add('w2', w2)

    test_exclude_none = weights.weight()
    assert (np.all(np.abs(test_exclude_none - w1 * w2) < 1e-6))

    test_exclude1 = weights.partial_weight(exclude=['w1'])
    assert (np.all(np.abs(test_exclude1 - w2) < 1e-6))

    test_include1 = weights.partial_weight(include=['w1'])
    assert (np.all(np.abs(test_include1 - w1) < 1e-6))

    test_exclude2 = weights.partial_weight(exclude=['w2'])
    assert (np.all(np.abs(test_exclude2 - w1) < 1e-6))

    test_include2 = weights.partial_weight(include=['w2'])
    assert (np.all(np.abs(test_include2 - w2) < 1e-6))

    test_include_both = weights.partial_weight(include=['w1', 'w2'])
    assert (np.all(np.abs(test_include_both - w1 * w2) < 1e-6))

    # Check that exception is thrown if arguments are incompatible
    error_raised = False
    try:
        weights.partial_weight(exclude=['w1'], include=['w2'])
    except ValueError:
        error_raised = True
    assert (error_raised)

    error_raised = False
    try:
        weights.partial_weight()
    except ValueError:
        error_raised = True
    assert (error_raised)

    # Check that exception is thrown if individual weights
    # are not saved from the start
    weights = Weights(counts.size, storeIndividual=False)
    weights.add('w1', w1)
    weights.add('w2', w2)

    error_raised = False
    try:
        weights.partial_weight(exclude=['test'], include=['test'])
    except ValueError:
        error_raised = True
    assert (error_raised)
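
The ValueError checks in this test use a manual error_raised flag; an equivalent phrasing with pytest.raises would be more idiomatic. A sketch, assuming pytest is available in the test environment:

    # Sketch (not part of the original test): same checks via pytest.raises
    import pytest

    with pytest.raises(ValueError):
        weights.partial_weight(exclude=['w1'], include=['w2'])
    with pytest.raises(ValueError):
        weights.partial_weight()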
Example No. 11
def test_jet_resolution_sf():
    from coffea.jetmet_tools import JetResolutionScaleFactor

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    jersf_names = ["Spring16_25nsV10_MC_SF_AK4PFPuppi"]
    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in jersf_names})

    print(resosf)

    # 0-jet compatibility
    assert resosf.getScaleFactor(JetEta=test_eta[:0]).shape == (0, 3)

    resosfs = resosf.getScaleFactor(JetEta=test_eta)
    resosfs_jag = resosf.getScaleFactor(JetEta=test_eta_jag)
    assert ak.all(resosfs == ak.flatten(resosfs_jag))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag, "\n")

    resosfs_jag_ref = ak.unflatten(
        np.array([
            [1.857, 1.928, 1.786],
            [1.084, 1.095, 1.073],
            [1.364, 1.403, 1.325],
            [1.177, 1.218, 1.136],
            [1.138, 1.151, 1.125],
            [1.364, 1.403, 1.325],
            [1.177, 1.218, 1.136],
            [1.082, 1.117, 1.047],
        ]),
        counts,
    )
    resosfs_jag = resosf.getScaleFactor(JetEta=test_eta_jag)
    print("Reference Resolution SF (jagged):", resosfs_jag_ref)
    print("Resolution SF (jagged):", resosfs_jag)
    assert ak.all(
        np.abs(ak.flatten(resosfs_jag_ref) - ak.flatten(resosfs_jag)) < 1e-6)
Example No. 12
def test_jet_resolution_sf_2d():
    from coffea.jetmet_tools import JetResolutionScaleFactor

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in ["Autumn18_V7_MC_SF_AK4PFchs"]})

    print(resosf)

    # 0-jet compatibility
    assert resosf.getScaleFactor(JetPt=test_pt[:0],
                                 JetEta=test_eta[:0]).shape == (0, 3)

    resosfs = resosf.getScaleFactor(JetPt=test_pt, JetEta=test_eta)
    resosfs_jag = resosf.getScaleFactor(JetPt=test_pt_jag, JetEta=test_eta_jag)
    assert ak.all(resosfs == ak.flatten(resosfs_jag))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag, "\n")

    resosfs_jag_ref = ak.unflatten(
        np.array([
            [1.11904, 1.31904, 1.0],
            [1.1432, 1.2093, 1.0771],
            [1.16633, 1.36633, 1.0],
            [1.17642, 1.37642, 1.0],
            [1.1808, 1.1977, 1.1640],
            [1.15965, 1.35965, 1.0],
            [1.17661, 1.37661, 1.0],
            [1.1175, 1.1571, 1.0778],
        ]),
        counts,
    )
    resosfs_jag = resosf.getScaleFactor(JetPt=test_pt_jag, JetEta=test_eta_jag)
    print("Reference Resolution SF (jagged):", resosfs_jag_ref)
    print("Resolution SF (jagged):", resosfs_jag)
    assert ak.all(
        np.abs(ak.flatten(resosfs_jag_ref) - ak.flatten(resosfs_jag)) < 1e-6)
Example No. 13
def test_analysis_objects():
    counts, px, py, pz, energy = dummy_four_momenta()
    thep4 = np.stack((px, py, pz, energy)).T

    #test JaggedTLorentzVectorArray
    tlva1 = uproot_methods.TLorentzVectorArray(px, py, pz, energy)
    tlva2 = uproot_methods.TLorentzVectorArray(thep4[:, 0], thep4[:, 1],
                                               thep4[:, 2], thep4[:, 3])
    jtlva1 = JaggedTLorentzVectorArray.fromcounts(counts, tlva1)
    jtlva2 = JaggedTLorentzVectorArray.fromcounts(counts, tlva2)

    jtlva1_selection1 = jtlva1[jtlva1.counts > 0]
    jtlva1_selection2 = jtlva1_selection1[jtlva1_selection1.pt > 5]

    jtlva2_selection1 = jtlva2[jtlva2.counts > 0]
    jtlva2_selection2 = jtlva2_selection1[jtlva2_selection1.pt > 5]

    diffx = np.abs(jtlva1.x - jtlva2.x)
    diffy = np.abs(jtlva1.y - jtlva2.y)
    diffz = np.abs(jtlva1.z - jtlva2.z)
    difft = np.abs(jtlva1.t - jtlva2.t)
    assert (diffx < 1e-8).flatten().all()
    assert (diffy < 1e-8).flatten().all()
    assert (diffz < 1e-8).flatten().all()
    assert (difft < 1e-8).flatten().all()

    #test JaggedCandidateArray
    jca1 = JaggedCandidateArray.candidatesfromcounts(counts, p4=thep4)
    jca2 = JaggedCandidateArray.candidatesfromcounts(counts, p4=thep4)
    assert ((jca1.offsets == jca2.offsets).all())

    addon1 = jca1.zeros_like()
    addon2 = jca2.ones_like()
    jca1['addon'] = addon1
    jca2['addon'] = addon2

    jca1.add_attributes(addonFlat=addon1.flatten(), addonJagged=addon1)

    diffm = np.abs(jca1.p4.mass - jca2.p4.mass)
    assert ((jca1.offsets == jca2.offsets).all())
    diffpt = np.abs(jca1.p4.pt - jca2.p4.pt)
    assert ((jca1.offsets == jca2.offsets).all())
    eta2 = jca2.p4.eta
    eta1 = jca1.p4.eta
    print(np.sum(eta1.counts), np.sum(eta2.counts))
    diffeta_temp = np.abs(eta1 - eta2)
    diffeta = np.abs(jca1.p4.eta - jca2.p4.eta)
    assert ((jca1.offsets == jca2.offsets).all())
    assert (diffm < 1e-8).flatten().all()
    assert (diffpt < 1e-8).flatten().all()
    assert (diffeta < 1e-8).flatten().all()

    #test fast functions
    fastfs = ['pt', 'eta', 'phi', 'mass']
    for func in fastfs:
        func1 = getattr(jca1, func)
        func2 = getattr(jca1.p4, func)
        dfunc = np.abs(func1 - func2)
        assert (dfunc < 1e-8).flatten().all()

    adistinct = jca1.distincts()
    apair = jca1.pairs()
    across = jca1.cross(jca2)
    achoose2 = jca1.choose(2)
    achoose3 = jca1.choose(3)

    assert 'p4' in adistinct.columns
    assert 'p4' in apair.columns
    assert 'p4' in across.columns
    assert 'p4' in achoose2.columns
    assert 'p4' in achoose3.columns

    admsum = (adistinct.i0.p4 + adistinct.i1.p4).mass
    apmsum = (apair.i0.p4 + apair.i1.p4).mass
    acmsum = (across.i0.p4 + across.i1.p4).mass
    ach3msum = (achoose3.i0.p4 + achoose3.i1.p4 + achoose3.i2.p4).mass
    diffadm = np.abs(adistinct.p4.mass - admsum)
    diffapm = np.abs(apair.p4.mass - apmsum)
    diffacm = np.abs(across.p4.mass - acmsum)
    diffachm = np.abs(achoose2.p4.mass - admsum)
    diffach3m = np.abs(achoose3.p4.mass - ach3msum)

    assert (diffadm < 1e-8).flatten().all()
    assert (diffapm < 1e-8).flatten().all()
    assert (diffacm < 1e-8).flatten().all()
    assert (diffachm < 1e-8).flatten().all()
    assert (diffach3m < 1e-8).flatten().all()

    selection11 = jca1[jca1.counts > 0]
    selection12 = selection11[selection11.p4.pt > 5]

    selection21 = jca2[jca2.counts > 0]
    selection22 = selection21[selection21.p4.pt > 5]

    diffcnts = selection12.counts - jtlva1_selection2.counts
    diffm = np.abs(selection12.p4.mass - jtlva1_selection2.mass)
    diffaddon = selection12.addon - selection22.addon
    assert (diffcnts == 0).flatten().all()
    assert (diffm < 1e-8).flatten().all()
    assert (diffaddon == -1).flatten().all()

    #test gen-reco matching
    gen, reco = gen_reco_TLV()
    flat_gen = gen.flatten()
    gen_px, gen_py, gen_pz, gen_e = flat_gen.x, flat_gen.y, flat_gen.z, flat_gen.t
    flat_reco = reco.flatten()
    reco_px, reco_py, reco_pz, reco_e = flat_reco.x, flat_reco.y, flat_reco.z, flat_reco.t
    jca_gen = JaggedCandidateArray.candidatesfromcounts(gen.counts,
                                                        px=gen_px,
                                                        py=gen_py,
                                                        pz=gen_pz,
                                                        energy=gen_e)
    jca_reco = JaggedCandidateArray.candidatesfromcounts(reco.counts,
                                                         px=reco_px,
                                                         py=reco_py,
                                                         pz=reco_pz,
                                                         energy=reco_e)
    print('gen eta: ', jca_gen.p4.eta, '\n gen phi:', jca_gen.p4.phi)
    print('reco eta: ', jca_reco.p4.eta, '\n reco phi:', jca_reco.p4.phi)
    match_mask = jca_reco.match(jca_gen, deltaRCut=0.3)
    print('match mask: ', match_mask)
    fast_match_mask = jca_reco.fastmatch(jca_gen, deltaRCut=0.3)
    print('fastmatch mask: ', fast_match_mask)
    assert ((match_mask == fast_match_mask).all().all())
    print('arg matches: ', jca_reco.argmatch(jca_gen, deltaRCut=0.3))
    argmatch_nocut = jca_gen.argmatch(jca_reco).flatten()
    argmatch_dr03 = jca_gen.argmatch(jca_reco, deltaRCut=0.3).flatten()
    argmatch_dr03_dpt01 = jca_gen.argmatch(jca_reco,
                                           deltaRCut=0.3,
                                           deltaPtCut=0.1).flatten()
    assert (argmatch_nocut.size == 5)
    assert (argmatch_dr03[argmatch_dr03 != -1].size == 3)
    assert (argmatch_dr03_dpt01[argmatch_dr03_dpt01 != -1].size == 2)
    assert (jca_gen.match(jca_reco,
                          deltaRCut=0.3).flatten().flatten().sum() == 3)
    assert (jca_gen.match(jca_reco, deltaRCut=0.3,
                          deltaPtCut=0.1).flatten().flatten().sum() == 2)

    # test various four-momentum constructors
    ptetaphiE_test = JaggedCandidateArray.candidatesfromcounts(
        jca_reco.counts,
        pt=jca_reco.pt,
        eta=jca_reco.eta,
        phi=jca_reco.phi,
        energy=jca_reco.p4.energy)

    pxpypzM_test = JaggedCandidateArray.candidatesfromcounts(
        jca_reco.counts,
        px=jca_reco.p4.x,
        py=jca_reco.p4.y,
        pz=jca_reco.p4.z,
        mass=jca_reco.mass)

    ptphipzE_test = JaggedCandidateArray.candidatesfromcounts(
        jca_reco.counts,
        pt=jca_reco.pt,
        phi=jca_reco.phi,
        pz=jca_reco.p4.z,
        energy=jca_reco.p4.energy)

    pthetaphiE_test = JaggedCandidateArray.candidatesfromcounts(
        jca_reco.counts,
        p=jca_reco.p4.p,
        theta=jca_reco.p4.theta,
        phi=jca_reco.phi,
        energy=jca_reco.p4.energy)
Example No. 14
def test_corrected_jets_factory():
    import os
    from coffea.jetmet_tools import CorrectedJetsFactory, CorrectedMETFactory, JECStack

    events = None
    from coffea.nanoevents import NanoEventsFactory

    factory = NanoEventsFactory.from_root(
        os.path.abspath("tests/samples/nano_dy.root"))
    events = factory.events()

    jec_stack_names = [
        "Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi",
        "Spring16_25nsV10_MC_PtResolution_AK4PFPuppi",
        "Spring16_25nsV10_MC_SF_AK4PFPuppi",
    ]
    for key in evaluator.keys():
        if "Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi" in key:
            jec_stack_names.append(key)

    jec_inputs = {name: evaluator[name] for name in jec_stack_names}
    jec_stack = JECStack(jec_inputs)

    name_map = jec_stack.blank_name_map
    name_map["JetPt"] = "pt"
    name_map["JetMass"] = "mass"
    name_map["JetEta"] = "eta"
    name_map["JetA"] = "area"

    jets = events.Jet

    jets["pt_raw"] = (1 - jets["rawFactor"]) * jets["pt"]
    jets["mass_raw"] = (1 - jets["rawFactor"]) * jets["mass"]
    jets["pt_gen"] = ak.values_astype(ak.fill_none(jets.matched_gen.pt, 0),
                                      np.float32)
    jets["rho"] = ak.broadcast_arrays(events.fixedGridRhoFastjetAll,
                                      jets.pt)[0]
    name_map["ptGenJet"] = "pt_gen"
    name_map["ptRaw"] = "pt_raw"
    name_map["massRaw"] = "mass_raw"
    name_map["Rho"] = "rho"

    jec_cache = cachetools.Cache(np.inf)

    print(name_map)

    tic = time.time()
    jet_factory = CorrectedJetsFactory(name_map, jec_stack)
    toc = time.time()

    print("setup corrected jets time =", toc - tic)

    tic = time.time()
    prof = pyinstrument.Profiler()
    prof.start()
    corrected_jets = jet_factory.build(jets, lazy_cache=jec_cache)
    prof.stop()
    toc = time.time()

    print("corrected_jets build time =", toc - tic)

    print(prof.output_text(unicode=True, color=True, show_all=True))

    tic = time.time()
    print("Generated jet pt:", corrected_jets.pt_gen)
    print("Original jet pt:", corrected_jets.pt_orig)
    print("Raw jet pt:", jets.pt_raw)
    print("Corrected jet pt:", corrected_jets.pt)
    print("Original jet mass:", corrected_jets.mass_orig)
    print("Raw jet mass:", jets["mass_raw"])
    print("Corrected jet mass:", corrected_jets.mass)
    print("jet eta:", jets.eta)
    for unc in jet_factory.uncertainties():
        print(unc)
        print(corrected_jets[unc].up.pt)
        print(corrected_jets[unc].down.pt)
    toc = time.time()

    print("build all jet variations =", toc - tic)

    # Test that the corrections were applied correctly
    from coffea.jetmet_tools import (
        FactorizedJetCorrector,
        JetResolution,
        JetResolutionScaleFactor,
    )

    scalar_form = ak.without_parameters(jets["pt_raw"]).layout.form
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_stack_names[0:4]})
    corrs = corrector.getCorrection(JetEta=jets["eta"],
                                    Rho=jets["rho"],
                                    JetPt=jets["pt_raw"],
                                    JetA=jets["area"])
    reso = JetResolution(
        **{name: evaluator[name]
           for name in jec_stack_names[4:5]})
    jets["jet_energy_resolution"] = reso.getResolution(
        JetEta=jets["eta"],
        Rho=jets["rho"],
        JetPt=jets["pt_raw"],
        form=scalar_form,
        lazy_cache=jec_cache,
    )
    resosf = JetResolutionScaleFactor(
        **{name: evaluator[name]
           for name in jec_stack_names[5:6]})
    jets["jet_energy_resolution_scale_factor"] = resosf.getScaleFactor(
        JetEta=jets["eta"], lazy_cache=jec_cache)

    # Filter out the non-deterministic (no gen pt) jets
    def smear_factor(jetPt, pt_gen, jersf):
        return (ak.full_like(jetPt, 1.0) +
                (jersf[:, 0] - ak.full_like(jetPt, 1.0)) *
                (jetPt - pt_gen) / jetPt)

    test_gen_pt = ak.concatenate(
        [corrected_jets.pt_gen[0, :-2], corrected_jets.pt_gen[-1, :-1]])
    test_raw_pt = ak.concatenate([jets.pt_raw[0, :-2], jets.pt_raw[-1, :-1]])
    test_pt = ak.concatenate(
        [corrected_jets.pt[0, :-2], corrected_jets.pt[-1, :-1]])
    test_eta = ak.concatenate([jets.eta[0, :-2], jets.eta[-1, :-1]])
    test_jer = ak.concatenate([
        jets.jet_energy_resolution[0, :-2], jets.jet_energy_resolution[-1, :-1]
    ])
    test_jer_sf = ak.concatenate([
        jets.jet_energy_resolution_scale_factor[0, :-2],
        jets.jet_energy_resolution_scale_factor[-1, :-1],
    ])
    test_jec = ak.concatenate([corrs[0, :-2], corrs[-1, :-1]])
    test_corrected_pt = ak.concatenate(
        [corrected_jets.pt[0, :-2], corrected_jets.pt[-1, :-1]])
    test_corr_pt = test_raw_pt * test_jec
    test_pt_smear_corr = test_corr_pt * smear_factor(test_corr_pt, test_gen_pt,
                                                     test_jer_sf)

    # Print the results of the "by-hand" calculations and confirm that the values match the expected values
    print("\nConfirm the CorrectedJetsFactory values:")
    print("Jet pt (gen)", test_gen_pt.tolist())
    print("Jet pt (raw)", test_raw_pt.tolist())
    print("Jet pt (nano):", test_pt.tolist())
    print("Jet eta:", test_eta.tolist())
    print("Jet energy resolution:", test_jer.tolist())
    print("Jet energy resolution sf:", test_jer_sf.tolist())
    print("Jet energy correction:", test_jec.tolist())
    print("Corrected jet pt (ref)", test_corr_pt.tolist())
    print("Corrected & smeared jet pt (ref):", test_pt_smear_corr.tolist())
    print("Corrected & smeared jet pt:", test_corrected_pt.tolist(), "\n")
    assert ak.all(np.abs(test_pt_smear_corr - test_corrected_pt) < 1e-6)

    name_map["METpt"] = "pt"
    name_map["METphi"] = "phi"
    name_map["JetPhi"] = "phi"
    name_map["UnClusteredEnergyDeltaX"] = "MetUnclustEnUpDeltaX"
    name_map["UnClusteredEnergyDeltaY"] = "MetUnclustEnUpDeltaY"

    tic = time.time()
    met_factory = CorrectedMETFactory(name_map)
    toc = time.time()

    print("setup corrected MET time =", toc - tic)

    met = events.MET
    tic = time.time()
    # prof = pyinstrument.Profiler()
    # prof.start()
    corrected_met = met_factory.build(met,
                                      corrected_jets,
                                      lazy_cache=jec_cache)
    # prof.stop()
    toc = time.time()

    # print(prof.output_text(unicode=True, color=True, show_all=True))

    print("corrected_met build time =", toc - tic)

    tic = time.time()
    print(corrected_met.pt_orig)
    print(corrected_met.pt)
    prof = pyinstrument.Profiler()
    prof.start()
    for unc in jet_factory.uncertainties() + met_factory.uncertainties():
        print(unc)
        print(corrected_met[unc].up.pt)
        print(corrected_met[unc].down.pt)
    prof.stop()
    toc = time.time()

    print("build all met variations =", toc - tic)

    print(prof.output_text(unicode=True, color=True, show_all=True))
Example No. 15
def test_factorized_jet_corrector():
    from coffea.jetmet_tools import FactorizedJetCorrector

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    test_Rho = np.full_like(test_eta, 100.0)
    test_A = np.full_like(test_eta, 5.0)

    # Check that the FactorizedJetCorrector is functional
    jec_names = [
        "Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L2L3Residual_AK4PFPuppi",
        "Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi",
    ]
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names})

    print(corrector)

    pt_copy = np.copy(test_pt)

    # Check that the corrector can be evaluated for flattened arrays
    corrs = corrector.getCorrection(JetEta=test_eta,
                                    Rho=test_Rho,
                                    JetPt=test_pt,
                                    JetA=test_A)

    assert (np.abs(pt_copy - test_pt) < 1e-6).all()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)
    test_Rho_jag = ak.unflatten(test_Rho, counts)
    test_A_jag = ak.unflatten(test_A, counts)

    # Check that the corrector can be evaluated for jagged arrays
    corrs_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                        Rho=test_Rho_jag,
                                        JetPt=test_pt_jag,
                                        JetA=test_A_jag)

    assert ak.all(np.abs(pt_copy - ak.flatten(test_pt_jag)) < 1e-6)
    assert ak.all(np.abs(corrs - ak.flatten(corrs_jag)) < 1e-6)

    # Check that the corrector returns the correct answers for each level of correction
    # Use a subset of the values so that we can check the corrections by hand
    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    test_Rho_jag = test_Rho_jag[0:3]
    test_A_jag = test_A_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag)
    print("eta:", test_eta_jag)
    print("rho:", test_Rho_jag)
    print("area:", test_A_jag, "\n")

    # Start by checking the L1 corrections
    corrs_L1_jag_ref = ak.full_like(test_pt_jag, 1.0)
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names[0:1]})
    corrs_L1_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                           Rho=test_Rho_jag,
                                           JetPt=test_pt_jag,
                                           JetA=test_A_jag)
    print("Reference L1 corrections:", corrs_L1_jag_ref)
    print("Calculated L1 corrections:", corrs_L1_jag)
    assert ak.all(
        np.abs(ak.flatten(corrs_L1_jag_ref) - ak.flatten(corrs_L1_jag)) < 1e-6)

    # Apply the L1 corrections and save the result
    test_ptL1_jag = test_pt_jag * corrs_L1_jag
    print("L1 corrected pT values:", test_ptL1_jag, "\n")
    assert ak.all(
        np.abs(ak.flatten(test_pt_jag) - ak.flatten(test_ptL1_jag)) < 1e-6)

    # Check the L2 corrections on a subset of jets
    # Look up the parameters for the L2 corrections by hand and calculate the corrections
    # [(1.37906,35.8534,-0.00829227,7.96644e-05,5.18988e-06),
    #  (1.38034,17.9841,-0.00729638,-0.000127141,5.70889e-05),
    #  (1.74466,18.6372,-0.0367036,0.00310864,-0.000277062),
    #  (1.4759,24.8882,-0.0155333,0.0020836,-0.000198039),
    #  (1.14606,36.4215,-0.00174801,-1.76393e-05,1.91863e-06),
    #  (0.999657,4.02981,1.06597,-0.619679,-0.0494)],
    # [(1.54524,23.9023,-0.0162807,0.000665243,-4.66608e-06),
    #  (1.48431,8.68725,0.00642424,0.0252104,-0.0335696)]])
    corrs_L2_jag_ref = ak.unflatten(
        np.array([
            1.37038741364,
            1.37710384514,
            1.65148641108,
            1.46840446827,
            1.1328319784,
            1.0,
            1.50762056349,
            1.48719866989,
        ]),
        counts,
    )
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in jec_names[1:2]})
    corrs_L2_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                           JetPt=test_pt_jag)
    print("Reference L2 corrections:", corrs_L2_jag_ref.tolist())
    print("Calculated L2 corrections:", corrs_L2_jag.tolist())
    assert ak.all(
        np.abs(ak.flatten(corrs_L2_jag_ref) - ak.flatten(corrs_L2_jag)) < 1e-6)

    # Apply the L2 corrections and save the result
    test_ptL1L2_jag = test_ptL1_jag * corrs_L2_jag
    print("L1L2 corrected pT values:", test_ptL1L2_jag, "\n")

    # Apply the L3 corrections and save the result
    corrs_L3_jag = ak.full_like(test_pt_jag, 1.0)
    test_ptL1L2L3_jag = test_ptL1L2_jag * corrs_L3_jag
    print("L1L2L3 corrected pT values:", test_ptL1L2L3_jag, "\n")

    # Check that the corrections can be chained together
    corrs_L1L2L3_jag_ref = ak.unflatten(
        np.array([
            1.37038741364,
            1.37710384514,
            1.65148641108,
            1.46840446827,
            1.1328319784,
            1.0,
            1.50762056349,
            1.48719866989,
        ]),
        counts,
    )
    corrector = FactorizedJetCorrector(
        **{name: evaluator[name]
           for name in (jec_names[0:2] + jec_names[3:])})
    corrs_L1L2L3_jag = corrector.getCorrection(JetEta=test_eta_jag,
                                               Rho=test_Rho_jag,
                                               JetPt=test_pt_jag,
                                               JetA=test_A_jag)
    print("Reference L1L2L3 corrections:", corrs_L1L2L3_jag_ref)
    print("Calculated L1L2L3 corrections:", corrs_L1L2L3_jag)
    assert ak.all(
        np.abs(
            ak.flatten(corrs_L1L2L3_jag_ref) -
            ak.flatten(corrs_L1L2L3_jag)) < 1e-6)

    # Apply the L1L2L3 corrections and save the result
    test_ptL1L2L3chain_jag = test_pt_jag * corrs_L1L2L3_jag
    print("Chained L1L2L3 corrected pT values:", test_ptL1L2L3chain_jag, "\n")
    assert ak.all(
        np.abs(
            ak.flatten(test_ptL1L2L3_jag) -
            ak.flatten(test_ptL1L2L3chain_jag)) < 1e-6)
Example No. 16
def test_jet_correction_regrouped_uncertainty_sources():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = []
    levels = []
    for name in dir(evaluator):
        if "Regrouped_Fall17_17Nov2017_V32_MC_UncertaintySources_AK4PFchs" in name:
            junc_names.append(name)
            if len(name.split("_")) == 9:
                levels.append("_".join(name.split("_")[-2:]))
            else:
                levels.append(name.split("_")[-1])
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, tpl in enumerate(
            list(junc.getUncertainty(JetEta=test_eta, JetPt=test_pt))):
        assert tpl[0] in levels
        assert tpl[1].shape[0] == test_eta.shape[0]
        assert ak.all(tpl[1] == ak.flatten(juncs_jag[i][1]))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag.tolist())
    print("eta:", test_eta_jag.tolist(), "\n")

    juncs_jag_ref = ak.unflatten(
        np.array([
            [1.119159088, 0.880840912],
            [1.027003404, 0.972996596],
            [1.135201275, 0.864798725],
            [1.039665259, 0.960334741],
            [1.015064503, 0.984935497],
            [1.149900004, 0.850099996],
            [1.079960600, 0.920039400],
            [1.041200001, 0.958799999],
        ]),
        counts,
    )
    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))
    for i, (level, corrs) in enumerate(juncs_jag):
        if level != "Total":
            continue
        print("Index:", i)
        print("Correction level:", level)
        print("Reference Uncertainties (jagged):", juncs_jag_ref)
        print("Uncertainties (jagged):", corrs, "\n")
        assert ak.all(
            np.abs(ak.flatten(juncs_jag_ref) - ak.flatten(corrs)) < 1e-6)
Example No. 17
def test_jet_correction_uncertainty_sources():
    from coffea.jetmet_tools import JetCorrectionUncertainty

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    test_pt_jag = ak.unflatten(test_pt, counts)
    test_eta_jag = ak.unflatten(test_eta, counts)

    junc_names = []
    levels = []
    for name in dir(evaluator):
        if "Summer16_23Sep2016V3_MC_UncertaintySources_AK4PFPuppi" in name:
            junc_names.append(name)
            levels.append(name.split("_")[-1])
        # test for underscore in dataera
        if "Fall17_17Nov2017_V6_MC_UncertaintySources_AK4PFchs_AbsoluteFlavMap" in name:
            junc_names.append(name)
            levels.append(name.split("_")[-1])
    junc = JetCorrectionUncertainty(
        **{name: evaluator[name]
           for name in junc_names})

    print(junc)

    juncs = junc.getUncertainty(JetEta=test_eta, JetPt=test_pt)

    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))

    for i, (level, corrs) in enumerate(juncs):
        assert level in levels
        assert corrs.shape[0] == test_eta.shape[0]
        assert ak.all(corrs == ak.flatten(juncs_jag[i][1]))

    test_pt_jag = test_pt_jag[0:3]
    test_eta_jag = test_eta_jag[0:3]
    counts = counts[0:3]
    print("Raw jet values:")
    print("pT:", test_pt_jag.tolist())
    print("eta:", test_eta_jag.tolist(), "\n")

    juncs_jag_ref = ak.unflatten(
        np.array([
            [1.053504214, 0.946495786],
            [1.033343349, 0.966656651],
            [1.065159157, 0.934840843],
            [1.033140127, 0.966859873],
            [1.016858652, 0.983141348],
            [1.130199999, 0.869800001],
            [1.039968468, 0.960031532],
            [1.033100002, 0.966899998],
        ]),
        counts,
    )
    juncs_jag = list(
        junc.getUncertainty(JetEta=test_eta_jag, JetPt=test_pt_jag))
    for i, (level, corrs) in enumerate(juncs_jag):
        if level != "Total":
            continue
        print("Index:", i)
        print("Correction level:", level)
        print("Reference Uncertainties (jagged):", juncs_jag_ref)
        print("Uncertainties (jagged):", corrs, "\n")
        assert ak.all(
            np.abs(ak.flatten(juncs_jag_ref) - ak.flatten(corrs)) < 1e-6)