Example No. 1
# assumed imports, following the package used in Example No. 7:
from fnal_column_analysis_tools import lookup_tools
from dummy_distributions import dummy_jagged_eta_pt  # test-suite helper (assumed module path)

def test_jec_txt_scalefactors():
    extractor = lookup_tools.extractor()
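    # each weight-set entry is "<local name> <name in file> <path to file>";
    # "*" wildcards import everything from the file, and a non-wildcard
    # first token becomes a prefix on the imported names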
    extractor.add_weight_sets([
        "testJEC * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.jec.txt",
        "* * tests/samples/Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi.junc.txt",
        "* * tests/samples/Autumn18_V8_MC_UncertaintySources_AK4PFchs.junc.txt",
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
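    # counts gives the number of objects per event; test_eta and test_pt are
    # the matching flattened kinematic arrays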

    jec_out = evaluator[
        'testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi'](test_eta,
                                                                test_pt)

    print(jec_out)

    junc_out = evaluator['Fall17_17Nov2017_V32_MC_Uncertainty_AK4PFPuppi'](
        test_eta, test_pt)

    print(junc_out)

    assert ('Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale'
            in evaluator.keys())
    junc_out = evaluator[
        'Autumn18_V8_MC_UncertaintySources_AK4PFchs_AbsoluteScale'](test_eta,
                                                                    test_pt)
    print(junc_out)
Example No. 2
# assumed imports, as in Example No. 1, plus numpy:
import numpy as np
from fnal_column_analysis_tools import lookup_tools
from dummy_distributions import dummy_jagged_eta_pt

def test_btag_csv_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(["testBTag * tests/samples/testBTagSF.btag.csv"])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()
    # discriminant used for reshaping, zero otherwise
    test_discr = np.zeros_like(test_eta)

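    # the key encodes tagger and working point (CSVv2, WP index 1),
    # measurement type (comb), systematic (up), and jet-flavor index (0)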
    sf_out = evaluator['testBTagCSVv2_1_comb_up_0'](test_eta, test_pt,
                                                    test_discr)
    print(sf_out)
Example No. 3
def test_jec_txt_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testJEC * tests/samples/Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.jec.txt"
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    jec_out = evaluator[
        'testJECFall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi'](test_eta,
                                                                test_pt)

    print(jec_out)
Example No. 4
def test_histo_json_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets(
        ["testJson * tests/samples/EIDISO_WH_out.histo.json"])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

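    # histo.json lookups expose bin contents and bin errors as separate keys,
    # suffixed _value and _error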
    sf_out = evaluator['testJsonEIDISO_WH/eta_pt_ratio_value'](test_eta,
                                                               test_pt)
    sf_err_out = evaluator['testJsonEIDISO_WH/eta_pt_ratio_error'](test_eta,
                                                                   test_pt)
    print(sf_out)
    print(sf_err_out)
Example No. 5
# assumed imports, as in Example No. 1, plus numpy and awkward (0.x API):
import awkward
import numpy as np
from fnal_column_analysis_tools import lookup_tools
from dummy_distributions import dummy_jagged_eta_pt

def test_root_scalefactors():
    extractor = lookup_tools.extractor()
    extractor.add_weight_sets([
        "testSF2d scalefactors_Tight_Electron tests/samples/testSF2d.histo.root"
    ])
    extractor.finalize()

    evaluator = extractor.make_evaluator()

    counts, test_eta, test_pt = dummy_jagged_eta_pt()

    # test flat eval
    test_out = evaluator["testSF2d"](test_eta, test_pt)

    # test structured eval
    test_eta_jagged = awkward.JaggedArray.fromcounts(counts, test_eta)
    test_pt_jagged = awkward.JaggedArray.fromcounts(counts, test_pt)
    test_out_jagged = evaluator["testSF2d"](test_eta_jagged, test_pt_jagged)

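    # the lookup broadcasts over jagged input and returns a JaggedArray with
    # the same per-event structure, as the asserts below check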
    assert (test_out_jagged.counts == counts).all()
    assert (test_out == test_out_jagged.flatten()).all()

    # From make_expected_lookup.py
    expected_output = np.array([
        0.90780139, 0.82748538, 0.86332178, 0.86332178, 0.97981155, 0.79701495,
        0.88245934, 0.82857144, 0.91884059, 0.97466666, 0.94072163, 1.00775194,
        0.82748538, 1.00775194, 0.97203946, 0.98199672, 0.80655736, 0.90893763,
        0.88245934, 0.79701495, 0.82748538, 0.82857144, 0.91884059, 0.90893763,
        0.97520661, 0.97520661, 0.82748538, 0.91884059, 0.97203946, 0.88245934,
        0.79701495, 0.9458763, 1.00775194, 0.80655736, 1.00775194, 1.00775194,
        0.98976982, 0.98976982, 0.86332178, 0.94072163, 0.80655736, 0.98976982,
        0.96638656, 0.9458763, 0.90893763, 0.9529984, 0.9458763, 0.9529984,
        0.80655736, 0.80655736, 0.80655736, 0.98976982, 0.97466666, 0.98199672,
        0.86332178, 1.03286386, 0.94072163, 1.03398061, 0.82857144, 0.80655736,
        1.00775194, 0.80655736
    ])

    diff = np.abs(test_out - expected_output)
    print("Max diff: %.16f" % diff.max())
    print("Median diff: %.16f" % np.median(diff))
    print("Diff over threshold rate: %.1f %%" %
          (100 * (diff >= 1.e-8).sum() / diff.size))
    assert (diff < 1.e-8).all()
Example No. 6
from zpeak_analysis import ZpeakAnalysis

executor = UprootExecutor("zpeak")
# executor = SparkExecutor("local", "ZPeak", 20)

config = Config(
    executor=executor,
    dataset_manager=InMemoryFilesDatasetManager(database_file="demo_datasets.csv")
)
app = App(config=config)
print(app.datasets.get_names())
print(app.datasets.get_file_list("ZJetsToNuNu_HT-600To800_13TeV-madgraph"))


# Build the corrections evaluator (non-event data; broadcast to the workers
# when running with the Spark executor)
weightsext = lookup_tools.extractor()
with open("newCorrectionFiles.txt") as f:
    correctionDescriptions = f.readlines()
weightsext.add_weight_sets(correctionDescriptions)
weightsext.finalize()
weights_eval = weightsext.make_evaluator()
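# weights_eval now holds one lookup per description line in
# newCorrectionFiles.txt; the available keys depend on that file's contents,
# e.g. (hypothetical key): weights_eval["muIdSF"](muon_eta, muon_pt)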


dataset = app.read_dataset("DY Jets")
print(dataset.columns)
print(dataset.count())

slim = dataset.select_columns(["nElectron",
                               "Electron_pt",
                               "Electron_eta",
                               "Electron_phi",
                               "Electron_mass",
Example No. 7
#!/usr/bin/env python
import json
import gzip
import lz4.frame as lz4f
import cloudpickle
import pickle
import uproot
import numexpr
import numpy as np
from fnal_column_analysis_tools import hist, lookup_tools
from fnal_column_analysis_tools.hist import plot

corrections = {}

extractor = lookup_tools.extractor()
extractor.add_weight_sets(
    ["2017_n2ddt_ * correction_files/n2ddt_transform_2017MC.root"])
extractor.add_weight_sets([
    "2017_mutrigger_ * correction_files/EfficienciesAndSF_RunBtoF_Nov17Nov2017.root"
])
extractor.add_weight_sets(
    ["2017_muid_ * correction_files/Muon2017_RunBCDEF_SF_ID.json"])
extractor.add_weight_sets(
    ["2017_muiso_ * correction_files/Muon2017_RunBCDEF_SF_ISO.json"])
extractor.finalize()
evaluator = extractor.make_evaluator()

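# collect the named lookups into a plain dict; the cloudpickle/lz4 imports
# above suggest this dict is serialized for use downstream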
corrections['2017_n2ddt_rho_pt'] = evaluator['2017_n2ddt_Rho2D']
corrections['2017_mutrigweight_pt_abseta'] = evaluator[
    '2017_mutrigger_Mu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA']
corrections['2017_mutrigweight_pt_abseta_mutrigweightShift'] = evaluator[