Example #1
            # add information needed for MC corrections
            parameters["pu_corrections_target"] = load_puhist_target(parameters["pu_corrections_file"])

            # build a lookup evaluator from the configured correction files
            ext = extractor()
            for corr in parameters["corrections"]:
                ext.add_weight_sets([corr])
            ext.finalize()
            evaluator = ext.make_evaluator()


        if ibatch == 0:
            print(dataset.printout())

        # in case of DNN evaluation: load model
        model = None
        if args.DNN:
            model = load_model(args.path_to_model, custom_objects=dict(itertools=itertools, mse0=mse0, mae0=mae0, r2_score0=r2_score0))

        print(args.categories)
        #### this is where the magic happens: run the main analysis
        results += dataset.analyze(
            analyze_data,
            NUMPY_LIB=NUMPY_LIB,
            parameters=parameters,
            is_mc=is_mc,
            lumimask=lumimask,
            cat=args.categories,
            sample=args.sample,
            samples_info=samples_info,
            boosted=args.boosted,
            DNN=args.DNN,
            DNN_model=model)


    print(results)

    #Save the results
    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)
    results.save_json(os.path.join(args.outdir, "out_{}.json".format(args.sample)))
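
The corrections block above builds a lookup table from the files listed in parameters["corrections"]. Below is a minimal sketch of how such an evaluator is typically built and queried, assuming the extractor/evaluator come from coffea.lookup_tools; the file name data/sf_file.root and the key EGamma_SF2D are placeholders, not taken from this analysis.

# minimal sketch, assuming coffea.lookup_tools; file name and key are placeholders
import numpy as np
from coffea.lookup_tools import extractor

ext = extractor()
ext.add_weight_sets(["* * data/sf_file.root"])  # "* *" imports every object under its own name
ext.finalize()
evaluator = ext.make_evaluator()

# each entry behaves like a function of its binned variables, here (eta, pt)
eta = np.array([0.3, -1.2, 2.1])
pt = np.array([35.0, 60.0, 120.0])
scale_factors = evaluator["EGamma_SF2D"](eta, pt)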
Example #2
                loaded_model_json = json_file.read()
                json_file.close()
                # rebuild the Keras model from its JSON architecture and load the trained weights
                model = model_from_json(
                    loaded_model_json,
                    custom_objects=dict(itertools=itertools))
                model.load_weights(args.path_to_model + "model.hdf5")

            print(args.categories)

            #### this is where the magic happens: run the main analysis
            results += dataset.analyze(
                analyze_data,
                NUMPY_LIB=NUMPY_LIB,
                parameters=parameters,
                is_mc=is_mc,
                lumimask=lumimask,
                cat=args.categories,
                sample=args.sample,
                samples_info=samples_info,
                DNN=args.DNN,
                DNN_model=model,
                jets_met_corrected=args.jets_met_corrected)

    print(results)
    #Save the results
    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)
    results.save_json(
        os.path.join(args.outdir, "out_{0}{1}.json".format(args.sample,
                                                           args.outtag)))
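
The loading code above expects a JSON architecture file and an HDF5 weight file side by side under args.path_to_model. Below is a minimal sketch of how such a pair can be produced, assuming a tensorflow.keras model; the toy architecture and the trained_model/ directory are placeholders.

# minimal sketch, assuming tensorflow.keras; the toy model and paths are placeholders
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential([Dense(64, activation="relu", input_shape=(10,)),
                    Dense(1, activation="sigmoid")])

outdir = "trained_model/"
os.makedirs(outdir, exist_ok=True)

# architecture as JSON, weights as HDF5 -- the pair that model_from_json/load_weights reads back
with open(os.path.join(outdir, "model.json"), "w") as json_file:
    json_file.write(model.to_json())
model.save_weights(os.path.join(outdir, "model.hdf5"))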
Example #3
import numpy as np
from hepaccelerate.utils import Histogram, Results
from glob import glob
import json, os, argparse
from pdb import set_trace

flist = glob('results/201*/v12/met20_btagDDBvL086/nominal/btagEfficiencyMaps/out_btagEfficiencyMaps_*json')

def divide(h1, h2):
    # bin-by-bin ratio of two histograms, e.g. efficiency = tagged / total
    contents = h1.contents / h2.contents
    contents_w2 = h1.contents_w2 / h2.contents_w2
    edges = h1.edges
    return Histogram(contents, contents_w2, edges)
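
# Hypothetical guarded variant (an assumption, not used by the loop below):
# empty denominator bins would make divide() return inf/NaN, so they are
# masked to zero here via numpy's where/out arguments.
def divide_safe(h1, h2):
    num = np.asarray(h1.contents, dtype=np.float64)
    den = np.asarray(h2.contents, dtype=np.float64)
    num_w2 = np.asarray(h1.contents_w2, dtype=np.float64)
    den_w2 = np.asarray(h2.contents_w2, dtype=np.float64)
    contents = np.divide(num, den, out=np.zeros_like(num), where=den != 0)
    contents_w2 = np.divide(num_w2, den_w2, out=np.zeros_like(num_w2), where=den_w2 != 0)
    return Histogram(contents, contents_w2, h1.edges)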

for fn in flist:
    with open(fn) as f:
        data = json.load(f)
    # rebuild Histogram objects from their JSON representation
    for h in data:
        data[h] = Histogram(*data[h].values())

    # per-flavour b-tagging efficiency = tagged / total, for central and up/down variations
    for flav in ['b', 'l', 'lc']:
        for var in ['central', 'updown']:
            data[f'eff_flav{flav}_{var}'] = divide(data[f'btags_flav{flav}_{var}'],
                                                   data[f'total_flav{flav}_{var}'])

    ret = Results(data)
    ret.save_json(fn)
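
For reference, a toy usage of divide in the context of this script, assuming the Histogram(contents, contents_w2, edges) constructor used above; the bin contents are made-up numbers.

edges = np.array([0., 50., 100., 200.])
tagged = Histogram(np.array([10., 40., 30.]), np.array([10., 40., 30.]), edges)
total = Histogram(np.array([20., 50., 40.]), np.array([20., 50., 40.]), edges)

eff = divide(tagged, total)
print(eff.contents)  # per-bin efficiencies: 0.5, 0.8, 0.75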