Example #1
0
    print("Calculating dnn scores")
    print(len(dnn_features))
    print([feat for feat in dnn_features])
    i = 0
    print(dnn_features)
    for dnn_model in dnn_models:
        with open(dnn_model, "r") as f_in:
            model = json.load(f_in)

        dnn_features_data = dnn_helper.DNN_Features(
            name='data',
            global_features=utils.create_array(features_data, dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_data["objects_"]),
                model["preprocess_scheme"]))
        #print dnn_features_data.global_features
        #print dnn_features_data.objects
        #print dnn_features_data.features
        dnn_features_validation = dnn_helper.DNN_Features(
            name='validation',
            global_features=utils.create_array(features_validation,
                                               dnn_features,
                                               model["preprocess_scheme"],
                                               True),
            objects=utils.preprocess_array(
                utils.pad_array(features_validation["objects_"]),
                model["preprocess_scheme"]))
        dnn_features_final_fit = dnn_helper.DNN_Features(
            name='final_fit',
Example #2
0
#if args.do_top_tag:
#    dnn_features += ["top_tag_score_"]
if (args.fcnc_hut or args.fcnc_hct) and args.channel == "Hadronic" and not args.no_mass_constraint:
    dnn_features += ["m_ggj_", "m_jjj_"] 

if do_dnn:
  print("Calculating dnn scores")
  print(len(dnn_features))
  print([feat for feat in dnn_features])
  i = 0
  print(dnn_features)
  for dnn_model in dnn_models:
    with open(dnn_model, "r") as f_in:
      model = json.load(f_in)

    dnn_features_data = dnn_helper.DNN_Features(
        name='data',
        global_features=utils.create_array(features_data, dnn_features, model["preprocess_scheme"], True),
        objects=utils.preprocess_array(utils.pad_array(features_data["objects_"]), model["preprocess_scheme"]))
    #print dnn_features_data.global_features
    #print dnn_features_data.objects
    #print dnn_features_data.features
    dnn_features_validation = dnn_helper.DNN_Features(
        name='validation',
        global_features=utils.create_array(features_validation, dnn_features, model["preprocess_scheme"], True),
        objects=utils.preprocess_array(utils.pad_array(features_validation["objects_"]), model["preprocess_scheme"]))
    dnn_features_final_fit = dnn_helper.DNN_Features(
        name='final_fit',
        global_features=utils.create_array(features_final_fit, dnn_features, model["preprocess_scheme"], True),
        objects=utils.preprocess_array(utils.pad_array(features_final_fit["objects_"]), model["preprocess_scheme"]))
    dnn_features_train = dnn_helper.DNN_Features(
        name='train',
        global_features=utils.create_array(features, dnn_features, model["preprocess_scheme"], True),
        objects=utils.preprocess_array(utils.pad_array(features["objects_"]), model["preprocess_scheme"]))

    dnn = dnn_helper.DNN_Helper(
        features_validation=dnn_features_validation,
        features_train=dnn_features_train,
        features_data=dnn_features_data,
        features_final_fit=dnn_features_final_fit,
        metadata=model,
        weights_file="dnn_weights/" + model["weights"],
        train_mode=False)
    #dnn.predict()
    #dnn_predictions.append([dnn.predictions["train"], dnn.predictions["validation"], dnn.predictions["data"]])
    dnn_predictions.append(dnn.predict(debug=True))
    feature_names.append("dnn_score_%d" % i)
    i += 1 

print(dnn_predictions)
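# Editor's sketch (assumption, not in the original source): each entry appended to
# dnn_predictions above corresponds to the "dnn_score_%d" name appended in the same
# loop iteration, so the two lists can be paired up directly, e.g. for later
# bookkeeping or branch filling.
for score_name, preds in zip(feature_names, dnn_predictions):
    print(score_name, type(preds))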
Example #3
0
    with open(preprocess_scheme) as f_in:
        preprocess_scheme = json.load(f_in)

print("Preprocessing scheme: ", preprocess_scheme)

dnn_branches = []

if do_dnn:
    print("Calculating dnn scores")
    print(len(dnn_features))
    print([feat for feat in dnn_features])
    i = 0
    for dnn_model in dnn_models:
        with open(dnn_model, "r") as f_in:
            model = json.load(f_in)
        dnn_features_data = dnn_helper.DNN_Features(
            name='data',
            global_features=utils.create_array(features_data, dnn_features, model["preprocess_scheme"], True),
            objects=utils.preprocess_array(utils.pad_array(features_data["objects_"]), model["preprocess_scheme"]))
        dnn_features_validation = dnn_helper.DNN_Features(
            name='test',
            global_features=utils.create_array(features_test, dnn_features, model["preprocess_scheme"], True),
            objects=utils.preprocess_array(utils.pad_array(features_test["objects_"]), model["preprocess_scheme"]),
            lumi=features_test["lumi_"],
            run=features_test["run_"],
            evt=features_test["evt_"])
        dnn_features_train = dnn_helper.DNN_Features(
            name='train',
            global_features=utils.create_array(features_train, dnn_features, model["preprocess_scheme"], True),
            objects=utils.preprocess_array(utils.pad_array(features_train["objects_"]), model["preprocess_scheme"]))

        dnn = dnn_helper.DNN_Helper(
            features_validation=dnn_features_validation,
            features_train=dnn_features_train,
            features_data=dnn_features_data,
            metadata=model,
            weights_file="dnn_weights/" + model["weights"],
            train_mode=False)
        dnn_predictions.append(dnn.predict(debug=True))
        training_features.append("dnn_score_%d" % i)
        dnn_branches.append("dnn_score_%d" % i)
        i += 1

print(dnn_predictions)

preprocess_dict = {}
if args.z_score:
    for feat in training_features:
        mean, std = utils.get_mean_and_std(features_train[feat])
Example #4
0
    for i in range(n_object_features):
        mean, stddev = utils.get_mean_and_std(object_features[:, :, i])
        preprocess_dict["objects_" + str(i)] = {
            "mean": mean,
            "std_dev": stddev
        }

    with open("preprocess_scheme_%s_%s.json" % (args.channel, args.tag),
              "w") as f_out:
        json.dump(preprocess_dict, f_out, indent=4, sort_keys=True)
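
    # Sketch only (assumption, not from the original source): each entry stored in
    # preprocess_dict above would be applied as a standard z-score transform, which
    # is presumably what utils.preprocess_array does for every object-feature slice.
    def _z_score(column, entry):
        return (column - entry["mean"]) / entry["std_dev"]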

    for object_set in [
            object_features, object_features_validation, object_features_data,
            object_features_final_fit
    ]:
        object_set = utils.preprocess_array(object_set, preprocess_dict)
        #for i in range(n_object_features):
        #    object_set[:,:,i] = preprocess(object_set[:,:,i], preprocess_dict["objects_" + str(i)]["mean"], preprocess_dict["objects_" + str(i)]["std_dev"])
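        # Note (editor's observation, not in the original source): rebinding object_set
        # here only changes the loop variable, not the arrays in the surrounding list,
        # so this step takes effect only if utils.preprocess_array modifies its input
        # in place (or if the preprocessed copies are not needed afterwards).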

label = features["label_"]
multi_label = features["multi_label_"]
process_id = features["process_id_"]
weights = features["evt_weight_"]
mass = features["mass_"]
lead_sigmaEtoE = features["lead_sigmaEtoE_"]
sublead_sigmaEtoE = features["sublead_sigmaEtoE_"]
top_tag_score = features["top_tag_score_"]
tth_ttPP_mva = features["tth_ttPP_mva_"]
tth_dipho_mva = features["tth_dipho_mva_"]
tth_std_mva = features["tth_std_mva_"]
evt = features["evt_"]