def const():
    """Estimate each model's marginal (shuffled-target) baseline and store it in `consts`.

    Loads a wide pT sample (500-1000) and, for every trained model, evaluates
    the model on its inputs paired with a *shuffled* target `Y`; the resulting
    `marginal(...)` value is written into the module-level `consts` list
    (indices 0-3). Callers later subtract these constants from the models'
    joint estimates.

    Relies on module-level globals: `load_data`, `marginal`, `consts`,
    `eta`, `quality`, `pad`, `momentum_scale`, and the trained models
    `DNN`, `EFN`, `PFN`, `PFN_pid`.
    """
    # Wide pT slice used only for estimating the marginals.
    X, PFCs, Y, C, N = load_data(500, 1000, eta, quality, pad, x_dim=4,
                                 momentum_scale=momentum_scale, n=1000000,
                                 max_particle_select=150, frac=0.1)
    consts[0] = marginal(None, DNN.predict([X, DNN.shuffle(Y)]))
    # EFN/PFN consume only the first three per-particle features (no PID).
    consts[1] = marginal(None, EFN.predict([PFCs[:, :, :3], EFN.shuffle(Y)]))
    consts[2] = marginal(None, PFN.predict([PFCs[:, :, :3], PFN.shuffle(Y)]))
    # FIX: originally used PFN.shuffle here; every other entry shuffles with
    # its own model, so the PID model should use PFN_pid.shuffle for consistency.
    consts[3] = marginal(None, PFN_pid.predict([PFCs[:, :, :4], PFN_pid.shuffle(Y)]))
def get_values(pt_low, pt_high):
    """Evaluate all five models on one pT bin and summarize their outputs.

    Parameters
    ----------
    pt_low, pt_high : float
        Edges of the jet-pT bin to load test data for.

    Returns
    -------
    tuple
        ``(means, mean_uncertainties, resolutions, resolution_uncertainties,
        MIs, num_events)``. The first four are per-model lists of Gaussian-fit
        statistics normalized by the bin-center pT; ``MIs`` holds each model's
        fourth prediction entry (presumably a mutual-information estimate —
        TODO confirm against the *_predict helpers); ``num_events`` is the
        number of test events loaded.
    """
    X_test, PFCs_test, Y_test, C_test, N_test = load_data(
        pt_low, pt_high, eta, quality, pad, x_dim=4,
        momentum_scale=momentum_scale, n=1000000,
        max_particle_select=150, frac=0.1)
    test = (X_test, PFCs_test, Y_test, C_test)
    num_events = X_test.shape[0]
    pt = (pt_high + pt_low) / 2  # bin center, used to normalize all fits

    # One row per model; each *_predict returns a 4-tuple whose entries are
    # indexed 0 (mean samples), 1 (resolution samples), 3 (MI) below.
    predictions = np.array(
        (dnn_predict(test, DNN, consts[0]),
         efn_predict(test, EFN, consts[1]),
         pfn_predict(test, PFN, consts[2]),
         pfn_pid_predict(test, PFN_pid, consts[3]),
         cms_predict(test, None, consts[4])))

    means, mean_uncertainties = [], []
    resolutions, resolution_uncertainties = [], []
    MIs = []
    # Iterate rows directly instead of the original hard-coded range(5).
    for row in predictions:
        # norm.fit already returns a (loc, scale) pair; the original wrapped
        # it in a redundant [*...] splat before unpacking.
        mean, mean_uncertainty = norm.fit(row[0])
        resolution, resolution_uncertainty = norm.fit(row[1])
        means.append(mean / pt)
        mean_uncertainties.append(mean_uncertainty / pt)
        resolutions.append(resolution / pt)
        resolution_uncertainties.append(resolution_uncertainty / pt)
        MIs.append(row[3])
    return means, mean_uncertainties, resolutions, resolution_uncertainties, MIs, num_events
d_multiplier = param_dict["d_multiplier"] # Dataset Parameters cache_dir = dataset_dict["cache_dir"] momentum_scale = dataset_dict["momentum_scale"] n = dataset_dict["n"] pad = dataset_dict["pad"] pt_lower, pt_upper = dataset_dict["pt_lower"], dataset_dict["pt_upper"] eta = dataset_dict["eta"] quality = dataset_dict["quality"] # ############################# # ########## DATASET ########## # ############################# X, PFCs, Y, C, N = load_data(cache_dir, pt_lower, pt_upper, eta, quality, pad, x_dim = x_dim, momentum_scale = momentum_scale, n = n, max_particle_select = 150, amount = dataset_dict["amount"]) X_test, PFCs_test, Y_test, C_test, N_test = load_data(cache_dir, pt_lower, pt_upper, eta, quality, pad, x_dim = x_dim, momentum_scale = momentum_scale, n = 50) print(X.shape, PFCs.shape, Y.shape) # ############################ # ########## MODELS ########## # ############################ MI_histories = [] retrain_points = [] for train_count in range(retrain + 1): print("TRAINING %d" % (train_count)) # Pretain
# Dataset parameters (read from the module-level dataset_dict).
n = dataset_dict["n"]
pad = dataset_dict["pad"]
pt_lower, pt_upper = dataset_dict["pt_lower"], dataset_dict["pt_upper"]
eta = dataset_dict["eta"]
quality = dataset_dict["quality"]

# #############################
# ########## DATASET ##########
# #############################
# Training sample. return_pfcs=False, so only (X, Y, C, N) are returned —
# no per-particle PFC arrays — and no cap on particles per event.
X, Y, C, N = load_data(cache_dir, pt_lower, pt_upper, eta, quality, pad,
                       momentum_scale=momentum_scale, n=n,
                       max_particle_select=None,
                       amount=dataset_dict["amount"], return_pfcs=False)
# Small held-out sample (n = 50) used for evaluation.
X_test, Y_test, C_test, N_test = load_data(cache_dir, pt_lower, pt_upper, eta,
                                           quality, pad,
                                           momentum_scale=momentum_scale,
                                           n=50, return_pfcs=False)
# Analysis parameters for this script.
pt_lower, pt_upper = 695, 705  # narrow pT slice centered at 700
eta = 2.4
quality = 2
epochs = 150
d_multiplier = 0.0

# #############################
# ########## DATASET ##########
# #############################
# Test sample for the chosen pT slice (per-event particle cap of 150).
X_test, PFCs_test, Y_test, C_test, N_test = load_data(
    cache_dir, pt_lower, pt_upper, eta, quality, pad, x_dim=4,
    momentum_scale=momentum_scale, n=n, max_particle_select=150)
test = (X_test, PFCs_test, Y_test, C_test)

# Histogram of the per-event particle multiplicity in the test sample.
plt.hist(N_test, bins=25, histtype='step', color="red",
         label="# of Particles", density=True)
plt.xlabel(r"$N$")
plt.ylabel("Density")