# Optional diagnostic plots: 2D histograms of the feature (x) and context (z)
# distributions, left commented out.
# plt.close()
# fig, ax = plt.subplots(figsize=(5, 3))
# plt.hist2d(x[:, 0], x[:, 1], bins=bin_size, norm=mpl.colors.LogNorm())  # cmap=plt.cm.nipy_spectral
# plt.colorbar()
# plt.show()
# fig, ax = plt.subplots(figsize=(5, 3))
# plt.hist2d(z[:, 0], z[:, 1], bins=bin_size, norm=mpl.colors.LogNorm())  # cmap=plt.cm.nipy_spectral
# plt.colorbar()
# plt.show()
# sys.exit()

# Construct an nflows model and its optimizer.
flow, optimizer = make_model(num_layers, num_features, num_hidden_features, device)
print("number of params: ", sum(p.numel() for p in flow.parameters()))
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.98)

start = datetime.now()
start_time = start.strftime("%H:%M:%S")
print("Start Time =", start_time)

# Training loop: each epoch draws a fresh batch of (x, z) pairs and minimizes
# the negative conditional log-likelihood of x given the context z.
losses = []
for i in range(num_epoch):
    sampleDict = xz.sample(training_sample_size)
    x_train = sampleDict["x"][:, 0:num_features].to(device)
    z_train = sampleDict["z"][:, 0:num_features].to(device)
    optimizer.zero_grad()
    loss = -flow.log_prob(inputs=x_train, context=z_train).mean()
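    # NOTE: the remainder of the training step is not shown above; the lines
    # below are an assumed, minimal continuation (backward pass, optimizer
    # step, and loss bookkeeping), not necessarily the original code.
    loss.backward()
    optimizer.step()
    losses.append(loss.item())
    if i % 100 == 0:
        print("epoch", i, "loss", loss.item())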
# Reconstruct the trained nflows models from saved checkpoints.
# model_path = "models/"
model_path = "models/Cond/3features/"
feature_subset = [1, 2, 3, 5, 6, 7, 9, 10, 11]  # subset of the 16 available features

print(" reading electron NF model ")
# model_name = "TM-Final-UMNN_elec_3_6_80_128_-9.54.pt"  # earlier checkpoint, superseded below
model_name = "TM-Final-UMNN_elec_3_6_80_128_-8.26.pt"
# Hyperparameters are parsed from the underscore-separated fields of the checkpoint filename.
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features, num_layers, num_hidden_features, training_sample_size)

flow_e, optimizer_e = make_model(num_layers, num_features, num_hidden_features, device)
print("number of params: ", sum(p.numel() for p in flow_e.parameters()))
flow_e.load_state_dict(torch.load(model_path + model_name))
flow_e.eval()

print(" reading proton NF model ")
# model_name = "TM-Final-UMNN_prot_3_6_80_128_-9.97.pt"  # earlier checkpoint, superseded below
model_name = "TM-Final-UMNN_prot_3_6_80_128_-9.23.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features, num_layers, num_hidden_features, training_sample_size)
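# NOTE: the construction and loading of the proton flow is not shown above; the
# lines below are an assumed continuation mirroring the electron block (build
# the model with make_model, load the checkpoint, switch to eval mode). The
# names flow_p / optimizer_p are assumptions for this sketch.
flow_p, optimizer_p = make_model(num_layers, num_features, num_hidden_features, device)
print("number of params: ", sum(p.numel() for p in flow_p.parameters()))
flow_p.load_state_dict(torch.load(model_path + model_name))
flow_p.eval()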
# Reconstruct the photon nflows models.
# model_path = "models/"
model_path = "models/Cond/3features/"
feature_subset = [9, 10, 11, 13, 14, 15]  # subset of the 16 available features

print(" reading photon NF model ")
model_name = "TM-Final-UMNN_phot_3_10_80_128_-5.38.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features, num_layers, num_hidden_features, training_sample_size)

flow_g, optimizer_g = make_model(num_layers, num_features, num_hidden_features, device)
print("number of params: ", sum(p.numel() for p in flow_g.parameters()))
flow_g.load_state_dict(torch.load(model_path + model_name))
flow_g.eval()

print(" reading photon2 NF model ")
model_name = "TM-Final-UMNN_phot2_3_10_80_128_-6.04.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features, num_layers, num_hidden_features, training_sample_size)
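# Illustrative helper (not part of the original script): once a conditional
# flow is loaded in eval mode, samples can be drawn per context row with
# nflows' Flow.sample(num_samples, context=...). The helper name and its
# return convention are assumptions for this sketch.
def sample_flow(flow, context, num_samples=1):
    """Draw num_samples samples for each row of `context` from a conditional flow."""
    with torch.no_grad():
        samples = flow.sample(num_samples, context=context.to(device))
    # nflows returns shape [num_context, num_samples, num_features].
    return samples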