Example #1
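# Note: `params`, `num_features`, `feature_subset`, `model_path`, and
# `model_name` are defined earlier in the original script. A minimal sketch,
# assuming `params` holds command-line arguments (hypothetical; the values
# could equally be parsed from a checkpoint filename such as
# "TM-UMNN_4_10_10_800_799_-10.20.pt"):
# import sys
# params = sys.argv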
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])

print(num_features, num_layers, num_hidden_features, training_sample_size)

flow_g, optimizer_g = make_model(num_layers, num_features, num_hidden_features,
                                 device)

print("number of params: ", sum(p.numel() for p in flow_g.parameters()))
flow_g.load_state_dict(torch.load(model_path + model_name))
flow_g.eval()

print("reading truth data")
train = dataXZ.dataXZ(feature_subset=feature_subset,
                      file="data/train.pkl",
                      mode="epg")
truth_entire = train.truth.detach().numpy()
reco_entire = train.reco.detach().numpy()
print("done with reading truth data")

print("reading validation data")
validation = dataXZ.dataXZ(feature_subset=feature_subset,
                           file="data/validation.pkl",
                           mode="epg")
truth_validation = validation.truth.detach().numpy()
reco_validation = validation.reco.detach().numpy()
print("done with reading validation data")

trials = 10000  # Number of overall loops
Example #2
# These are parameters for the normalizing flow model
num_layers = 10
num_hidden_features = 10

# These are training parameters
num_epoch = 3000
training_sample_size = 800

if feature_subset == "all":
    num_features = 16
else:
    num_features = len(feature_subset)
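
# For reference, Example #3 below shows concrete values: feature_subset may
# be "all" for all 16 features, or a list of column indices such as
# [12, 13, 14, 15] for just the photon2 features.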

# Read the data using the dataXZ data class
xz = dataXZ.dataXZ(feature_subset=feature_subset)

# print("trying to sample")
# sampleDict = xz.sample(100000)
# x = sampleDict["x"][:, 0:num_features]
# z = sampleDict["z"][:, 0:num_features]
# x = x.detach().numpy()
# z = z.detach().numpy()
# print("trying to plot")

# bin_size = [100,100]

# plt.hist(x[:,0],color = "red", density=True,bins=100)
# plt.savefig(outdir+"feature0"+"_noQT")
# plt.close()
Example #3
# Photon 2:
# model_path = "models/Cond/photon2/"
# model_name = "TM-UMNN_4_10_10_800_799_-10.20.pt"
# feature_subset = [12, 13, 14, 15]  # Just photon2 features
# For QT:
# model_path = "models/Cond/QT/photon2/"
# model_name = "TM-UMNN_3_10_10_800_1599_-5.98.pt"
# feature_subset = [13, 14, 15]
# For INV:
# model_path = "models/Cond/QT/INV/photon2/"
# model_name = "TM-UMNN_3_10_10_800_1199_-5.53.pt"
# feature_subset = [13, 14, 15]

# Initialize dataXZ object for the quantile inverse transform
xz = dataXZ.dataXZ(feature_subset=feature_subset, test=True)
QuantTran_x = xz.qt_x
QuantTran_z = xz.qt_z
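
# A minimal sketch of how these transformers might be used downstream,
# assuming qt_x/qt_z are fitted sklearn QuantileTransformer objects
# (an assumption; only the attribute names appear in this snippet):
# physical_samples = QuantTran_x.inverse_transform(generated_samples)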

dev = "cuda:0" if torch.cuda.is_available() else "cpu"
#dev = "cpu"
print(dev)
device = torch.device(dev)

#model_name = "TM_16_18_20_100_799_-15.19.pt" #For initial double precision studies
#model_name = "TM_4_6_4_100_3199_-0.88.pt" #4 features with QD, initial training

#model_name = "TM_16_16_32_400_4399_-14.42.pt" #16 feature with QD
#feature_subset = "all" #All 16 features

#model_name = "TM-Final_4_6_80_400_-1.97.pt" #4 feature (electron) train, done 5/10 at 4 PM
Example #4
# These are parameters for the normalizing flow model
num_layers = 6
num_hidden_features = 80

# These are training parameters
num_epoch = 10000
training_sample_size = 128

if feature_subset == "all":
    num_features = 16
else:
    num_features = len(feature_subset)

# Read the data using the dataXZ data class
xb = dataXZ.dataXZ(feature_subset=feature_subset,
                   file="data/train.pkl",
                   mode="epg")
print("done with reading data")
print(xb.reco)
print(xb.truth)
# Construct an nflows model
flow, optimizer = make_model(num_layers, num_features, num_hidden_features,
                             device)
print("number of params: ", sum(p.numel() for p in flow.parameters()))

start = datetime.now()
start_time = start.strftime("%H:%M:%S")
print("Start Time =", start_time)
losses = []

save = True
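
# The epoch loop itself is not shown in this snippet. A minimal sketch of a
# maximum-likelihood loop, assuming xb.sample() returns a dict with an "x"
# tensor as in the other examples (the key and the use of `device` here are
# assumptions):
for epoch in range(num_epoch):
    sampleDict = xb.sample(training_sample_size)
    x = sampleDict["x"][:, 0:num_features].to(device)
    optimizer.zero_grad()
    loss = -flow.log_prob(inputs=x).mean()  # negative log-likelihood
    loss.backward()
    optimizer.step()
    losses.append(loss.item())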
Example #5
import torch
import matplotlib as mpl
import matplotlib.pyplot as plt

from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal, ConditionalDiagonalNormal, DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation

from utils import dataXZ

# Define device to be used
dev = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(dev)

# Read the data using the dataXZ data class
xz = dataXZ.dataXZ()
sampleDict = xz.sample(100000)  #Get a subset of the datapoints
x = sampleDict["x"]
x = x.detach().numpy()

# Visualize the data
bin_size = [80, 80]
fig, ax = plt.subplots(figsize=(10, 7))
plt.rcParams["font.size"] = "16"
ax.set_xlabel("Electron Momentum")
ax.set_ylabel("Proton Momentum")
plt.title('Microphysics Simulated EP Distribution')

plt.hist2d(x[:, 0], x[:, 1], bins=bin_size,
           norm=mpl.colors.LogNorm())  # alternatively: cmap=plt.cm.nipy_spectral
plt.xlim([1, 6.5])
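
# make_model is called throughout these examples but never shown. A minimal
# sketch built from the imports above (an assumption: the "TM-UMNN_*"
# checkpoint names suggest the real model uses UMNN transforms rather than
# the masked affine ones used here):
def make_model(num_layers, num_features, num_hidden_features, device):
    transforms = []
    for _ in range(num_layers):
        transforms.append(ReversePermutation(features=num_features))
        transforms.append(MaskedAffineAutoregressiveTransform(
            features=num_features, hidden_features=num_hidden_features))
    flow = Flow(CompositeTransform(transforms),
                StandardNormal(shape=[num_features])).to(device)
    optimizer = torch.optim.Adam(flow.parameters())
    return flow, optimizer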
Example #6
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])

print(num_features, num_layers, num_hidden_features, training_sample_size)

flow_g2, optimizer_g2 = make_model(num_layers, num_features,
                                   num_hidden_features, device)

print("number of params: ", sum(p.numel() for p in flow_g2.parameters()))
flow_g2.load_state_dict(torch.load(model_path + model_name))
flow_g2.eval()

print("reading truth data")
train = dataXZ.dataXZ(feature_subset=feature_subset,
                      file="data/epgg_train.pkl",
                      mode="epgg")
truth_entire = train.truth.detach().numpy()
reco_entire = train.reco.detach().numpy()
print("done with reading truth data")

print("reading test data")
test = dataXZ.dataX(feature_subset=feature_subset,
                    file="data/epgg_test.pkl",
                    mode="epgg")
reco_test = test.reco.detach().numpy()
print("done with reading test data")

trials = 10000  # Number of overall loops

means = np.mean(reco_entire - truth_entire, axis=1)
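
# Note: axis=1 averages the residual over the features of each event;
# per-feature biases would instead use axis=0.
# A minimal generation sketch (an assumption; the "models/Cond" paths suggest
# a conditional flow, which samples against a context of reco features):
# with torch.no_grad():
#     context = torch.tensor(reco_test[:trials], dtype=torch.float32).to(device)
#     generated = flow_g2.sample(1, context=context).squeeze(1).cpu().numpy()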