# Example #1
# 0
import tensorflow as tf
import numpy as np
import py_datatools.datatools as dt
import py_ML.custom_models as cmod
import py_ML.custom_metrics as cmet
from py_datatools.plotting_tools import *

# Train a simple single-tracklet convolutional PID model on the first
# 100k tracks of the calibrated, deconvoluted 6-tracklet dataset.
NUM_TRACKS = 100000

train_tracks, infosets = dt.load_whole_named_dataset(
    '6_tracklets_large_calib_deconvoluted_train')
# Unfold each track into its tracklets: one 17x24 single-channel image each.
train_tracklets = train_tracks[:NUM_TRACKS].reshape((-1, 17, 24, 1))
# A track's label (info column 0) applies to all 6 of its tracklets.
train_labels = np.repeat(infosets[:NUM_TRACKS][:, 0], 6)

# Test-set evaluation currently disabled:
# test_tracks, infosets = dt.load_whole_named_dataset('6_tracklets_large_calib_test')
# test_tracklets = test_tracks.reshape((-1, 17, 24, 1))
# test_labels = np.repeat(infosets[:, 0], 6)

pid_model = cmod.SimpleSingleTrackletConvPID(
    2, kernel_size=(17, 24), use_bias=True)

pid_model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
    loss='binary_crossentropy',
    # cmet.PionEfficiencyAtElectronEfficiency(0.9) metric disabled for now.
    metrics=['accuracy'],
)

pid_model.fit(
    train_tracklets,
    train_labels,
    batch_size=32,
    epochs=100,
    validation_split=0.2,
)
# Example #2
# 0
import py_datatools.datatools as dt
import numpy as np

# Build a class-balanced, shuffled training set of electrons (label 1)
# and pions (label 0) from the calibrated 6-tracklet dataset.
tracks, info_set = dt.load_whole_named_dataset('6_tracklets_large_calib')

# Compute each selection mask once; info column 0 holds the class label.
electron_mask = info_set[:, 0] == 1
pion_mask = info_set[:, 0] == 0

e_tracks = tracks[electron_mask]
e_info_set = info_set[electron_mask]

p_tracks = tracks[pion_mask]
p_info_set = info_set[pion_mask]

# Fraction of the (scarcer) electron class used for training.  1.0 keeps
# the previous behaviour of putting every electron into the training set;
# lower it to carve out a test split.
TRAIN_FRACTION = 1.0

num_electrons = len(e_tracks)
num_train_electrons = int(num_electrons * TRAIN_FRACTION)
num_test_electrons = num_electrons - num_train_electrons
# Balance the classes: take exactly as many pions as electrons.
num_train_pions = num_train_electrons
# BUG FIX: was `num_test_pions = num_train_pions`, which disagreed with the
# electron side (num_test_electrons was 0 while num_test_pions was not).
num_test_pions = num_test_electrons

e_train_tracks = e_tracks[:num_train_electrons]
e_train_info_set = e_info_set[:num_train_electrons]

p_train_tracks = p_tracks[:num_train_pions]
p_train_info_set = p_info_set[:num_train_pions]

train_tracks = np.concatenate([e_train_tracks, p_train_tracks])
train_info_set = np.concatenate([e_train_info_set, p_train_info_set])

# Shuffle tracks and info rows with one shared permutation so they stay
# aligned.  NOTE(review): no RNG seed is set, so the shuffle is not
# reproducible — confirm whether that is intended.
randomize = np.random.permutation(len(train_tracks))

train_tracks = train_tracks[randomize]
train_info_set = train_info_set[randomize]
# Example #3
# 0
import tensorflow as tf
import numpy as np
import py_datatools.datatools as dt
import py_ML.custom_models as cmod
import py_ML.custom_metrics as cmet

# Data and model setup for the multiplexed tracklet PID network on the
# (uncalibrated) large 6-tracklet dataset.
train_tracks, train_infosets = dt.load_whole_named_dataset(
    '6_tracklets_large_train')
test_tracks, test_infosets = dt.load_whole_named_dataset(
    '6_tracklets_large_test')

# Append a channel axis; the class label lives in info column 0.
train_tracks = np.expand_dims(train_tracks, axis=-1)
test_tracks = np.expand_dims(test_tracks, axis=-1)
train_labels = train_infosets[:, 0]
test_labels = test_infosets[:, 0]

# One single-tracklet network, multiplexed across a track's tracklets.
tracklet_pid_model = cmod.ComplexConvTrackletPID()
track_pid_model = cmod.TrackletModelMultiplexer(tracklet_pid_model)

track_pid_model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=0.00001),
    loss='binary_crossentropy',
    metrics=[
        'accuracy',
        cmet.PionEfficiencyAtElectronEfficiency(0.9),
    ],
)

# BUG FIX: this fit() call was truncated mid-statement in the source
# (unclosed parenthesis).  Completed in the same pattern as the other
# examples; the test set loaded above was otherwise unused, so it is
# wired in as validation data.
history = track_pid_model.fit(
    train_tracks,
    train_labels,
    batch_size=512,
    epochs=10000,
    validation_data=(test_tracks, test_labels),
)
# Example #4
# 0
import tensorflow as tf
import numpy as np
import py_datatools.datatools as dt
import py_ML.custom_models as cmod
import py_ML.custom_metrics as cmet
from py_datatools.plotting_tools import *

# Train the multiplexed tracklet PID model on the calibrated dataset,
# holding out 20% of the training tracks for validation.
train_tracks, train_infosets = dt.load_whole_named_dataset(
    '6_tracklets_large_calib_train')
train_tracks = np.expand_dims(train_tracks, axis=-1)  # add channel axis
train_labels = train_infosets[:, 0]  # info column 0 holds the label

tracklet_pid_model = cmod.ComplexConvTrackletPID()
track_pid_model = cmod.TrackletModelMultiplexer(tracklet_pid_model)

track_pid_model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    loss='binary_crossentropy',
    metrics=[
        'accuracy',
        cmet.PionEfficiencyAtElectronEfficiency(0.9),
    ],
)

history = track_pid_model.fit(
    train_tracks,
    train_labels,
    batch_size=512,
    epochs=100,
    validation_split=0.2,
)
# Alternative: validation_data=(test_tracks, test_labels)
# Example #5
# 0
import tensorflow as tf
import numpy as np
import py_datatools.datatools as dt
import py_ML.custom_models as cmod
import py_ML.custom_metrics as cmet

# Train a small single-tracklet conv PID model on the medium dataset and
# evaluate against the held-out medium test set.
train_tracks, infosets = dt.load_whole_named_dataset('medium_train')
train_tracklets = train_tracks.reshape((-1, 17, 24, 1))
train_labels = np.repeat(infosets[:, 0], 6)  # one label per tracklet

test_tracks, infosets = dt.load_whole_named_dataset('medium_test')
test_tracklets = test_tracks.reshape((-1, 17, 24, 1))
test_labels = np.repeat(infosets[:, 0], 6)

pid_model = cmod.SimpleSingleTrackletConvPID(
    10, kernel_size=(3, 2), use_bias=True)

# BUG FIX: `shuffle=True` was passed to compile(); it is a fit() argument
# and is ignored/rejected by compile().  Moved to the fit() call below.
pid_model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
    loss='binary_crossentropy',
    metrics=['accuracy', cmet.PionEfficiencyAtElectronEfficiency(0.9)],
)

pid_model.fit(
    train_tracklets,
    train_labels,
    batch_size=32,
    epochs=100,
    shuffle=True,
    validation_data=(test_tracklets, test_labels),
)
# Example #6
# 0
import glob
import os

import numpy as np

import py_datatools.datatools as dt

# Apply per-run local gain calibration to every track of the dataset.
calib_params = {}
dataset_name = 'all_tracks_6_tracklets_valid_run_numbers'
new_dataset_name = dataset_name + '_calib'

# Load one gain table per run; the run number is encoded in the file name
# after the last underscore.  Columns 2+ hold the gains, laid out as
# (detector, row, column) = (540, 16, 144).
for calib_file in glob.glob(
        os.path.dirname(__file__) + '/calib_files/combined_local_gains*.txt'):
    print('Loading calib file:', calib_file)
    run_no = calib_file.split('_')[-1].split('.')[0]
    calib_params[run_no] = np.genfromtxt(
        calib_file, delimiter=', ')[:, 2:].reshape((540, 16, 144))

tracks, infosets = dt.load_whole_named_dataset(dataset_name)

# Column offsets of the 17 pads of a tracklet window, centred 8 pads left
# of the reference column.  Shape (17, 1) so it broadcasts against the 6
# per-tracklet reference columns below.
# (Replaces a list comprehension whose loop variable shadowed the outer
# track index `i`.)
pad_offsets = np.arange(17)[:, np.newaxis] - 8

for i in range(len(tracks)):
    # Info column 13 carries the run number for this track.
    run_no = infosets[i, 13]
    run_gains = calib_params[str(int(run_no))]

    # Per-tracklet geometry for the 6 tracklets of this track.
    dets = infosets[i, 14:20].astype(int)
    rows = infosets[i, 21:27].astype(int)
    cols = infosets[i, 28:34].astype(int)

    # Advanced indexing with a (17, 6) column grid yields gains of shape
    # (17, 6); transpose to (6, 17) and add a trailing axis so the gains
    # broadcast over the remaining (time-bin) dimension of the track.
    track_gains = np.expand_dims(
        run_gains[dets, rows, cols + pad_offsets].T, axis=-1)

    tracks[i] *= track_gains