Example #1

import numpy as np
import tensorflow as tf

# dt (dataset utilities), cmod (models) and cmet (metrics) are project-local
# modules; train_tracklets and train_labels are assumed to be prepared
# earlier in the original script.

# Per-tracklet normalization factors for the two column groups
# (norm1 is inferred from the commented-out division below).
norm1 = np.sum(train_tracklets[:, :, :12], axis=(1, 2), keepdims=True)
norm2 = np.sum(train_tracklets[:, :, 12:], axis=(1, 2), keepdims=True)

# train_tracklets[:,:,:12] /= norm1
# train_tracklets[:,:,12:] /= norm2

# test_tracks, infosets = dt.load_whole_named_dataset('6_tracklets_large_calib_deconvoluted_test')
# test_tracklets = test_tracks.reshape((-1, 17, 24, 1))
# test_labels = np.repeat(infosets[:, 0], 6)

# Project-local Keras model built in cmod; trained below as a binary
# electron-vs-pion classifier on single tracklets.
pid_model = cmod.VeryComplexConvTrackletPID()

pid_model.compile(
    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    loss='binary_crossentropy',
    metrics=['accuracy',
             cmet.PionEfficiencyAtElectronEfficiency(0.9)],
)
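
# For orientation, a NumPy sketch of the figure of merit behind
# cmet.PionEfficiencyAtElectronEfficiency (the real class is a project-local
# Keras metric; this standalone version is illustrative only and assumes
# electrons are the positive class, label 1): find the score cut that keeps
# 90% of the electrons, then report the fraction of pions surviving it.
def pion_efficiency_at_electron_efficiency(y_true, y_score, electron_eff=0.9):
    electron_scores = np.sort(y_score[y_true == 1])
    # Cut below which a fraction (1 - electron_eff) of the electrons fall.
    cut = electron_scores[int((1.0 - electron_eff) * len(electron_scores))]
    return np.mean(y_score[y_true == 0] >= cut)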

pid_model.fit(
    train_tracklets,
    train_labels,
    batch_size=512,
    epochs=100,
    validation_split=0.2,
)
# Alternative to validation_split, using the held-out set loaded above:
# validation_data=(test_tracklets, test_labels)

# Kernels of the first convolutional layer, laid out as
# (height, width, in_channels, out_channels).
units = pid_model.get_weights()[0]
u1 = units[:, :, 0, 0]  # first output filter
u2 = units[:, :, 0, 1]  # second output filter
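
# Quick visual check of the extracted kernels (assumes matplotlib is
# available as `import matplotlib.pyplot as plt`):
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u1)
ax2.imshow(u2)
plt.show()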
Example #2

# train_tracks and train_infosets come from the matching training dataset,
# loaded and shaped analogously to the test set below.
train_labels = train_infosets[:, 0]

test_tracks, test_infosets = dt.load_whole_named_dataset('6_tracklets_large_test')
test_tracks = np.expand_dims(test_tracks, axis=-1)  # add the channel axis
test_labels = test_infosets[:, 0]
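# At this point test_tracks should have shape (N, 6, 17, 24, 1): N tracks,
# 6 tracklets per track, one 17x24 pad image per tracklet, one channel.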

for i in range(100):
	# Re-normalize every tracklet image to unit sum at the start of each round.
	test_tracks /= np.sum(test_tracks, axis=(2, 3), keepdims=True)
	train_tracks /= np.sum(train_tracks, axis=(2, 3), keepdims=True)

	# Fresh single-tracklet model, wrapped by the multiplexer so the same
	# network is applied to every tracklet of a track.
	tracklet_pid_model = cmod.PartialTrackletConvPID()
	track_pid_model = cmod.TrackletModelMultiplexer(tracklet_pid_model)

	track_pid_model.compile(
		optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
		loss='binary_crossentropy',
		metrics=['accuracy', cmet.PionEfficiencyAtElectronEfficiency(0.9)],
	)

	history = track_pid_model.fit(
		train_tracks,
		train_labels,
		batch_size=512,
		epochs=1,
		validation_data=(test_tracks, test_labels),
	)

	# Kernel of the first conv layer of the freshly trained model.
	conv_unit = track_pid_model.get_weights()[0][:, :, 0, 0]

	# plt.imshow(conv_unit)
	# plt.show()

	# Project the learned unit out of both datasets so the next round is
	# pushed towards a different feature (deflation-style iteration).
	new_train_tracks = dt.project_conv_unit_out_of_dataset(train_tracks, conv_unit, [1, 1], 'valid').reshape((-1, 6, 17, 24))
	new_test_tracks = dt.project_conv_unit_out_of_dataset(test_tracks, conv_unit, [1, 1], 'valid').reshape((-1, 6, 17, 24))
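
# For orientation, a rough sketch of the multiplexer idea used above: one
# shared tracklet network applied to each of the 6 tracklets of a track,
# with the per-tracklet outputs averaged. Purely illustrative; the real
# cmod.TrackletModelMultiplexer may combine the outputs differently.
def tracklet_multiplexer_sketch(tracklet_net, n_tracklets=6):
    inp = tf.keras.layers.Input(shape=(n_tracklets, 17, 24, 1))
    per_tracklet = [
        tracklet_net(tf.keras.layers.Lambda(lambda t, j=i: t[:, j])(inp))
        for i in range(n_tracklets)
    ]
    return tf.keras.models.Model(inp, tf.keras.layers.Average()(per_tracklet))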