def create_weights_biases(n_input, layers, sigma=1.0, n_output=1):
    """
    Create arrays of weights and vectors of biases for a multilayer perceptron
    of a given configuration (with a single output neuron by default).
      n_input : number of input neurons
      layers  : list of numbers of neurons in the hidden layers
      returns : (weights, biases)
    """
    n_hidden = [n_input] + layers
    weights = []
    biases = []
    for i in range(len(n_hidden) - 1):
        weights += [
            tf.Variable(
                sigma * np.random.normal(size=[n_hidden[i], n_hidden[i + 1]]),
                dtype=atfi.fptype(),
            )
        ]
        biases += [
            tf.Variable(sigma * np.random.normal(size=[n_hidden[i + 1]]),
                        dtype=atfi.fptype())
        ]
    weights += [
        tf.Variable(sigma * np.random.normal(size=[n_hidden[-1], n_output]),
                    dtype=atfi.fptype())
    ]
    biases += [
        tf.Variable(sigma * np.random.normal(size=[n_output]), dtype=atfi.fptype())
    ]
    return (weights, biases)
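# Usage sketch (editor's illustration, not part of the original module): build the
# parameters of an MLP with 2 input neurons, hidden layers of 32 and 8 neurons and
# a single output. Assumes numpy as np, tensorflow as tf and amplitf.interface as
# atfi are imported as in the function above.
#
#   weights, biases = create_weights_biases(2, [32, 8], sigma=1.0)
#   # weight shapes: (2, 32), (32, 8), (8, 1); bias shapes: (32,), (8,), (1,)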
def init_fixed_weights_biases(init):
    """
    Initialise constant (fixed) weights and biases from a numpy array
      init : pair (weights, biases)
    """
    init_weights = init[0]
    init_biases = init[1]
    weights = []
    biases = []
    for i in range(len(init_weights) - 1):
        weights += [tf.constant(init_weights[i], dtype=atfi.fptype())]
        biases += [tf.constant(init_biases[i], dtype=atfi.fptype())]
    weights += [tf.constant(init_weights[-1], dtype=atfi.fptype())]
    biases += [tf.constant(init_biases[-1], dtype=atfi.fptype())]
    return (weights, biases)
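# Usage sketch (editor's illustration): restore fixed (non-trainable) weights from
# an array saved with np.save, following the layout written by estimate_density_old
# below, i.e. [scale, transform_ranges, weights, biases]. The file name is a placeholder.
#
#   saved = np.load("train.npy", allow_pickle=True)
#   weights, biases = init_fixed_weights_biases((saved[2], saved[3]))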
def square_dalitz_plot_jacobian(self, sample):
    """
    sample: [mAB^2, mBC^2]
    Return the Jacobian determinant (|J|) of the transformation
    dmAB^2 dmBC^2 -> |J| dMpr dThpr, where Mpr and Thpr are defined in the (AC) frame.
    """
    mPrime = self.m_prime_ac(sample)
    thPrime = self.theta_prime_ac(sample)

    diff_AC = tf.cast(atfi.sqrt(self.maxac) - atfi.sqrt(self.minac), atfi.fptype())
    mAC = atfi.const(0.5) * diff_AC * (
        atfi.const(1.0) + atfi.cos(atfi.pi() * mPrime)) + tf.cast(
            atfi.sqrt(self.minac), atfi.fptype())
    mACSq = mAC * mAC

    eAcmsAC = (atfi.const(0.5) *
               (mACSq - tf.cast(self.mc2, atfi.fptype()) +
                tf.cast(self.ma2, atfi.fptype())) / mAC)
    eBcmsAC = (atfi.const(0.5) *
               (tf.cast(self.md, atfi.fptype())**2.0 - mACSq -
                tf.cast(self.mb2, atfi.fptype())) / mAC)

    pAcmsAC = atfi.sqrt(eAcmsAC**2.0 - tf.cast(self.ma2, atfi.fptype()))
    pBcmsAC = atfi.sqrt(eBcmsAC**2.0 - tf.cast(self.mb2, atfi.fptype()))

    deriv1 = atfi.pi() * atfi.const(0.5) * diff_AC * atfi.sin(atfi.pi() * mPrime)
    deriv2 = atfi.pi() * atfi.sin(atfi.pi() * thPrime)

    return atfi.const(4.0) * pAcmsAC * pBcmsAC * mAC * deriv1 * deriv2
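# Editor's note (hedged sketch of the relations used above, assuming the usual
# square Dalitz plot conventions): mPrime is the inverse of
#   mAC = 0.5*(mAC_max - mAC_min)*(1 + cos(pi*mPrime)) + mAC_min,
# with mAC_min = sqrt(minac) and mAC_max = sqrt(maxac), and thPrime = theta_AC/pi.
# deriv1 = |dmAC/dmPrime| and deriv2 = |dcos(theta_AC)/dThPrime|, so the returned
# value 4*pA*pB*mAC*deriv1*deriv2 is the full Jacobian of dmAB^2 dmBC^2 -> dMpr dThpr.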
def unfiltered_sample(self, size, maximum=None):
    """
    Return TF graph for a uniform sample of points within the phase space rectangle.
      size    : number of _initial_ points to generate. Not all of them will fall
                into the phase space, so after filtering the sample will contain
                fewer than `size` points.
      maximum : if not None, add a 3rd dimension to the generated tensor which is
                a uniform number from 0 to maximum. Useful for accept-reject toy MC.
    """
    v = [
        tf.random.uniform([size], self.minab, self.maxab, dtype=atfi.fptype()),
        tf.random.uniform([size], self.minbc, self.maxbc, dtype=atfi.fptype()),
    ]
    if maximum is not None:
        v += [tf.random.uniform([size], 0.0, maximum, dtype=atfi.fptype())]
    return tf.stack(v, axis=1)
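# Usage sketch (editor's illustration): `phsp` stands for an instance of the phase
# space class this method belongs to. The rectangular sample has to be passed
# through filter() to keep only the points inside the kinematic boundary.
#
#   unfiltered = phsp.unfiltered_sample(1000000)
#   sample = phsp.filter(unfiltered)  # only points inside the Dalitz plot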
def main():
    nev = int(sys.argv[1])
    outfile = sys.argv[2]

    chunk_size = 1000000  # Events will be generated in chunks of this size
    atfi.set_seed(nev)

    n = 0  # Current tuple size
    arrays = []
    while True:
        # Create Dalitz plot sample
        unfiltered_sample = observables_phase_space.unfiltered_sample(chunk_size)  # Unfiltered array
        sample = observables_phase_space.filter(unfiltered_sample)
        size = sample.shape[0]
        print(f"Filtered chunk size = {size}")

        # Generate final state momenta from Dalitz plot and run through selection
        rnd = tf.random.uniform([size, random_array_size], dtype=atfi.fptype())  # Auxiliary random array
        array = atfi.stack(generate_selection(true_cuts, rnd, constant_cuts=True), axis=1)
        arrays += [array]

        # Increment counters and check if we are done
        size = array.shape[0]
        n += size
        if n > nev:
            break
        print(f"Selected size = {n}, last = {size}")

    tfr.write_tuple(outfile, atfi.concat(arrays, axis=0)[:nev, :], observables_toys)
def main():
    nev = 1000000
    outfile = "toy_tuple.root"

    atfi.set_seed(nev + 1)

    chunk_size = 1000000  # Events will be generated in chunks of this size
    bounds = {i[0]: (i[2], i[3]) for i in parameters_list}  # Bounds and exponential factor for generation of cuts
    branches = generated_variables + [i[0] for i in parameters_list]

    n = 0  # Current tuple size
    arrays = []
    while True:
        # Generate final state momenta from Dalitz plot and run through selection
        rnd = tf.random.uniform([chunk_size, random_array_size + len(bounds)],
                                dtype=atfi.fptype())  # Auxiliary random array
        array = atfi.stack(generate_candidates_and_cuts(rnd), axis=1)
        arrays += [array]

        # Increment counters and check if we are done
        size = array.shape[0]
        n += size
        if n > nev:
            break
        print(f"Selected size = {n}, last = {size}")

    tfr.write_tuple(outfile, atfi.concat(arrays, axis=0)[:nev, :], branches)
def metric_tensor():
    """
    Constant metric tensor for Lorentz space

    :returns: Metric tensor
    """
    return tf.constant([-1.0, -1.0, -1.0, 1.0], dtype=atfi.fptype())
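# Usage sketch (editor's illustration): with the (-,-,-,+) signature above and
# four-vectors stored as [px, py, pz, E], the Lorentz-invariant scalar product is a
# contraction of the elementwise product with the metric.
def example_scalar_product(p1, p2):
    # p1, p2: tensors of shape [..., 4]; returns E1*E2 - px1*px2 - py1*py2 - pz1*pz2
    return tf.reduce_sum(metric_tensor() * p1 * p2, axis=-1)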
def run_toymc(pdf, phsp, size, maximum, chunk=200000, seed=None):
    """
    Create a toy MC sample. To save memory, the sample is generated in "chunks" of a fixed size.
      pdf     : function returning the PDF graph for a given sample as an argument
      phsp    : phase space
      size    : size of the target data sample (if >0) or number of chunks (if <0)
      maximum : maximum PDF value for the accept-reject method
      chunk   : chunk size
      seed    : initial random seed. Not initialised if None
    """
    length, nchunk, curr_maximum = 0, 0, maximum
    dim = phsp.dimensionality()
    data = tf.Variable(np.empty((0, dim)), shape=(None, dim), dtype=atfi.fptype())

    if seed is not None:
        atfi.set_seed(seed)

    def condition(length, size, nchunk):
        return length < size or nchunk < -size

    @atfi.function
    def pdf_vals(chunk, curr_maximum):
        d = accept_reject_sample(
            pdf, phsp.filter(phsp.unfiltered_sample(chunk, curr_maximum)))
        return d, pdf(d)

    while condition(length, size, nchunk):
        d, v = pdf_vals(chunk, curr_maximum)
        over_maximum = v[v > curr_maximum]
        if len(over_maximum) > 0:
            new_maximum = tf.reduce_max(over_maximum) * 1.5
            print(f'  Updating maximum: {curr_maximum} -> {new_maximum}. Starting over.')
            length, nchunk, curr_maximum = 0, 0, new_maximum
            data = tf.Variable(np.empty((0, dim)), shape=(None, dim), dtype=atfi.fptype())
            continue
        data = tf.concat([data, d], axis=0)
        length += len(d)
        nchunk += 1
        print(f'  Chunk {nchunk}, size={len(d)}, total length={length}')

    return data[:size] if size > 0 else data
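# Usage sketch (editor's illustration, names are placeholders): generate 100000 toy
# events for a simple positive function of the first phase space variable, over a
# phase space object `phsp` that provides filter() and unfiltered_sample(). The
# initial maximum is a guess; run_toymc adapts it if the PDF exceeds it.
#
#   def toy_pdf(x):
#       return atfi.exp(-x[:, 0])
#   sample = run_toymc(toy_pdf, phsp, 100000, maximum=1.0, chunk=1000000, seed=1)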
def model(x, pars):
    # Constant vector of fit parameters (the same for every data point)
    vec = tf.reshape(
        tf.concat([tf.constant(ndim * [0.0], dtype=atfi.fptype()), pars], axis=0),
        [1, ndim + len(pars)])
    # Input tensor for the MLP: the first `ndim` columns are data,
    # the rest are constant optimisable parameters
    x2 = tf.pad(x, [[0, 0], [0, len(pars)]], "CONSTANT") + vec
    return scale * tfn.multilayer_perceptron(x2, ranges, weights, biases)
def relativistic_breit_wigner(m2, mres, wres):
    """
    Relativistic Breit-Wigner
    """
    if wres.dtype is atfi.ctype():
        return tf.math.reciprocal(
            atfi.cast_complex(mres * mres - m2) -
            atfi.complex(atfi.const(0.0), mres) * wres)
    if wres.dtype is atfi.fptype():
        return tf.math.reciprocal(atfi.complex(mres * mres - m2, -mres * wres))
    return None
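# Usage sketch (editor's illustration): evaluate the lineshape with a real (scalar)
# width on a grid of invariant mass squared values; the intensity is |BW|^2.
# Assumes numpy as np, tensorflow as tf and amplitf.interface as atfi are imported.
#
#   m2 = atfi.const(np.linspace(0.3, 1.5, 100))
#   bw = relativistic_breit_wigner(m2, atfi.const(0.775), atfi.const(0.149))
#   intensity = tf.math.abs(bw) ** 2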
def __init__(self, name, init_value, lower_limit, upper_limit, step_size=1e-6):
    global __all_variables__
    ResourceVariable.__init__(self, init_value, dtype=atfi.fptype(), trainable=True)
    self.init_value = init_value
    self.par_name = name
    self.step_size = step_size
    self.lower_limit = lower_limit
    self.upper_limit = upper_limit
    self.prev_value = None
    self.fixed = False
    self.error = 0.0
    self.positive_error = 0.0
    self.negative_error = 0.0
    self.fitted_value = init_value
def main():
    nev = 2000000
    outfile = "toy_tuple.root"

    atfi.set_seed(nev + 1)

    chunk_size = 1000000  # Events will be generated in chunks of this size
    bounds = {i[0]: (i[2], i[3]) for i in parameters_list}  # Bounds and exponential factor for generation of cuts
    branches = observables_toys + [i[0] for i in parameters_list]

    n = 0  # Current tuple size
    arrays = []
    while True:
        # Create Dalitz plot sample
        unfiltered_sample = observables_phase_space.unfiltered_sample(chunk_size)  # Unfiltered array
        sample = observables_phase_space.filter(unfiltered_sample)
        size = sample.shape[0]
        print(f"Filtered chunk size = {size}")

        # Generate final state momenta from Dalitz plot and run through selection
        rnd = tf.random.uniform([size, random_array_size], dtype=atfi.fptype())  # Auxiliary random array
        array = atfi.stack(selection_with_random_cuts(sample, rnd), axis=1)
        arrays += [array]

        # Increment counters and check if we are done
        size = array.shape[0]
        n += size
        if n > nev:
            break
        print(f"Selected size = {n}, last = {size}")

    tfr.write_tuple(outfile, atfi.concat(arrays, axis=0)[:nev, :], branches)
def __init__(self, name, init_value, lower_limit, upper_limit, step_size=1e-6):
    self.var = ResourceVariable(init_value, shape=(), name=name,
                                dtype=atfi.fptype(), trainable=True)
    self.init_value = init_value
    self.name = name
    self.step_size = step_size
    self.lower_limit = lower_limit
    self.upper_limit = upper_limit
    self.prev_value = None
    self.fixed = False
    self.error = 0.0
    self.positive_error = 0.0
    self.negative_error = 0.0
    self.fitted_value = init_value
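# Usage sketch (editor's illustration): declare a bounded fit parameter; the
# underlying tf.Variable is available as `.var`, and the bookkeeping attributes
# (error, fitted_value, ...) are meant to be filled by the fit driver.
#
#   mass = FitParameter("mass", 0.770, 0.600, 0.900, step_size=0.01)
#   print(mass.init_value, mass.lower_limit, mass.upper_limit)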
def estimate_density_old(
    phsp,
    data,
    ranges,
    labels,
    weight=None,
    transform=None,
    transform_ranges=None,
    learning_rate=0.001,
    training_epochs=100000,
    norm_size=1000000,
    print_step=50,
    display_step=500,
    weight_penalty=1.0,
    n_hidden=[32, 8],
    initfile="init.npy",
    outfile="train",
    seed=1,
    fig=None,
    axes=None,
):
    tf.compat.v1.disable_eager_execution()

    n_input = len(ranges)
    bins = n_input * [50]

    #tf.compat.v1.set_random_seed(seed)
    #np.random.seed(seed + 12345)

    try:
        init_w = np.load(initfile, allow_pickle=True)
    except:
        init_w = None

    if isinstance(init_w, np.ndarray):
        print("Loading saved weights")
        (weights, biases) = init_weights_biases(init_w)
    else:
        print("Creating random weights")
        (weights, biases) = create_weights_biases(n_input, n_hidden)

    data_ph = tf.compat.v1.placeholder(atfi.fptype(), shape=(None, None), name="data")
    norm_ph = tf.compat.v1.placeholder(atfi.fptype(), shape=(None, None), name="norm")

    if not transform_ranges:
        transform_ranges = ranges

    def model(x):
        if transform:
            x2 = transform(x)
        else:
            x2 = x
        # to make sure PDF is always strictly positive
        return multilayer_perceptron(x2, transform_ranges, weights, biases) + 1e-20

    data_model = model(data_ph)
    norm_model = model(norm_ph)

    def unbinned_nll(pdf, integral):
        return -tf.reduce_sum(atfi.log(pdf / integral))

    def integral(pdf):
        return tf.reduce_mean(pdf)

    # Define loss and optimizer
    nll = (unbinned_nll(data_model, integral(norm_model)) +
           l2_regularisation(weights) * weight_penalty)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(nll)

    # Initializing the variables
    init = tf.compat.v1.global_variables_initializer()

    with tf.compat.v1.Session() as sess:
        sess.run(init)

        data_sample = sess.run(phsp.filter(data_ph), feed_dict={data_ph: data})
        norm_sample = sess.run(phsp.uniform_sample(norm_size))
        print("Normalisation sample size = ", len(norm_sample))
        print(norm_sample[:, 0])
        print(norm_sample[:, 1])
        print(norm_sample[:, 2])
        print(norm_sample[:, 3])
        print("Data sample size = ", len(data_sample))
        print(data_sample)

        # Training cycle
        best_cost = 1e10

        display = atfp.MultidimDisplay(data_sample, norm_sample, bins, ranges, labels, fig, axes)
        plt.ion()
        plt.show()
        plt.pause(0.1)

        for epoch in range(training_epochs):
            _, c = sess.run([train_op, nll],
                            feed_dict={data_ph: data_sample, norm_ph: norm_sample})

            if epoch % display_step == 0 and fig:
                w = sess.run(norm_model, feed_dict={norm_ph: norm_sample})
                display.draw(w)
                plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
                plt.draw()
                plt.pause(0.1)
                plt.savefig(outfile + ".pdf")

            if epoch % print_step == 0:
                s = "Epoch %d, cost %.9f" % (epoch + 1, c)
                print(s)
                if c < best_cost:
                    best_cost = c
                    w = sess.run(norm_model, feed_dict={norm_ph: norm_sample})
                    scale = 1.0 / np.mean(w)
                    np.save(outfile, [scale, transform_ranges] + sess.run([weights, biases]))
                    f = open(outfile + ".txt", "w")
                    f.write(s + "\n")
                    f.close()

        print("Optimization Finished!")
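# Usage sketch (editor's illustration, all names are placeholders): train the
# density-estimation MLP on a 4D numpy data sample with a phase space object
# providing filter() and uniform_sample().
#
#   estimate_density_old(phsp, data_array,
#                        ranges=4 * [(0.0, 1.0)],
#                        labels=["x1", "x2", "x3", "x4"],
#                        n_hidden=[32, 8],
#                        training_epochs=10000,
#                        outfile="train")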
def metric_tensor():
    """Metric tensor for Lorentz space (constant)"""
    return tf.constant([-1., -1., -1., 1.], dtype=atfi.fptype())
import sys

import tensorflow as tf

sys.path.append("../")

import amplitf.interface as atfi
import amplitf.kinematics as atfk

atfi.set_seed(2)

rndvec = tf.random.uniform([32, 3], dtype=atfi.fptype())

v = rndvec[:, 0]
th = atfi.acos(rndvec[:, 1])
phi = (rndvec[:, 2] * 2 - 1) * atfi.pi()

p = atfk.lorentz_vector(
    atfk.vector(atfi.zeros(v), atfi.zeros(v), atfi.zeros(v)), atfi.ones(v))

bp = atfk.lorentz_boost(
    p,
    atfk.rotate_euler(atfk.vector(v, atfi.zeros(v), atfi.zeros(v)), th, phi,
                      atfi.zeros(v)))

print(bp)
print(atfk.mass(bp))