def nll(fix_coeffs_model):
    attempt = 1
    while True:
        fit_coeffs = bmf.coeffs.fit(args.fit_init, args.signal_model,
                                    fix_coeffs_model)
        optimizer = bmf.Optimizer(fit_coeffs,
                                  signal_events,
                                  opt_name=args.opt_name,
                                  learning_rate=args.learning_rate,
                                  opt_params=opt_params,
                                  grad_clip=args.grad_clip,
                                  grad_max_cutoff=args.grad_max_cutoff)

        while True:
            optimizer.minimize()
            if args.log:
                log.coefficients('q_test_stat_{}'.format(iteration),
                                 optimizer, signal_coeffs)
            if optimizer.converged():
                # Multiply the normalized_nll up into the full nll
                return fit_coeffs, optimizer.normalized_nll * args.signal_count
            if optimizer.step >= args.max_step:
                bmf.stderr(
                    'No convergence after {} steps. Restarting iteration'
                    .format(args.max_step))
                attempt += 1
                break
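A minimal sketch of how a helper like this might be combined into a likelihood-ratio test statistic, which the 'q_test_stat' log label hints at. The `fixed_model` name and the assumption that the fix argument accepts None for a free fit are mine, not from the snippet:

# Hypothetical usage sketch (assumed, not from the original script): compare a
# free fit against a fit with some coefficients fixed. `fixed_model` and the
# use of None for "nothing fixed" are assumptions about bmf.coeffs.fit().
free_coeffs, nll_free = nll(None)
fixed_coeffs, nll_fixed = nll(fixed_model)
q_test_stat = 2.0 * (nll_fixed - nll_free)  # larger values disfavour the fixed model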
def fit(fit_coeffs_, signal_events_, learning_rate_):
    # Minimise the NLL for the given coefficients and events, then report the
    # coefficient selected by the module-level c_idx together with the
    # normalised NLL. Returns None implicitly if no convergence is reached
    # within max_iterations.
    optimizer = bmf.Optimizer(fit_coeffs_,
                              signal_events_,
                              learning_rate=learning_rate_)

    for step in range(max_iterations):
        optimizer.minimize()
        if optimizer.converged():
            nll_ = optimizer.normalized_nll.numpy()  # * signal_count
            print('{} {} {} {}'.format(bmf.coeffs.names[c_idx], step,
                                       fit_coeffs_[c_idx].numpy(), nll_))
            return fit_coeffs_[c_idx].numpy(), nll_
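A hedged driver sketch for this helper; c_idx and max_iterations are assumed module-level settings (the function above reads them from the enclosing scope), and the learning rates here are illustrative:

# Illustrative driver (assumed, not from the original snippet).
max_iterations = 2000
c_idx = 0
for learning_rate_ in [0.05, 0.10, 0.20]:
    signal_coeffs = bmf.coeffs.signal(bmf.coeffs.SM)
    signal_events = bmf.signal.generate(signal_coeffs)
    result = fit(bmf.coeffs.fit(), signal_events, learning_rate_)
    if result is None:
        print('No convergence within {} steps at learning rate {}'.format(
            max_iterations, learning_rate_))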
Example #3
    # NOTE: the opening of this loop is assumed (only the arguments below
    # appear in the source); it resumes fitting from the last saved iteration.
    for iteration in tqdm.trange(iteration + 1,
                                 args.iterations + 1,
                                 initial=iteration,
                                 total=args.iterations,
                                 unit='fit'):
        # Time each iteration for CSV writing
        script.timer_start('fit')

        signal_events = bmf.signal.generate(signal_coeffs,
                                            events_total=args.signal_count)

        fit_coeffs = bmf.coeffs.fit(args.fit_init, args.signal_model)

        optimizer = bmf.Optimizer(fit_coeffs,
                                  signal_events,
                                  opt_name=args.opt_name,
                                  learning_rate=args.learning_rate,
                                  opt_params=opt_params,
                                  grad_clip=args.grad_clip,
                                  grad_max_cutoff=args.grad_max_cutoff)

        # Collect the coefficients whose type matches the first entry,
        # which is used here to select the trainable fit variables
        fit_vars = []
        for coeff in fit_coeffs:
            if type(coeff) == type(fit_coeffs[0]):
                fit_vars.append(coeff)

        # Record the initial numerical values of those variables
        pars_init = []
        for coeff in fit_vars:
            pars_init.append(coeff.numpy())

        # feed_dict_ = {}
        # for coeff in fit_coeffs: feed_dict_[coeff[0:-2]] = 0
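If the trainable coefficients returned by bmf.coeffs.fit() are tf.Variable instances (an assumption; the snippet above only compares against the type of the first entry), the same selection can be written more explicitly:

# Alternative selection (assumes trainable coefficients are tf.Variable
# instances, which the snippet above does not state explicitly).
fit_vars = [coeff for coeff in fit_coeffs if isinstance(coeff, tf.Variable)]
pars_init = [coeff.numpy() for coeff in fit_vars]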
Example #4
#!/usr/bin/env python
"""
Benchmark time taken to run key functions.

Used to check for performance regressions.
"""
import tensorflow.compat.v2 as tf
import timeit

import b_meson_fit as bmf

tf.enable_v2_behavior()

times = [10, 100, 1000]
# The lambdas below only look up fit_coeffs, signal_events and optimizer when
# they are called, after those names have been defined inside the script block.
functions = {
    "nll": lambda: bmf.signal.nll(fit_coeffs, signal_events),
    "minimize": lambda: optimizer.minimize()
}

with bmf.Script() as script:
    signal_coeffs = bmf.coeffs.signal(bmf.coeffs.SM)
    signal_events = bmf.signal.generate(signal_coeffs)
    fit_coeffs = bmf.coeffs.fit()
    optimizer = bmf.Optimizer(fit_coeffs, signal_events)

    for n, f in functions.items():
        for t in times:
            time_taken = timeit.timeit(f, number=t)
            bmf.stdout("{}() x {}: ".format(n, t), time_taken)
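timeit.timeit() returns the total wall time for all number=t calls, so a per-call figure only needs a division by t. A sketch of the same reporting loop extended that way (it would replace the loop inside the with block above):

    # Sketch: report both the total time and the mean time per call.
    # timeit.timeit() returns the total time for all `t` calls of f.
    for n, f in functions.items():
        for t in times:
            time_taken = timeit.timeit(f, number=t)
            bmf.stdout("{}() x {}: ".format(n, t), time_taken)
            bmf.stdout("{}() mean per call: ".format(n), time_taken / t)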
    def tf_fit(self,
               Ncall=None,
               init='DEFAULT',
               fixed=None,
               coefini=None,
               verbose=False,
               opt_params=None):
        # Choose the initialisation scheme for the fit coefficients
        if init is None or init == 'DEFAULT':
            A = bmf.coeffs.fit(bmf.coeffs.fit_initialization_scheme_default,
                               current_signal_model=self.model,
                               fix=fixed)
        elif init == 'SAME SIGN':
            A = bmf.coeffs.fit(bmf.coeffs.fit_initialization_same,
                               current_signal_model=self.model,
                               fix=fixed)
        elif init == 'ANY SIGN':
            A = bmf.coeffs.fit(bmf.coeffs.fit_initialization_any,
                               current_signal_model=self.model,
                               fix=fixed)
        else:
            raise ValueError('Unknown init scheme: {}'.format(init))

        # An explicit list of initial coefficients overrides the scheme above
        if coefini is not None:
            A = bmf.coeffs.fit(initialization=coefini, fix=fixed)

        events = tf.convert_to_tensor(self.events)

        if verbose:
            print('\n', "Coeffs used for MC:", self.coeffs)
            print("Initial coeffs for tensorflow fit:",
                  [a.numpy() for a in A])

        self.coeff_init = [a.numpy() for a in A]
        optimizer = bmf.Optimizer(A,
                                  events,
                                  opt_name='AMSGrad',
                                  learning_rate=0.20,
                                  opt_params=opt_params)
        converged = False
        j = 0
        t0 = time.time()
        while not converged:
            optimizer.minimize()
            if Ncall is not None and j > Ncall:
                # Call budget exhausted: stop and return the current coefficients
                tfCoeff = [coeff.numpy() for coeff in optimizer.fit_coeffs]
                self.coeff_fit = tfCoeff
                self.NLL = self.nll_iminuit(self.coeff_fit)
                return optimizer, tfCoeff

            j += 1
            converged = optimizer.converged()
        t1 = time.time()

        tfCoeff = [coeff.numpy() for coeff in optimizer.fit_coeffs]
        self.coeff_fit = tfCoeff
        self.NLL = self.nll_iminuit(tfCoeff)
        if verbose:
            print('\n', ' Fitted coefficients : ', self.coeff_fit)
            print('\n', "Time taken to fit :", t1 - t0)
        return optimizer, tfCoeff
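A hypothetical call of this method; the owning class is not shown above, so `toy` and its construction are assumed, while the attributes read back follow the method body:

# Hypothetical usage (assumed): `toy` is an instance of the class this method
# belongs to, already carrying .model, .events, .coeffs and .nll_iminuit.
optimizer, fitted_coeffs = toy.tf_fit(init='SAME SIGN', Ncall=5000, verbose=True)
print('Fitted coefficients:', toy.coeff_fit)
print('NLL cross-checked with iminuit:', toy.NLL)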
# Set all default fit coefficients to the same value to make comparison possible
fit_default = 1.0

with bmf.Script() as script:
    signal_coeffs = bmf.coeffs.signal(bmf.coeffs.SM)
    signal_events = bmf.signal.generate(signal_coeffs)

    log = bmf.Log(script.name)

    # Draw a signal line on each coefficient plot so we can compare how well the optimizers do
    log.signal_line(bmf.coeffs.fit(), signal_coeffs, iterations)

    for combo in combos:
        test_name, name, learning_rate, params, clip = combo

        optimizer = bmf.Optimizer(
            bmf.coeffs.fit(
                fit_default),  # Generate new fit coefficients for each run
            signal_events,
            opt_name=name,
            learning_rate=learning_rate,
            opt_params=params,
            grad_clip=clip,
        )

        # Use tqdm's trange() to print a progress bar for each optimizer/learning rate combo
        with tqdm.trange(iterations, desc=test_name) as t:
            for i in t:
                optimizer.minimize()
                log.coefficients(test_name, optimizer, signal_coeffs)
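The loop above unpacks each combo into five fields. An illustrative definition of combos and iterations (assumed; only 'AMSGrad' appears elsewhere in these examples, the other values are placeholders):

# Illustrative (assumed) definitions. Each combo is:
# (test name for the log, optimizer name, learning rate, extra optimizer params, gradient clip)
iterations = 2000
combos = [
    ('amsgrad_lr_0.20', 'AMSGrad', 0.20, None, None),
    ('amsgrad_lr_0.10', 'AMSGrad', 0.10, None, None),
    ('amsgrad_lr_0.20_clip_2', 'AMSGrad', 0.20, None, 2.0),
]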
Example #7
import tensorflow.compat.v2 as tf

import b_meson_fit as bmf

tf.enable_v2_behavior()

with bmf.Script() as script:
    if not bmf.user_is_root():
        bmf.stderr(
            'This script needs root permissions. You can run it from the project folder with:'
        )
        bmf.stderr(
            'sudo -E --preserve-env=PYTHONPATH ./bin/performance_profile.py')
        exit(1)

    log = bmf.Log(script.name)

    signal_coeffs = bmf.coeffs.signal(bmf.coeffs.SM)
    optimizer = bmf.Optimizer(
        bmf.coeffs.fit(),
        bmf.signal.generate(signal_coeffs),
    )

    for i in range(1000):
        tf.summary.trace_on(graph=True, profiler=True)
        optimizer.minimize()
        tf.summary.trace_export(name='trace_%d' % optimizer.step,
                                step=optimizer.step,
                                profiler_outdir=log.dir())
        tf.summary.flush()