Example 1
def print_msg(msg, log=True):
    """prints the msg string with elapsed time and current memory usage.

    Args:
        msg (str): the string to print
        log (bool): write the msg to the log as well

    """
    if os.name == 'posix':
        # ru_maxrss is reported in kilobytes on Linux (bytes on macOS); /1e6 gives GB on Linux
        mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
        mem_used = sp.around(mem_used, 2)
        memstr = '(' + str(mem_used) + ' GB): '
        timestr = tp.humantime(sp.around(time.time() - t0, 2))
        print(colorama.Fore.CYAN + timestr + '\t' + memstr + '\t' +
              colorama.Fore.GREEN + msg)
        if log:
            with open('log.log', 'a+') as fH:
                log_str = timestr + '\t' + memstr + '\t' + msg
                fH.write(log_str + '\n')
    else:
        timestr = tp.humantime(sp.around(time.time() - t0, 2))
        print(colorama.Fore.CYAN + timestr + '\t' + colorama.Fore.GREEN + msg)
        if log:
            with open('log.log', 'a+') as fH:
                log_str = timestr + '\t' + '\t' + msg  # empty middle column keeps alignment with the posix branch
                fH.write(log_str + '\n')
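
This snippet relies on module-level state that is not shown. A minimal sketch of the assumed setup, with the imports and the t0 start time inferred from the body (not taken from the original project):

import os
import time

import colorama
import scipy as sp
import tableprint as tp

if os.name == 'posix':
    import resource

colorama.init()    # enables ANSI color codes (required on Windows)
t0 = time.time()   # reference point for the elapsed-time column

print_msg('loading data...')
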
Example 2
        def minimize(self, f_df, x0, display=sys.stdout, maxiter=1e3):

            self.display = display
            self.theta = x0

            # initialize optimizer state and per-iteration storage
            xk = self.algorithm.send(destruct(x0).copy())
            store = defaultdict(list)
            runtimes = []
            if len(self.operators) == 0:
                self.operators = [proxops.identity()]

            # wrap the objective and compose the proximal operator pipeline
            obj, grad = wrap(f_df, x0)
            transform = compose(destruct, *reversed(self.operators), self.restruct)

            self.optional_print(tp.header(['Iteration', 'Objective', '||Grad||', 'Runtime']))
            try:
                for k in count():

                    # one optimization step, timed
                    tstart = perf_counter()
                    f = obj(xk)
                    df = grad(xk)
                    xk = transform(self.algorithm.send(df))
                    runtimes.append(perf_counter() - tstart)
                    store['f'].append(f)

                    # Update display
                    self.optional_print(tp.row([k,
                                                f,
                                                np.linalg.norm(destruct(df)),
                                                tp.humantime(runtimes[-1])]))

                    if k >= maxiter:
                        break

            except KeyboardInterrupt:
                pass

            self.optional_print(tp.bottom(4))

            # cleanup
            self.optional_print(u'\u279b Final objective: {}'.format(store['f'][-1]))
            self.optional_print(u'\u279b Total runtime: {}'.format(tp.humantime(sum(runtimes))))
            self.optional_print(u'\u279b Per iteration runtime: {} +/- {}'.format(
                tp.humantime(np.mean(runtimes)),
                tp.humantime(np.std(runtimes)),
            ))

            # result
            return OptimizeResult({
                'x': self.restruct(xk),
                'f': f,
                'df': self.restruct(df),
                'k': k,
                'obj': np.array(store['f']),
            })
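
The transform built above implies a right-to-left compose: the flat gradient vector is restructured, passed through each proximal operator in order, then destructured back to a flat array. A minimal sketch of a compose helper consistent with that call site (an assumption; the project presumably ships its own):

from functools import reduce

def compose(*funcs):
    """Right-to-left composition: compose(f, g, h)(x) == f(g(h(x)))."""
    return lambda x: reduce(lambda acc, fn: fn(acc), reversed(funcs), x)
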
Example 3
    def cleanup(self, d, runtimes, exit_message):

        print(self.hr)
        print(u'\u279b Final objective: {}'.format(d.obj))
        print(u'\u279b Total runtime: {}'.format(tp.humantime(sum(runtimes))))
        print(u'\u279b Per iteration runtime: {} +/- {}'.format(
            tp.humantime(np.mean(runtimes)),
            tp.humantime(np.std(runtimes)),
        ))

        if exit_message:
            print(u'\u279b ' + exit_message)

        print(u'\u279b All done!\n')
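
cleanup only needs an object with an obj attribute plus the list of per-iteration runtimes. A minimal sketch of invoking it, where printer is an instance of the enclosing class and all values are made-up placeholders:

from types import SimpleNamespace

d = SimpleNamespace(obj=0.0412)      # final objective value (placeholder)
runtimes = [0.51, 0.48, 0.50]        # per-iteration runtimes in seconds (placeholder)
printer.cleanup(d, runtimes, exit_message='Converged')
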
Example 4
        def optimize_param(f_df_wrapper, param_key, check_grad, cur_iter):

            # initialize the SFO instance
            loglikelihood_optimizer = SFO(
                f_df_wrapper,
                theta_current[param_key],
                train_data,
                display=0)

            # check gradient
            if check_grad == param_key:
                loglikelihood_optimizer.check_grad()

            # initialize the optimizer object
            opt = Optimizer('sfo', optimizer=loglikelihood_optimizer, num_steps=num_likelihood_steps)

            # add regularization terms
            for reg in self.regularizers[param_key]:
                opt.add_regularizer(reg)

            # run the optimization procedure
            t0 = perf_counter()
            opt.minimize(
                theta_current[param_key],
                max_iter=max_iter,
                disp=disp,
                callback=callback)
            t1 = perf_counter() - t0
            print('Finished optimizing ' + param_key + '. Elapsed time: ' + tp.humantime(t1))

            return opt.theta
Example 5
    def __init__(self, **kwargs):

        opts = merge(defaults, kwargs)

        self.every = opts['every']
        self.width = opts['width']
        self.spec = opts['spec']
        self.column_names = []
        self.columns = []

        if opts['iter']:
            self.column_names.append('Iteration')
            self.columns.append(lambda d: d.iteration)

        if opts['obj']:
            self.column_names.append('Objective')
            self.columns.append(lambda d: d.obj)

        if opts['gradnorm']:
            self.column_names.append('||Gradient||')
            self.columns.append(lambda d: norm(destruct(d.grad)))

        if opts['runtime']:
            self.column_names.append('Runtime')
            self.columns.append(lambda d: tp.humantime(d.runtime))

        self.ncols = len(self.column_names)
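
Each entry in self.columns is a callable over a per-iteration datum d, paired positionally with column_names. A hedged sketch of how a row is presumably rendered from them with tableprint (the function name and the way the header is triggered are assumptions, not the class's actual API):

import tableprint as tp

def print_row(printer, d):
    # header once, then one formatted row per iteration datum
    if d.iteration == 0:
        print(tp.header(printer.column_names, width=printer.width))
    print(tp.row([col(d) for col in printer.columns], width=printer.width))
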
Example 6
def train(model, experiment, monitor, num_epochs, augment=False):
    """Train the given network against the given data

    Parameters
    ----------
    model : keras.models.Model or glms.GLM
        A GLM or Keras Model object

    experiment : experiments.Experiment
        An Experiment object

    monitor : io.Monitor
        Saves the model parameters and plots of performance progress

    num_epochs : int
        Number of epochs to train for

    augment : bool, optional
        Whether to apply data augmentation to the training batches (default: False)

    """
    assert isinstance(model, (Model, GLM)), "'model' must be a GLM or Keras model"

    # initialize training iteration
    iteration = 0
    train_start = time()

    # loop over epochs
    try:
        for epoch in range(num_epochs):
            tp.banner('Epoch #{} of {}'.format(epoch + 1, num_epochs))
            print(tp.header(["Iteration", "Loss", "Runtime"]), flush=True)

            # loop over data batches for this epoch
            for X, y in experiment.train(shuffle=True):

                # update on save_every, assuming it is positive
                if (monitor is not None) and (iteration % monitor.save_every == 0):

                    # performs validation, updates performance plots, saves results to dropbox
                    monitor.save(epoch, iteration, X, y, model.predict)

                # train on the batch
                tstart = time()
                loss = model.train_on_batch({'stim': X, 'loss': y})[0]
                elapsed_time = time() - tstart

                # update
                iteration += 1
                print(tp.row([iteration, float(loss), tp.humantime(elapsed_time)]), flush=True)

            print(tp.bottom(3))

    except KeyboardInterrupt:
        print('\nCleaning up')

    # allows the monitor to perform any post-training visualization
    if monitor is not None:
        elapsed_time = time() - train_start
        monitor.cleanup(iteration, elapsed_time)

    tp.banner('Training complete!')
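
The table scaffolding used here (tp.header, tp.row, tp.bottom) also works standalone. A minimal sketch of the same pattern outside the training loop, with a stand-in computation in place of a real batch update:

import time
import tableprint as tp

print(tp.header(['Iteration', 'Loss', 'Runtime']))
for i in range(3):
    tstart = time.time()
    loss = 1.0 / (i + 1)    # stand-in for a real training step
    print(tp.row([i, loss, tp.humantime(time.time() - tstart)]))
print(tp.bottom(3))
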
Example 7
def test_humantime():

    # test numeric input
    assert humantime(1e6) == u'1 weeks, 4 days, 13 hours, 46 min., 40 s'
    assert humantime(2e5) == u'2 days, 7 hours, 33 min., 20 s'
    assert humantime(5e3) == u'1 hours, 23 min., 20 s'
    assert humantime(60) == u'1 min., 0 s'
    assert humantime(1) == u'1 s'
    assert humantime(0) == u'0 s'
    assert humantime(0.1) == u'100 ms'
    assert humantime(0.005) == u'5 ms'
    assert humantime(1e-5) == u'10 μs'
    assert humantime(5.25e-4) == u'525 μs'
    assert humantime(5e-7) == u'500 ns'
    assert humantime(1e-12) == u'0.001 ns'

    # test non-numeric input
    for val in ('abc', [], {'x': 5}):

        with pytest.raises(ValueError) as context:
            humantime(val)

        assert 'Input must be numeric' in str(context.value)
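
The same expectations hold interactively; the outputs below are exactly the strings asserted above:

>>> from tableprint import humantime
>>> humantime(60)
'1 min., 0 s'
>>> humantime(0.005)
'5 ms'
>>> humantime(1e-5)
'10 μs'
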
Example 8
    def cleanup(self, d, runtimes):

        print(self.hr)
        print('-> Final objective: {}'.format(d.obj))
        print('-> Total runtime: {}'.format(tp.humantime(sum(runtimes))))
        print('-> All done!\n')