Example #1
    def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3)
        A = self.A = rnd.randn(n1, n2, n3)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = AutogradBackend()
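
A brief note on where the hard-coded expectations in these setUp examples come from (the same construction recurs in the examples below): for the test cost f(X) = exp(sum(X**2)),

\[
\frac{\partial f}{\partial x_i} = 2 x_i \, e^{\|X\|^2},
\qquad
\frac{\partial^2 f}{\partial x_i \, \partial x_j} = \bigl(4 x_i x_j + 2\,\delta_{ij}\bigr)\, e^{\|X\|^2},
\]

so contracting the Hessian tensor with a direction A (the "right multiply" step) gives

\[
(\nabla^2 f)[A] = e^{\|X\|^2}\bigl(4\, X \,\langle X, A\rangle + 2 A\bigr),
\]

which is exactly what 4 * Y1 * Y2 + 2 * diag, followed by the sum over the trailing axes, computes.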
Example #2
    def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend()
Example #3
    def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        m = self.m = 10
        n = self.n = 15

        Y = self.Y = rnd.randn(m, n)
        A = self.A = rnd.randn(m, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (4th order)
        Y1 = Y.reshape(m, n, 1, 1)
        Y2 = Y.reshape(1, 1, m, n)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(m * n).reshape(m, n, m, n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, m, n)

        self.correct_hess = np.sum(H * Atensor, axis=(2, 3))

        self.backend = AutogradBackend()
Example #4
    def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3)
        A = self.A = rnd.randn(n1, n2, n3)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = AutogradBackend()
Example #5
    def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend()
Example #6
    def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        m = self.m = 10
        n = self.n = 15

        Y = self.Y = rnd.randn(m, n)
        A = self.A = rnd.randn(m, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian tensor H (4th order)
        Y1 = Y.reshape(m, n, 1, 1)
        Y2 = Y.reshape(1, 1, m, n)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(m * n).reshape(m, n, m, n)

        H = np.exp(np.sum(Y**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, m, n)

        self.correct_hess = np.sum(H * Atensor, axis=(2, 3))

        self.backend = AutogradBackend()
Example #7
def mps(model):
    """
    MPS: Maximum Product Spacing

    This method finds the parameters that maximise the (geometric) average
    spacing between all points. It works really well when all points are
    unique; some complications arise when the data contain repeats. The
    method is quite good for offset distributions.
    """

    old_err_state = np.seterr(all='ignore')

    dist = model.dist
    x, c, n = model.data['x'], model.data['c'], model.data['n']
    const = model.fitting_info['const']
    inv_trans = model.fitting_info['inv_trans']
    init = model.fitting_info['init']
    offset = model.offset

    jac = jacobian(mps_fun)
    hess = hessian(mps_fun)

    res = minimize(mps_fun, init,
                   method='Newton-CG',
                   jac=jac,
                   hess=hess,
                   tol=1e-15,
                   args=(dist, x, inv_trans, const, c, n, offset))

    if (res.success is False) or (np.isnan(res.x).any()):
        res = minimize(mps_fun, init, method='BFGS', jac=jac,
                       args=(dist, x, inv_trans, const, c, n, offset))

    if (res.success is False) or (np.isnan(res.x).any()):
        res = minimize(mps_fun, init,
                       args=(dist, x, inv_trans, const, c, n, offset))

    if (res.success is False) or (np.isnan(res.x).any()):
        print("MPS FAILED: Try alternate estimation method", file=sys.stderr)

    results = {}
    params = inv_trans(const(res.x))
    results['res'] = res

    if offset:
        results['gamma'] = params[0]
        results['params'] = params[1::]
    else:
        results['params'] = params

    results['jac'] = jac

    np.seterr(**old_err_state)

    return results
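
The mps_fun being minimised is not included in this snippet; below is a minimal, hypothetical sketch of a maximum product spacing objective (ignoring the censoring, ties, offset and re-parameterisation handling the real function needs), assuming dist exposes a SciPy-style .cdf:

import autograd.numpy as np


def mps_objective_sketch(theta, dist, x_sorted):
    # Negative sum of log spacings of the fitted CDF at the sorted data;
    # maximising the (geometric) average spacing == minimising this value.
    F = dist.cdf(x_sorted, *theta)
    F_padded = np.concatenate([np.array([0.0]), F, np.array([1.0])])
    spacings = F_padded[1:] - F_padded[:-1]
    return -np.sum(np.log(spacings))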
Example #8
def mse(model):
    """
    MSE: Mean Square Error
    This simply fits the curve to the best estimate from a non-parametric
    estimate.

    It differs slightly in that it fits to the untransformed data on the
    x and y axes, whereas the MPP method fits the curve to the transformed
    data. In effect, it fits the CDF sigmoid to the nonparametric estimate.
    """
    dist = model.dist
    x, c, n, t = (model.data['x'], model.data['c'], model.data['n'],
                  model.data['t'])

    const = model.fitting_info['const']
    inv_trans = model.fitting_info['inv_trans']
    init = model.fitting_info['init']

    if (-1 in c) or (2 in c):
        out = nonp.turnbull(x, c, n, t, estimator='Fleming-Harrington')
    else:
        out = nonp.fleming_harrington(x, c, n, t)

    F = 1 - out['R']
    mask = np.isfinite(out['x'])
    F = F[mask]
    x = out['x'][mask]

    jac = jacobian(mse_fun)
    hess = hessian(mse_fun)

    old_err_state = np.seterr(all='ignore')

    res = minimize(mse_fun,
                   init,
                   method='Newton-CG',
                   jac=jac,
                   hess=hess,
                   args=(dist, x, F, inv_trans, const))

    if (res.success is False) or (np.isnan(res.x).any()):
        res = minimize(mse_fun,
                       init,
                       method='BFGS',
                       jac=jac,
                       args=(dist, x, F, inv_trans, const))

    if (res.success is False) or (np.isnan(res.x).any()):
        res = minimize(mse_fun, init, args=(dist, x, F, inv_trans, const))

    results = {}
    results['res'] = res
    results['params'] = inv_trans(const(res.x))
    np.seterr(**old_err_state)
    return results
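
As with MPS above, mse_fun is not shown in this snippet; a minimal, hypothetical sketch of the objective (assuming the distribution exposes its CDF as dist.ff, the complement of the .sf used elsewhere in these snippets):

def mse_objective_sketch(theta, dist, x, F):
    # Squared error between the fitted CDF and the non-parametric estimate F
    # at the observed points x.
    return np.sum((dist.ff(x, *theta) - F) ** 2)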
Example #9
    def __init__(self, in_config, in_utils):
        np.seterr(all='raise')
        self.g_curr_epochs = 0
        self.config = in_config
        self.neighborhood_vals = {}
        self.g_contiguous_high_acc_epochs = 0
        self.g_highest_acc_state = {"acc": 0.0, "epoch": 1}
        self.nn_architecture = None
        self.params_values = None
        self.utils = in_utils
        print("Initialised cauverians")
        return
Example #10
def learn_maxpl(imgs):

    img_size = np.prod(imgs[0].shape)
    ######################################################################
    ######################################################################
    weights = np.zeros((img_size, img_size))
    bias = np.zeros(img_size)
    # Complete this function
    # You are allowed to modify anything between these lines
    # Helper functions are allowed

    # Avoid "overflow encountered in exp"
    old_settings = np.seterr(all='ignore')

    # Define log PseudoLikelihood function
    def neg_logPL(W, b):
        SUM = 0
        for img in imgs:
            #flatten image
            img_f = np.reshape(img, (1, img_size))
            for i in range(len(img_f[0])):
                x = img_f[0][i]
                X = np.copy(img_f)
                X[0][i] = 0
                if x == 1:
                    SUM = SUM + np.log(
                        1 / (1 + np.exp(-np.sum(W[i] * X[0]) + b[i])))
                else:
                    SUM = SUM + np.log(
                        1 - 1 / (1 + np.exp(-np.sum(W[i] * X[0]) + b[i])))
        return -SUM

    # Gradient descent on neg_logPL
    neg_logPL_dW = grad(neg_logPL, 0)
    neg_logPL_db = grad(neg_logPL, 1)
    W = np.zeros((img_size, img_size))
    b = np.zeros((img_size))
    n_iteration = 5
    alpha = 0.01
    for i in range(n_iteration):
        dW = neg_logPL_dW(W, b)
        db = neg_logPL_db(W, b)
        W = W - (dW + np.transpose(dW)) * alpha
        b = b - db * alpha
    weights, bias = W, b

    #######################################################################
    #######################################################################

    return weights, bias
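
In formula form, the neg_logPL above computes (keeping the sign convention of the code, where b enters with a minus sign and pixel i is zeroed out before forming the sum):

\[
-\log \mathrm{PL}(W, b) = -\sum_{\text{images}} \sum_i \Bigl[x_i \log \sigma(s_i) + (1 - x_i)\log\bigl(1 - \sigma(s_i)\bigr)\Bigr],
\qquad
s_i = \sum_{j \neq i} W_{ij} x_j - b_i,
\]

with sigma the logistic function; the gradient step then adds dW and its transpose so that W stays symmetric.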
Example #11
    def setUp(self):
        np.seterr(all='raise')

        def f(x):
            return (np.exp(np.sum(x[0]**2)) + np.exp(np.sum(x[1]**2)) +
                    np.exp(np.sum(x[2]**2)))

        self.cost = f

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5
        n4 = self.n4 = 6
        n5 = self.n5 = 7
        n6 = self.n6 = 8

        self.y = y = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))
        self.a = a = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))

        self.correct_cost = f(y)

        # CALCULATE CORRECT GRAD
        g1 = 2 * y[0] * np.exp(np.sum(y[0]**2))
        g2 = 2 * y[1] * np.exp(np.sum(y[1]**2))
        g3 = 2 * y[2] * np.exp(np.sum(y[2]**2))

        self.correct_grad = (g1, g2, g3)

        # CALCULATE CORRECT HESS
        # 1. VECTOR
        Ymat = np.matrix(y[0])
        Amat = np.matrix(a[0])

        diag = np.eye(n1)

        H = np.exp(np.sum(y[0]**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        h1 = np.array(Amat.dot(H)).flatten()

        # 2. MATRIX
        # First form hessian tensor H (4th order)
        Y1 = y[1].reshape(n2, n3, 1, 1)
        Y2 = y[1].reshape(1, 1, n2, n3)

        # Create an n2 x n3 x n2 x n3 array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(n2 * n3).reshape(n2, n3, n2, n3)

        H = np.exp(np.sum(y[1]**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[1].reshape(1, 1, n2, n3)

        h2 = np.sum(H * Atensor, axis=(2, 3))

        # 3. Tensor3
        # First form hessian tensor H (6th order)
        Y1 = y[2].reshape(n4, n5, n6, 1, 1, 1)
        Y2 = y[2].reshape(1, 1, 1, n4, n5, n6)

        # Create an n4 x n5 x n6 x n4 x n5 x n6 diagonal tensor
        diag = np.eye(n4 * n5 * n6).reshape(n4, n5, n6, n4, n5, n6)

        H = np.exp(np.sum(y[2]**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[2].reshape(1, 1, 1, n4, n5, n6)

        h3 = np.sum(H * Atensor, axis=(3, 4, 5))

        self.correct_hess = (h1, h2, h3)
        self.backend = AutogradBackend()
Example #12
    def __init__(self, in_config, in_utils):
        np.seterr(all='raise')
        self.g_curr_epochs = 0
        self.config = in_config
        self.utils = in_utils
Example #13
import autograd.numpy as np
from autograd import grad
import autograd.scipy.stats.norm as norm
from mindbike.util.conslist import conslistgen
from collections import namedtuple
np.seterr(over='raise')
np.seterr(invalid='raise')

Distribution = namedtuple('Distribution', ['sample', 'log_density'])

# Write tests for autograd:
# Solve with broadcasting
# Indexing with ellipses
# Ellipses in einsum


def broadcasting_jacobian(fun):
    # Jacobian over final dimension only
    def jac_fun(x):
        out_size = fun(x).shape[-1]
        jac = [
            grad(lambda x_: np.sum(fun(x_)[..., i]))(x)[..., None, :]
            for i in range(out_size)
        ]
        return np.concatenate(jac, axis=(x.ndim - 1))

    return jac_fun


def swap_final_axes(A):
    return np.swapaxes(A, -1, -2)
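
An illustrative use of broadcasting_jacobian defined above (the toy map f and the shapes here are made up for demonstration): the Jacobian of an R^3 -> R^2 map is computed row by row over a batch.

def f(x):
    # x: (..., 3) -> output: (..., 2)
    out0 = np.sum(x ** 2, axis=-1, keepdims=True)       # d(out0)/dx = 2 * x
    out1 = np.sum(np.sin(x), axis=-1, keepdims=True)    # d(out1)/dx = cos(x)
    return np.concatenate([out0, out1], axis=-1)


x = np.random.randn(5, 3)
J = broadcasting_jacobian(f)(x)
# J has shape (5, 2, 3); J[k, 0] == 2 * x[k] and J[k, 1] == np.cos(x[k])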
Example #14
helper_vars['bounds_dict'] = bdd

fn_obj = dfre_nd.models_nd.model_from_rate_function(contrast_parametric_ori,
                                                    sizedict, helper_vars)
# -

#tr = np.zeros((dtrialwise.shape[0],8))
imax = 50
tr = np.zeros((imax, ns, nc + 4))
for isize in range(ns):
    runtrial = np.logical_and(
        np.logical_and(helper_vars['runtrial'], stimulus_id[0] == isize),
        stimulus_id[3] == 0)
    for ind in range(imax):
        print(ind)
        np.seterr(all='print')
        data_obj = dfre_nd.data_obj(stimulus_id[:, runtrial],
                                    F[ind].T[:, runtrial],
                                    F[ind].T[:, runtrial],
                                    F[ind].flatten(), nbefore, nafter)
        fit_obj = dfre_nd.fit_obj(data_obj, pct_spike=97.5)
        np.seterr(all='warn')
        prestim = data_obj.F[:, :nbefore].mean()
        during = data_obj.F[:, nbefore:-nafter].mean()
        if prestim < during:
            guessA = 1
            guessb = eps
        else:
            guessA = eps
            guessb = 1
        tg = {}
Example #15
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from math import sqrt, floor, ceil
# Import Autograd modules here
# import jax.numpy as jnp
# from jax import grad, jit, vmap
from autograd import grad
import autograd.numpy.random as npr
from autograd.misc.optimizers import adam
import autograd.numpy as np
from autograd import jacobian
np.seterr(divide='ignore', invalid='ignore')


class mlp_classifier():
    def __init__(self, sizes, activations):

        self.params = []
        assert (len(activations) == len(sizes) - 1)
        self.activations = [self.identity]
        for z in activations:
            if z == 'relu':
                self.activations.append(self.relu)
            elif z == 'sigmoid':
                self.activations.append(self.sigmoid)
            if z == 'identity':
                self.activations.append(self.identity)
        scale = 0.01
        rs = npr.RandomState(0)
        self.params = [
Example #16
    """Loads a text file, and turns each line into an encoded sequence."""
    encodings = dict(list(map(reversed, enumerate(string.printable))))
    digitize = lambda char: encodings[char] if char in encodings else len(encodings)
    encode_line = lambda line: np.array(list(map(digitize, line)))
    nonblank_line = lambda line: len(line) > 2

    with open(filename) as f:
        lines = f.readlines()

    encoded_lines = list(map(encode_line, list(filter(nonblank_line, lines))[:max_lines]))
    num_outputs = len(encodings) + 1

    return encoded_lines, num_outputs


if __name__ == '__main__':
    np.random.seed(0)
    np.seterr(divide='ignore')

    # callback to print log likelihoods during training
    print_loglike = lambda loglike, params: print(loglike)

    # load training data
    lstm_filename = join(dirname(__file__), 'lstm.py')
    train_inputs, num_outputs = build_dataset(lstm_filename, max_lines=60)

    # train with EM
    num_states = 20
    init_params = initialize_hmm_parameters(num_states, num_outputs)
    pi, A, B = EM(init_params, train_inputs, print_loglike)
Example #17
    def setUp(self):
        np.seterr(all='raise')

        def f(x):
            return (np.exp(np.sum(x[0]**2)) + np.exp(np.sum(x[1]**2)) +
                    np.exp(np.sum(x[2]**2)))

        self.cost = f

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5
        n4 = self.n4 = 6
        n5 = self.n5 = 7
        n6 = self.n6 = 8

        self.y = y = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))
        self.a = a = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))

        self.correct_cost = f(y)

        # CALCULATE CORRECT GRAD
        g1 = 2 * y[0] * np.exp(np.sum(y[0] ** 2))
        g2 = 2 * y[1] * np.exp(np.sum(y[1] ** 2))
        g3 = 2 * y[2] * np.exp(np.sum(y[2] ** 2))

        self.correct_grad = (g1, g2, g3)

        # CALCULATE CORRECT HESS
        # 1. VECTOR
        Ymat = np.matrix(y[0])
        Amat = np.matrix(a[0])

        diag = np.eye(n1)

        H = np.exp(np.sum(y[0] ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        h1 = np.array(Amat.dot(H)).flatten()

        # 2. MATRIX
        # First form hessian tensor H (4th order)
        Y1 = y[1].reshape(n2, n3, 1, 1)
        Y2 = y[1].reshape(1, 1, n2, n3)

        # Create an n2 x n3 x n2 x n3 array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(n2 * n3).reshape(n2, n3, n2, n3)

        H = np.exp(np.sum(y[1] ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[1].reshape(1, 1, n2, n3)

        h2 = np.sum(H * Atensor, axis=(2, 3))

        # 3. Tensor3
        # First form hessian tensor H (6th order)
        Y1 = y[2].reshape(n4, n5, n6, 1, 1, 1)
        Y2 = y[2].reshape(1, 1, 1, n4, n5, n6)

        # Create an n4 x n5 x n6 x n4 x n5 x n6 diagonal tensor
        diag = np.eye(n4 * n5 * n6).reshape(n4, n5, n6, n4, n5, n6)

        H = np.exp(np.sum(y[2] ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[2].reshape(1, 1, 1, n4, n5, n6)

        h3 = np.sum(H * Atensor, axis=(3, 4, 5))

        self.correct_hess = (h1, h2, h3)
        self.backend = AutogradBackend()
Example #18
import lauricella_fd
import llh_fast
import poisson_gamma_mixtures
import copy

########################################################################################
####### Relevant Poisson generalizations from the paper https://arxiv.org/abs/1712.01293
####### and the newer one https://arxiv.org/abs/1902.08831
####### All formulas return the log-likelihood or log-probability
####### Formulas are not optimized for speed, but for clarity (except the C implementations, to some extent).
####### They can definitely be sped up by smart indexing etc., and everyone has to adjust them to their use case anyway.
####### Formulas are not necessarily vetted; please try them out yourself first.
####### Any questions: [email protected]
########################################################################################

numpy.seterr(divide="warn")


######################################
#### standard Poisson likelihood
######################################
def poisson(k, lambd):

    return (-lambd + k * numpy.log(lambd) - scipy.special.gammaln(k + 1)).sum()
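
Written out, the return value is the summed Poisson log-likelihood

\[
\log L(\lambda) = \sum_i \bigl(-\lambda_i + k_i \log \lambda_i - \log \Gamma(k_i + 1)\bigr)
= \log \prod_i \frac{\lambda_i^{k_i} e^{-\lambda_i}}{k_i!}.
\]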


################################################################
### Simple Poisson-Gamma mixture with equal weights (eq. 21 - https://arxiv.org/abs/1712.01293)
### multi bin expression, one k, one k_mc and one avg_weight item for each bin, all given by an array
def pg_equal_weights(k, k_mc, avgweights, prior_factor=0.0):
Example #19
    def cb(self, t, on='R', alpha_ci=0.05, bound='two-sided'):
        r"""
        Confidence bounds of the ``on`` function at the ``alpha_ci`` level of
        significance. Can be the upper, lower, or two-sided confidence by
        changing value of ``bound``.

        Parameters
        ----------

        t : array like or scalar
            The values of the random variables at which the confidence bounds
            will be calculated
        on : ('sf', 'ff', 'Hf', 'hf', 'df'), optional
            The function on which the confidence bound will be calculated.
        bound : ('two-sided', 'upper', 'lower'), str, optional
            Compute either the two-sided, upper or lower confidence bound(s).
            Defaults to two-sided.
        alpha_ci : scalar, optional
            The level of significance at which the bound will be computed.

        Returns
        -------

        cb : scalar or numpy array
            The value(s) of the upper, lower, or both confidence bound(s) of
            the selected function at t

        """
        if self.method != 'MLE':
            raise Exception('Only MLE has confidence bounds')

        hess_inv = np.copy(self.hess_inv)

        pvars = hess_inv[np.triu_indices(hess_inv.shape[0])]
        old_err_state = np.seterr(all='ignore')

        if hasattr(self.dist, 'R_cb'):
            def R_cb(x):
                return self.dist.R_cb(x - self.gamma,
                                      *self.params,
                                      hess_inv,
                                      alpha_ci=alpha_ci,
                                      bound=bound)

        else:
            def R_cb(x):
                def sf_func(params):
                    return self.dist.sf(x - self.gamma, *params)
                jac = np.atleast_2d(jacobian(sf_func)(np.array(self.params)))

                # Second-Order Taylor Series Expansion of Variance
                var_R = []
                for i, j in enumerate(jac):
                    j = np.atleast_2d(j).T * j
                    j = j[np.triu_indices(j.shape[0])]
                    var_R.append(np.sum(j * pvars))

                # First-Order Taylor Series Expansion of Variance
                # var_R = (jac**2 * np.diag(hess_inv)).sum(axis=1).T

                R_hat = self.sf(x)
                if bound == 'two-sided':
                    diff = (z(alpha_ci / 2)
                            * np.sqrt(np.array(var_R))
                            * np.array([1., -1.]).reshape(2, 1))
                elif bound == 'upper':
                    diff = z(alpha_ci) * np.sqrt(np.array(var_R))
                else:
                    diff = -z(alpha_ci) * np.sqrt(np.array(var_R))

                exponent = diff / (R_hat * (1 - R_hat))
                R_cb = R_hat / (R_hat + (1 - R_hat) * np.exp(exponent))
                return R_cb.T

        # Default cb is R
        cb = R_cb(t)

        if (on == 'ff') or (on == 'F'):
            cb = 1. - cb

        elif on == 'Hf':
            cb = -np.log(cb)

        elif on == 'hf':
            def cb_hf(x):
                out = []
                for v in x:
                    out.append(jacobian(lambda x: -np.log(R_cb(x)))(v))
                return np.concatenate(out)
            cb = cb_hf(t)

        elif on == 'df':
            def cb_df(x):
                out = []
                for v in x:
                    out.append(jacobian(lambda x: -np.log(R_cb(x)))(v)
                               * self.sf(v))
                return np.concatenate(out)
            cb = cb_df(t)

        np.seterr(**old_err_state)
        return cb
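
For reference, the bound built in R_cb is the usual logit-transformed confidence interval for the survival function: with point estimate \(\hat R\) and the delta-method variance \(\widehat{\mathrm{Var}}(\hat R)\) from the jacobian/hess_inv contraction above,

\[
R_{\mathrm{cb}} = \frac{\hat R}{\hat R + (1 - \hat R)\exp\!\Bigl(\pm z_{\alpha/2}\,\sqrt{\widehat{\mathrm{Var}}(\hat R)}\,\big/\,\bigl(\hat R(1 - \hat R)\bigr)\Bigr)},
\]

which keeps the bounds inside (0, 1); the 'ff', 'Hf', 'hf' and 'df' branches then map this bound on R onto the requested function.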
Example #20
            cumreg += G_batch.sum()

            batch_iterator = utils.create_batch_iterator(config.batch_size, X_buff, A_buff, R_buff)
            l = 0.
            # l = loss(params, model, X_buff, Y_buff, R_buff)
            for X_batch, A_batch, R_batch in batch_iterator:
                # l += loss(params, model, X_batch, A_batch, R_batch)
                gradients = grad(params, model, X_batch, A_batch, R_batch)

                model.update(params, gradients, lr=config.lr)

            pbar.set_description(f'Epoch:{t:>3}; Loss:{l:>10.2f}; CumReg:{cumreg}')


if __name__ == '__main__':
    np.seterr(all='raise')

    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str, choices=['mlp', 'bnn'])
    parser.add_argument('--epochs', type=int, default=100,
            help='Number of epochs to train classifier for')
    parser.add_argument('--hidden_layers', type=int, nargs='+', default=[100, 100],
            help='Number of neurons in each hidden layer')
    parser.add_argument('--batch_size', type=int, default=64,
            help='Number of samples in a minibatch')
    parser.add_argument('--lr', type=float, default=1e-5,
            help='Learning rate')
    parser.add_argument('--epsilon', type=float, default=0.,
            help='Epsilon exploration strategy')

    parser.add_argument('--train_batch', action='store_true',
Example #21
    encodings = dict(map(reversed, enumerate(string.printable)))
    digitize = lambda char: encodings[char] if char in encodings else len(
        encodings)
    encode_line = lambda line: np.array(list(map(digitize, line)))
    nonblank_line = lambda line: len(line) > 2

    with open(filename) as f:
        lines = f.readlines()

    encoded_lines = list(map(encode_line,
                             list(filter(nonblank_line, lines))[:max_lines]))
    num_outputs = len(encodings) + 1

    return encoded_lines, num_outputs


if __name__ == '__main__':
    np.random.seed(0)
    np.seterr(divide='ignore')

    # callback to print log likelihoods during training
    print_loglike = lambda loglike, params: print(loglike)

    # load training data
    lstm_filename = join(dirname(__file__), 'lstm.py')
    train_inputs, num_outputs = build_dataset(lstm_filename, max_lines=60)

    # train with EM
    num_states = 20
    init_params = initialize_hmm_parameters(num_states, num_outputs)
    pi, A, B = EM(init_params, train_inputs, print_loglike)
Example #22
    return lrs

    # lr += hlr * (gradients_1 @ gradients_0)
    # return lr


def response(params, inputs=None, hps=None):
    return np.argmax(forward(params, inputs=inputs, hps=hps)[-1], axis=1)


# - - - - - - - - - - - - - - - - - -

if __name__ == '__main__':
    import utils
    # np.random.seed(0)
    np.seterr('raise')

    data = utils.organize_data_from_txt('iris.csv')

    sigmoid = lambda x: 1 / (1 + np.exp(-x))
    sigmoid_deriv = lambda x: sigmoid(x) * (1 - sigmoid(x))

    hps = {
        'hyper_learning_rate':
        .000000005,  # <-- learning rate for hypergradient descent
        # 'learning_rate': .05,  # <-- learning rate
        'weight_range': [-3, 3],  # <-- weight range
        'num_hidden_nodes': 20,
        'hidden_activation': np.tanh,
        # 'hidden_activation_deriv': sigmoid_deriv,
        'output_activation': utils.softmax,  # <-- linear output function
Example #23
    def __init__(self, in_config):
        np.seterr(all='raise')
        self.config = in_config
        self.g_exit_signalled = 0
        signal.signal(signal.SIGINT, self.signal_handler)
Example #24
"""
Class for 2D GPCSD model, fitting, and prediction.

"""

import autograd.numpy as np
np.seterr(all='ignore')
from autograd import grad
import scipy
from tqdm import tqdm

from gpcsd.priors import GPCSDHalfNormalPrior, GPCSDInvGammaPrior
from gpcsd.covariances import GPCSD2DSpatialCovSE, GPCSDTemporalCovSE, GPCSDTemporalCovMatern
from gpcsd.utility_functions import reduce_grid, comp_eig_D, mykron

JITTER = 1e-7


class GPCSD2D:
    def __init__(self,
                 lfp,
                 x,
                 t,
                 a1=None,
                 b1=None,
                 a2=None,
                 b2=None,
                 ngl1=20,
                 ngl2=60,
                 spatial_cov=None,
                 temporal_cov_list=None,
Example #25
import autograd.numpy as np
from scipy.optimize import minimize
import cov
from scipy.stats import gamma
from running_statistic import RunningMean, RunningSTD, ElementWiseRunningStatistic
np.seterr(all='warn')

_minimize_method = 'L-BFGS-B'
_minimize_options = dict(maxiter=1000, disp=False)

if _minimize_method == 'L-BFGS-B':
    _minimize_options['maxcor'] = 15
    _minimize_options['ftol'] = 0


def squared_normal_shape_scale(mu, sigma):
    # calculate alpha and beta in Eqn. (9)

    shape = (mu**2 + sigma**2)**2 / (2 * sigma**2 * (2 * mu**2 + sigma**2))
    scale = (2 * mu**2 * sigma**2 + sigma**4) / (mu**2 + sigma**2)
    return shape, scale
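
A quick, illustrative sanity check (the values of mu and sigma are arbitrary): the gamma returned above moment-matches Y = X**2 / 2 for X ~ N(mu, sigma**2), i.e. E[Y] = (mu**2 + sigma**2) / 2 and Var[Y] = mu**2 * sigma**2 + sigma**4 / 2.

mu, sigma = 1.5, 0.3
shape, scale = squared_normal_shape_scale(mu, sigma)
assert np.isclose(shape * scale, (mu**2 + sigma**2) / 2)              # mean of Y
assert np.isclose(shape * scale**2, mu**2 * sigma**2 + sigma**4 / 2)  # variance of Y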


def squared_normal_quantiles(mu, sigma, probs, double=False):
    # quantiles of Y = 1/2 X^2 where X ~ N(mu, sigma^2)
    # if probs is none, return the mean

    assert len(mu) == len(sigma)
    factor = 2.0 if double else 1.0
    mu = mu.flatten()
    sigma = sigma.flatten()