Example #1
 def log_loss_regression(self, W_vect, X, Y):
     log_prior = -self.L2_reg * np.dot(W_vect, W_vect)
     Yhat = self.predicted_regression(W_vect, X)
     Y = np.ravel(Y)
     Yhat = np.ravel(Yhat)
     N = X.shape[0]
     log_lik = -0.5 * np.sum(np.square(Y - Yhat)) / N
     return -log_prior - log_lik
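Since the class context isn't shown, here is a minimal standalone sketch of the same loss, assuming a plain linear predictor X @ W in place of self.predicted_regression and an illustrative L2_reg value:

import numpy as np

# Standalone sketch of the loss above (the linear predictor and L2_reg are assumptions).
def log_loss_regression_standalone(W, X, Y, L2_reg=0.1):
    log_prior = -L2_reg * np.dot(W, W)
    Yhat = X @ W
    log_lik = -0.5 * np.sum(np.square(np.ravel(Y) - np.ravel(Yhat))) / X.shape[0]
    return -log_prior - log_lik

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
w_true = np.array([1.0, -2.0, 0.5])
Y = X @ w_true + 0.1 * rng.normal(size=100)
# The loss at the true weights should beat the loss at zero weights.
print(log_loss_regression_standalone(w_true, X, Y)
      < log_loss_regression_standalone(np.zeros(3), X, Y))  # True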
Example #3
 def NLL(self, W_vect, X, Y):
     '''Negative log likelihood.
     For classification, we assume Y is a N*C one-hot matrix.'''
     if self.output_type == 'classification':
         log_lik = np.sum(self.predictions(W_vect, X) * Y)
     else:
         Yhat = self.predictions(W_vect, X)
         Y = np.ravel(Y)
         Yhat = np.ravel(Yhat)
         log_lik = -0.5 * np.sum(np.square(Y - Yhat))
     B = X.shape[0]  # batch size
     log_lik = (log_lik / B) * self.Ntrain
     return -log_lik
Example #4
def plot_2d_func(energy_func, filename, xlims=[-4.0, 4.0], ylims=[-4.0, 4.0]):
    fig = plt.figure(0)
    fig.clf()
    ax = fig.add_subplot(111)
    x = np.linspace(*xlims, num=100)
    y = np.linspace(*ylims, num=100)
    X, Y = np.meshgrid(x, y)
    zs = np.array([
        energy_func(np.concatenate(([x], [y])))
        for x, y in zip(np.ravel(X), np.ravel(Y))
    ])
    Z = zs.reshape(X.shape)
    Z = np.exp(-Z)
    matplotlib.image.imsave(filename, Z)
Example #5
    def test_e_log_lik(self):
        n_test_samples = 10000

        # Our expected log likelihood should only differ from a sample average
        # of the generated log likelihood by a constant as the parameters
        # vary.  Check this using num_params different random parameters.
        num_params = 5
        ell_by_param = np.full(num_params, float('nan'))
        sample_ell_by_param = np.full(num_params, float('nan'))
        standard_error = 0.
        for i in range(num_params):
            tau, nu, phi_mu, phi_var = \
                vi.initialize_parameters(num_samples, x_dim, k_approx)
            phi_var_expanded = np.array([phi_var for d in range(x_dim)])

            # set vb parameters
            vb_params2['phi'].set_vector(
                np.hstack([np.ravel(phi_mu.T), phi_var]))
            vb_params2['pi'].set_vector(np.ravel(tau))
            vb_params2['nu'].set_vector(np.ravel(nu))

            z_sample, a_sample, pi_sample = \
                vi.generate_parameter_draws(nu, phi_mu, phi_var_expanded, \
                                            tau, n_test_samples)

            sample_e_log_lik = [
                vi.log_lik(x, z_sample[n, :, :], a_sample[n, :, :],
                           pi_sample[n, :], sigma_eps, sigma_a, alpha,
                           k_approx) \
                for n in range(n_test_samples) ]

            sample_ell_by_param[i] = np.mean(sample_e_log_lik)
            standard_error = \
                np.max([ standard_error,
                         np.std(sample_e_log_lik) / np.sqrt(n_test_samples) ])

            # get moments
            e_log_pi1, e_log_pi2, phi_moment1, phi_moment2, nu_moment =\
                            vi.get_moments_VB(vb_params2)

            ell_by_param[i] = vi.exp_log_likelihood(nu_moment, phi_moment1,
                                                    phi_moment2, e_log_pi1,
                                                    e_log_pi2, sigma_a,
                                                    sigma_eps, x, alpha)

        print('Mean log likelihood standard error: %0.5f' % standard_error)
        self.assertTrue(np.std(ell_by_param - sample_ell_by_param) < \
                        3. * standard_error)
Example #6
def flatten(peps):
    vec = np.empty((0))
    for i in range(peps.shape[0]):
        for j in range(peps.shape[1]):
            vec = np.append(vec, np.ravel(peps[i, j]))

    return vec
Example #7
def lds_sample(As, bs, Qi_sqrts, ms, Ri_sqrts, z=None):
    """
    Sample a linear dynamical system
    """
    T, D = ms.shape
    assert As.shape == (T-1, D, D)
    assert bs.shape == (T-1, D)
    assert Qi_sqrts.shape == (T-1, D, D)
    assert Ri_sqrts.shape == (T, D, D)

    # Convert to block form
    J_diag, J_lower_diag, h = convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts)

    # Convert blocks to banded form so we can capitalize on Lapack code
    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)
    L = cholesky_banded(J_banded, lower=True)
    U = transpose_banded((2*D-1, 0), L)

    # We have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
    # where A = U^{-1}.  Samples are Az = U^{-1}z = x, or equivalently Ux = z.
    z = npr.randn(T*D,) if z is None else np.reshape(z, (T*D,))
    samples = np.reshape(solve_banded((0, 2*D-1), U, z), (T, D))

    # Get the mean mu = J^{-1} h
    mu = np.reshape(solveh_banded(J_banded, np.ravel(h), lower=True), (T, D))

    # Add the mean
    return samples + mu
Example #9
 def NLL(self, W_vect, X, Y, N=None):
     '''Negative log likelihood.
     For classification, we assume Y is a N*C one-hot matrix.'''
     if self.output_type == 'classification':
         log_lik = np.sum(self.predictions(W_vect, X) * Y)
     else:
         Yhat = self.predictions(W_vect, X)
         Y = np.ravel(Y)
         Yhat = np.ravel(Yhat)
         log_lik = -0.5 * np.sum(np.square(Y - Yhat))
     if N is not None:
         # Compensate for this being a minibatch
         B = X.shape[0]  # batch size
         log_lik = (log_lik / B) * N
     return -log_lik
Example #10
def fit_multiclass_logistic_regression(X, y, bias=None, K=None, W0=None, mu0=0, sigmasq0=1,
                                       verbose=False, maxiter=1000):
    """
    Fit a multiclass logistic regression

        y_i ~ Cat(softmax(W x_i))

    y is a one hot vector in {0, 1}^K
    x_i is a vector in R^D
    W is a matrix R^{K x D}

    The log likelihood is,

        L(W) = sum_i sum_k y_ik * w_k^T x_i - logsumexp(W x_i)

    The prior is w_k ~ Norm(mu0, diag(sigmasq0)).
    """
    N, D = X.shape
    assert y.shape[0] == N

    # Make sure y is one hot
    if y.ndim == 1 or y.shape[1] == 1:
        y = np.ravel(y)  # guard against (N, 1)-shaped labels, which would break the fancy indexing below
        assert y.dtype == int and y.min() >= 0
        K = y.max() + 1 if K is None else K
        y_oh = np.zeros((N, K), dtype=int)
        y_oh[np.arange(N), y] = 1

    else:
        K = y.shape[1]
        assert y.min() == 0 and y.max() == 1 and np.allclose(y.sum(1), 1)
        y_oh = y

    # Check that bias is correct shape
    if bias is not None:
        assert bias.shape == (K,) or bias.shape == (N, K)
    else:
        bias = np.zeros((K,))

    def loss(W_flat):
        W = np.reshape(W_flat, (K, D))
        scores = np.dot(X, W.T) + bias
        lp = np.sum(y_oh * scores) - np.sum(logsumexp(scores, axis=1))
        prior = np.sum(-0.5 * (W - mu0)**2 / sigmasq0)
        return -(lp + prior) / N

    W0 = W0 if W0 is not None else np.zeros((K, D))
    assert W0.shape == (K, D)

    itr = [0]
    def callback(W_flat):
        itr[0] += 1
        print("Iteration {} loss: {:.3f}".format(itr[0], loss(W_flat)))

    result = minimize(loss, np.ravel(W0), jac=grad(loss),
                      method="BFGS",
                      callback=callback if verbose else None,
                      options=dict(maxiter=maxiter, disp=verbose))

    W = np.reshape(result.x, (K, D))
    return W
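A hedged smoke test for the fitter above; it assumes the function's own module imports (autograd.numpy as np, grad, scipy.optimize.minimize, and a logsumexp), and the three-cluster setup is purely illustrative:

import numpy as np
import numpy.random as npr

npr.seed(0)
N, D, K = 300, 2, 3
y = npr.randint(K, size=N)
means = np.array([[0., 0.], [5., 0.], [0., 5.]])  # well-separated classes
X = means[y] + npr.randn(N, D)

W = fit_multiclass_logistic_regression(X, y, K=K)
accuracy = np.mean(np.argmax(np.dot(X, W.T), axis=1) == y)
print("train accuracy: {:.2f}".format(accuracy))  # near 1.0 for separated clusters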
Example #11
def flatten_lists(nn_params_lists):
    """Flattens a list of lists into one big list"""
    flattened = np.array([])
    for layer in nn_params_lists:
        for p_list in layer:
            flattened = ag_np.concatenate((flattened, ag_np.ravel(p_list)))
    return flattened
Example #13
def _flatten(value):
    t = type(value)
    if t in (list, tuple):
        return _concatenate(map(_flatten, value))
    elif t is dict:
        return _concatenate(_flatten(value[k]) for k in sorted(value))
    else:
        return np.ravel(value)
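_concatenate isn't defined in this snippet; a reasonable stand-in (an assumption here) materializes the iterable before deferring to NumPy, since np.concatenate expects a sequence. With that, _flatten walks nested containers depth-first, visiting dict keys in sorted order:

import numpy as np

def _concatenate(arrays):
    # Assumed helper: np.concatenate wants a sequence, not a bare iterator.
    return np.concatenate(list(arrays))

nested = {'b': [1.0, np.arange(3)], 'a': np.ones((2, 2))}
print(_flatten(nested))
# [1. 1. 1. 1. 1. 0. 1. 2.]  ('a' first by sorted key, then the list under 'b')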
Example #14
 def sample_gpp(x, n_samples):
     """ Samples from the gp prior x = inputs with shape [N_data]
     returns : samples from the gp prior [N_data, N_samples] """
     x = np.ravel(x)
     n_data = len(x)
     K = covariance(x[:, None], x[:, None])
     L = cholesky(K + 1e-7 * np.eye(n_data))
     e = rs.randn(n_data, n_samples)
     return np.dot(L, e)
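This snippet relies on a covariance kernel, a cholesky, and a random state rs from its enclosing module; the stand-ins below (a squared-exponential kernel and NumPy's Cholesky) are assumptions made so the sketch runs:

import numpy as np
from numpy.linalg import cholesky

rs = np.random.RandomState(0)  # stand-in for the module's random state

def covariance(x1, x2, lengthscale=1.0):
    # Assumed squared-exponential kernel; the original is not shown.
    return np.exp(-0.5 * ((x1 - x2.T) / lengthscale) ** 2)

x = np.linspace(-3, 3, 50)
samples = sample_gpp(x, n_samples=5)
print(samples.shape)  # (50, 5): one column per draw from the GP prior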
Example #15
def one_hot(z, K):
    z = np.atleast_1d(z).astype(int)
    assert np.all(z >= 0) and np.all(z < K)
    shp = z.shape
    N = z.size
    zoh = np.zeros((N, K))
    zoh[np.arange(N), np.arange(K)[np.ravel(z)]] = 1
    zoh = np.reshape(zoh, shp + (K,))
    return zoh
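Note that after the bounds assert, np.arange(K)[np.ravel(z)] is just np.ravel(z), so the fancy indexing marks entry (n, z_n). A quick check:

import numpy as np

z = np.array([0, 2, 1])
print(one_hot(z, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]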
Example #16
 def sample_gp_prior(x, n_samples):
     """ Samples from the gp prior x = inputs with shape [N_data]
     returns : samples from the gp prior [N_samples, N_data] """
     x = np.ravel(x)
     n_data = len(x)
     K = covariance(x[:, None], x[:, None])
     L = cholesky(K + 1e-4 * np.eye(n_data))
     e = np.random.normal(size=(n_data, n_samples))
     f_gp_prior = np.dot(L, e)
     return f_gp_prior.T
Example #17
def flatten(value):
    """value can be any nesting of tuples, arrays, dicts.
       returns 1D numpy array and an unflatten function."""
    if isinstance(getval(value), np.ndarray):
        def unflatten(vector):
            return np.reshape(vector, value.shape)
        return np.ravel(value), unflatten

    elif isinstance(getval(value), float):
        return np.array([value]), lambda x : x[0]

    elif isinstance(getval(value), tuple):
        if not value:
            return np.array([]), lambda x : ()
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])
        def unflatten(vector):
            N = len(flattened_first)
            return (unflatten_first(vector[:N]),) + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    elif isinstance(getval(value), list):
        if not value:
            return np.array([]), lambda x : []
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])
        def unflatten(vector):
            N = len(flattened_first)
            return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    elif isinstance(getval(value), dict):
        flattened = []
        unflatteners = []
        lengths = []
        keys = []
        for k, v in sorted(iteritems(value), key=itemgetter(0)):
            cur_flattened, cur_unflatten = flatten(v)
            flattened.append(cur_flattened)
            unflatteners.append(cur_unflatten)
            lengths.append(len(cur_flattened))
            keys.append(k)

        def unflatten(vector):
            split_ixs = np.cumsum(lengths)
            pieces = np.split(vector, split_ixs)
            return {key: unflattener(piece)
                    for piece, unflattener, key in zip(pieces, unflatteners, keys)}

        return np.concatenate(flattened), unflatten

    else:
        raise Exception("Don't know how to flatten type {}".format(type(value)))
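A round-trip check of flatten/unflatten. In its home module, getval comes from autograd and iteritems from future.utils; the identity stand-ins below are assumptions that let the sketch run on plain NumPy containers:

import numpy as np
from operator import itemgetter

getval = lambda x: x             # stand-in for autograd's getval
iteritems = lambda d: d.items()  # stand-in for future.utils.iteritems

params = {'w': np.ones((2, 3)), 'b': (0.5, np.zeros(2))}
vec, unflatten = flatten(params)
print(vec.shape)  # (9,): 'b' contributes 1 + 2 entries, 'w' contributes 6
restored = unflatten(vec)
print(np.allclose(restored['w'], params['w']), restored['b'][0])  # True 0.5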
Example #19
 def compute_xnew(inputs, lambda_):
     x, dc, dv = unpack(inputs)
     # avoid dividing by zero outside the design region
     dv = np.where(np.ravel(args['mask']) > 0, dv, 1)
     # square root is not defined for negative numbers, which can happen due to
     # small numerical errors in the computed gradients.
     xnew = x * np.maximum(-dc / (lambda_ * dv), 0) ** eta
     lower = np.maximum(0.0, x - max_move)
     upper = np.minimum(1.0, x + max_move)
     # note: autograd does not define gradients for np.clip
     return np.minimum(np.maximum(xnew, lower), upper)
Example #20
def plot_true_posterior():
    true_posterior_contour_levels = [0.01, 0.2, 1.0, 10.0]

    x = np.linspace(*xlimits, num=200)
    y = np.linspace(*ylimits, num=200)
    X, Y = np.meshgrid(x, y)

    fig = plt.figure(0)
    fig.clf()
    fig.set_size_inches((5, 4))
    ax = fig.add_subplot(111)
    zs = np.array([
        nllfun(np.concatenate(([x], [y])))
        for x, y in zip(np.ravel(X), np.ravel(Y))
    ])
    Z = zs.reshape(X.shape)
    plt.contour(X, Y, np.exp(-Z), true_posterior_contour_levels, colors='k')
    ax.set_yticks([])
    ax.set_xticks([])
    return ax
Example #21
def make_map(C):
    assert C.dtype == bool and C.ndim == 2
    N1, N2 = C.shape
    valid_inds = np.where(np.ravel(C))[0]
    C_map = np.zeros((N1 * N2, C.sum()))
    C_map[valid_inds, np.arange(C.sum())] = 1

    def unpack_vec(v):
        return np.reshape(np.dot(C_map, v), (N1, N2))

    def pack_matrix(A):
        return A[C]

    return unpack_vec, pack_matrix
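A pack/unpack round trip on a toy mask makes the mapping concrete:

import numpy as np

C = np.array([[True, False], [False, True]])
unpack_vec, pack_matrix = make_map(C)
A = unpack_vec(np.array([3.0, 7.0]))
print(A)               # [[3. 0.]
                       #  [0. 7.]]
print(pack_matrix(A))  # [3. 7.]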
Example #22
def batch_transform(X, f_list):
    """ Applies the `fitr.utils.transform` function over a batch of parameters

    Arguments:

        X: `ndarray((nsamples, nparams))`. Raw parameters
        f_list: `list` where `len(list) == nparams`. Functions defining coordinate transformations on each element of `x`.

    Returns:

        `ndarray((nsamples, nparams))`. Transformed parameters
    """
    # np.stack requires a sequence of arrays, not a bare generator.
    return np.stack(
        [np.ravel(transform(X[i], f_list)) for i in range(X.shape[0])])
Example #23
    def likelihood(self, par, y, t, mf):
        phi_y, chain_d_sal = self.comp_phi(par, y)
        t_phi_y = np.transpose(phi_y)
        t_t = np.transpose(t)
        mean_t = mf(t) if mf else np.zeros(t.shape)
        if self.kernel_params:
            cov_xx = self.kernel(1, *par[-self.kernel_params:]).K(
                t.reshape(-1, 1), t.reshape(-1, 1))
        else:
            cov_xx = self.kernel(1).K(t.reshape(-1, 1), t.reshape(-1, 1))

        gaussian_params = 0.5 * \
            (t_t - np.transpose(mean_t)) @ np.linalg.inv(cov_xx) @ (t - mean_t)
        return np.ravel(0.5 * np.log(np.linalg.det(cov_xx)) + gaussian_params -
                        np.sum(np.log(chain_d_sal)))
Example #24
def flatten(value):
    """Flattens any nesting of tuples, arrays, or dicts.
       Returns 1D numpy array and an unflatten function.
       Doesn't preserve mixed numeric types (e.g. floats and ints).
       Assumes dict keys are sortable."""
    if isinstance(getval(value), np.ndarray):
        shape = value.shape

        def unflatten(vector):
            return np.reshape(vector, shape)

        return np.ravel(value), unflatten

    elif isinstance(getval(value), (float, int)):
        return np.array([value]), lambda x: x[0]

    elif isinstance(getval(value), (tuple, list)):
        constructor = type(getval(value))
        if not value:
            return np.array([]), lambda x: constructor()
        flat_pieces, unflatteners = zip(*map(flatten, value))
        split_indices = np.cumsum([len(vec) for vec in flat_pieces[:-1]])

        def unflatten(vector):
            pieces = np.split(vector, split_indices)
            return constructor(
                unflatten(v) for unflatten, v in zip(unflatteners, pieces))

        return np.concatenate(flat_pieces), unflatten

    elif isinstance(getval(value), dict):
        items = sorted(iteritems(value), key=itemgetter(0))
        keys, flat_pieces, unflatteners = zip(*[(k, ) + flatten(v)
                                                for k, v in items])
        split_indices = np.cumsum([len(vec) for vec in flat_pieces[:-1]])

        def unflatten(vector):
            pieces = np.split(vector, split_indices)
            return {
                key: unflattener(piece)
                for piece, unflattener, key in zip(pieces, unflatteners, keys)
            }

        return np.concatenate(flat_pieces), unflatten

    else:
        raise Exception("Don't know how to flatten type {}".format(
            type(value)))
Example #25
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.

    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2**-s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    for i in range(s - 1, -1, -1):
        X = np.dot(X, X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2**-i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n - 1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k + 1]
            t_12 = scale * T[k, k + 1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k + 1] = value

    # Return the updated X matrix.
    return X
Example #26
def flatten(value):
    # value can be any nested thing ((), array, [] ) etc
    # returns numpy array
    if isinstance(getval(value), np.ndarray):

        def unflatten(vector):
            return np.reshape(vector, value.shape)

        return np.ravel(value), unflatten

    elif isinstance(getval(value), float):
        return np.array([value]), lambda x: x[0]

    elif isinstance(getval(value), tuple):
        if not value:
            return np.array([]), lambda x: ()
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])

        def unflatten(vector):
            N = len(flattened_first)
            return (unflatten_first(vector[:N]), ) + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    elif isinstance(getval(value), list):
        if not value:
            return np.array([]), lambda x: []

        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])

        def unflatten(vector):
            N = len(flattened_first)
            return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    else:
        raise Exception("Don't know how to flatten type {}".format(
            type(value)))
Example #29
def block_tridiagonal_sample(J_diag, J_lower_diag, h, z=None):
    """
    Sample a Gaussian chain graph represented by a block
    tridiagonal precision matrix and a linear potential.
    """
    T, D = h.shape
    assert J_diag.shape == (T, D, D)
    assert J_lower_diag.shape == (T - 1, D, D)

    # Convert blocks to banded form so we can capitalize on Lapack code
    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)
    L = cholesky_banded(J_banded, lower=True)
    U = transpose_banded((2 * D - 1, 0), L)

    # We have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
    # where A = U^{-1}.  Samples are Az = U^{-1}z = x, or equivalently Ux = z.
    z = npr.randn(T * D, ) if z is None else np.reshape(z, (T * D, ))
    samples = np.reshape(solve_banded((0, 2 * D - 1), U, z), (T, D))

    # Get the mean mu = J^{-1} h
    mu = np.reshape(solveh_banded(J_banded, np.ravel(h), lower=True), (T, D))

    # Add the mean
    return samples + mu
Example #30
    alpha = np.exp(nu)
    alpha = alpha / np.sum(alpha)
    alpha = np.expand_dims(alpha, 1)

    loglikvec = logsumexp(np.log(alpha) + log_q, axis=0)
    return -np.sum(loglikvec)

problem = Problem(manifold=manifold, cost=cost, verbosity=1)

# (3) Instantiate a Pymanopt solver
#solver = TrustRegions()
solver = SteepestDescent(logverbosity=1)

# let Pymanopt do the rest
Xopt, optlog = solver.solve(problem)
print(optlog)

# Inspect results
mu_hat = Xopt[0]
Sigma_hat = Xopt[1]
for k in range(K):
    mu_est = Xopt[0][k][0:2, 2:3]
    Sigma_est = Xopt[0][k][:2, :2] - mu_est.dot(mu_est.T)
    print('k = {}'.format(k))
    print('true mu {}, est {}'.format(mu[k], np.ravel(mu_est)))

pihat = np.exp(np.concatenate([Xopt[1], [0]], axis=0))
pihat = pihat / np.sum(pihat)
print('true pi {}, est {}'.format(pi, pihat))

Example #32
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    f_out = open(filename, 'w')
    f_out.write("    Train err  |   Test err  |   Alpha\n")
    f_out.close()

    final_test_err = loss_fun(W, alpha, train_images, train_labels)

    print(N, final_test_err)
    return final_test_err

N_data, train_images, train_labels, test_images, test_labels = get_wine_data()

if __name__ == '__main__':
    # Initialize weights
    rs = npr.RandomState(11)
    param_scale = 0.1

    max_N = 7

    N = 4.5

    num_init_weights = 36 + 9 * (max_N - 1) + 3 * max_N
    W = np.ravel(np.identity(int(np.sqrt(num_init_weights)) + 1))

    run_nn_grad = grad(run_nn, 0)
    params = np.concatenate((np.array([N]), W))

    optimize.minimize(run_nn, params, jac=run_nn_grad, method='BFGS', \
        args=(12, 3), options={'disp': True})
Example #33
 def set(self, vect, name, value):
     """Takes in a vector and returns the subset indexed by name."""
     idxs, _ = self.idxs_and_shapes[name]
     vect[idxs] = np.ravel(value)
Example #34
anp.sqrt.defjvp(lambda g, ans, gvs, vs, x: g * 0.5 * x**-0.5)
anp.sinc.defjvp(lambda g, ans, gvs, vs, x: g * (anp.cos(
    anp.pi * x) * anp.pi * x - anp.sin(anp.pi * x)) / (anp.pi * x**2))
anp.reshape.defjvp(lambda g, ans, gvs, vs, x, shape, order=None: anp.reshape(
    g, vs.shape, order=order))
anp.roll.defjvp(
    lambda g, ans, gvs, vs, x, shift, axis=None: anp.roll(g, shift, axis=axis))
anp.array_split.defjvp(lambda g, ans, gvs, vs, ary, idxs, axis=0: anp.
                       array_split(g, idxs, axis=axis))
anp.split.defjvp(
    lambda g, ans, gvs, vs, ary, idxs, axis=0: anp.split(g, idxs, axis=axis))
anp.vsplit.defjvp(lambda g, ans, gvs, vs, ary, idxs: anp.vsplit(g, idxs))
anp.hsplit.defjvp(lambda g, ans, gvs, vs, ary, idxs: anp.hsplit(g, idxs))
anp.dsplit.defjvp(lambda g, ans, gvs, vs, ary, idxs: anp.dsplit(g, idxs))
anp.ravel.defjvp(
    lambda g, ans, gvs, vs, x, order=None: anp.ravel(g, order=order))
anp.expand_dims.defjvp(
    lambda g, ans, gvs, vs, x, axis: anp.expand_dims(g, axis))
anp.squeeze.defjvp(lambda g, ans, gvs, vs, x, axis=None: anp.squeeze(g, axis))
anp.diag.defjvp(lambda g, ans, gvs, vs, x, k=0: anp.diag(g, k))
anp.flipud.defjvp(lambda g, ans, gvs, vs, x, : anp.flipud(g))
anp.fliplr.defjvp(lambda g, ans, gvs, vs, x, : anp.fliplr(g))
anp.rot90.defjvp(lambda g, ans, gvs, vs, x, k=1: anp.rot90(g, k))
anp.trace.defjvp(lambda g, ans, gvs, vs, x, offset=0: anp.trace(g, offset))
anp.full.defjvp(lambda g, ans, gvs, vs, shape, fill_value, dtype=None: anp.
                full(shape, g, dtype),
                argnum=1)
anp.triu.defjvp(lambda g, ans, gvs, vs, x, k=0: anp.triu(g, k=k))
anp.tril.defjvp(lambda g, ans, gvs, vs, x, k=0: anp.tril(g, k=k))
anp.clip.defjvp(lambda g, ans, gvs, vs, x, a_min, a_max: g * anp.logical_and(
    ans != a_min, ans != a_max))
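These are forward-mode (JVP) rules for autograd's tracer; the anp.ravel rule, for instance, just ravels the tangent. A small check of that rule, assuming a recent autograd that exports make_jvp:

import autograd.numpy as anp
from autograd import make_jvp

f = lambda x: anp.ravel(x) ** 2
x = anp.array([[1.0, 2.0], [3.0, 4.0]])
v = anp.ones_like(x)
ans, tangent = make_jvp(f)(x)(v)
print(tangent)  # [2. 4. 6. 8.], i.e. 2 * ravel(x) * ravel(v)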
Example #35
def scan_policy(pos_1,
                pos_2=None,
                n=20,
                points=30,
                model=None,
                top=False,
                bot=False,
                nlp=None):

    x0 = np.zeros(4)
    x0 = random_init(x0)
    x0[0] = 0

    if top:
        x0 = np.array([
            random.normalvariate(0, 0.03),
            random.normalvariate(0, 0.03),
            random.normalvariate(0, 0.03),
            random.normalvariate(0, 0.03)
        ])
    if bot:
        x0 = np.array([
            random.normalvariate(0, 0.03),
            random.normalvariate(0, 0.03),
            np.pi - random.normalvariate(0, 0.03),
            random.normalvariate(0, 0.03)
        ])

    p = np.zeros(4)

    # p[1]=5
    # p[3]=-8

    loop = False
    osc = False

    if loop:
        x0 = np.array([0, 0, np.pi, 15])
    elif osc:
        x0 = np.array([0, 0, np.pi, 5])

    scanned_x = np.arange(-p_range[pos_1] / 2, p_range[pos_1] / 2,
                          p_range[pos_1] / points)  # scanning variable 1

    if pos_2 is not None:  # 2d contour plots
        scanned_y = np.arange(-p_range[pos_2] / 2, p_range[pos_2] / 2,
                              p_range[pos_2] / points)  # scanning variable 2
        X, Y = np.meshgrid(scanned_x, scanned_y)
        rav_X = np.ravel(X)
        rav_Y = np.ravel(Y)
        rav_Z = np.zeros(rav_X.shape[0])
        for i in range(rav_X.shape[0]):
            p[pos_1] = rav_X[i]
            p[pos_2] = rav_Y[i]

            rav_Z[i] = loss_trajectory(x0, n, p, model, nlp)

        Z = rav_Z.reshape(X.shape)

        colour = 'inferno'
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        surf = ax.plot_surface(X,
                               Y,
                               Z,
                               rstride=1,
                               cstride=1,
                               cmap=colour,
                               vmin=0,
                               vmax=50)
        # Add a color bar which maps values to colors.
        cbar = fig.colorbar(surf, shrink=0.5, aspect=5)
        cbar.set_label('Loss')
        plt.xlabel('p[' + str(pos_1) + ']')
        plt.ylabel('p[' + str(pos_2) + ']')
        ax.set_zlabel('Loss')
        plt.title('Initial state: ' + str(np.round(x0, 2)) +
                  '\n Scan through ' + 'p[' + str(pos_1) + '] and p[' +
                  str(pos_2) + ']')
        plt.show()

        cont_plot = plt.tricontourf(rav_X,
                                    rav_Y,
                                    rav_Z,
                                    levels=14,
                                    cmap=colour,
                                    vmin=0,
                                    vmax=50)
        cbar = plt.colorbar(cont_plot, shrink=0.5, aspect=5)
        cbar.set_label('Loss')
        plt.xlabel('p[' + str(pos_1) + ']')
        plt.ylabel('p[' + str(pos_2) + ']')
        plt.title('Initial state: ' + str(np.round(x0, 2)) +
                  '\n Scan through ' + 'p[' + str(pos_1) + '] and p[' +
                  str(pos_2) + ']')

        plt.show()
Example #36
def split_array(arr, length):
    return np.split(truncate_to_multiple(arr, length), len(arr) // length)

def split_into_batches(data, seq_len, num_seqs=None, permute=True):
    batches = npr.permutation(flatmap(partial(split_array, length=seq_len), data))
    if num_seqs is None:
        return batches, len(batches)
    chunks = (batches[i*num_seqs:(i+1)*num_seqs] for i in range(len(batches) // num_seqs))
    return map(np.stack, chunks), len(batches) // num_seqs


### basic math on (nested) tuples

istuple = lambda x: isinstance(x, (tuple, TupleNode, list, ListNode))
ensuretuple = lambda x: x if istuple(x) else (x,)
concat = lambda *args: reduce(operator.add, map(ensuretuple, args))
inner = lambda a, b: np.dot(np.ravel(a), np.ravel(b))

Y = lambda f: (lambda x: x(x))(lambda y: f(lambda *args: y(y)(*args)))
make_unop = lambda op, combine: \
    Y(lambda f: lambda a: op(a) if not istuple(a) else combine(map(f, a)))
make_scalar_op = lambda op, combine: \
    Y(lambda f: lambda a, b : op(a, b)  if not istuple(b) else combine(map(partial(f, a), b)))
make_binop = lambda op, combine: \
    Y(lambda f: lambda a, b: op(a, b) if not istuple(a) else combine(map(f, a, b)))

def add_binop_size_check(binop):
    def wrapped(a, b):
        assert shape(a) == shape(b)
        return binop(a, b)
    return wrapped
make_binop = (lambda make_binop: lambda *args:
Example #37
 def fun(x):
     return to_scalar(np.ravel(x))
Example #38
 def _from_x_tensors(x_tensors):
     x1d = numpy.hstack([numpy.ravel(x) for x in x_tensors])
     return numpy.hstack((x1d.real, x1d.imag))
Example #40
def solve_symm_block_tridiag(J_diag, J_lower_diag, v):
    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)
    x_flat = solveh_banded(J_banded, np.ravel(v), lower=True)
    return np.reshape(x_flat, v.shape)
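A dense cross-check sketch, assuming blocks_to_bands and scipy's solveh_banded (which the function needs) are importable from the same module; building J as B @ B.T with B unit block-bidiagonal guarantees a symmetric positive definite block-tridiagonal matrix:

import numpy as np

T, D = 4, 2
rng = np.random.default_rng(0)
B = np.eye(T * D)
for t in range(T - 1):
    B[(t + 1) * D:(t + 2) * D, t * D:(t + 1) * D] = 0.3 * rng.normal(size=(D, D))
J = B @ B.T  # SPD and block tridiagonal by construction

J_diag = np.array([J[t * D:(t + 1) * D, t * D:(t + 1) * D] for t in range(T)])
J_lower_diag = np.array([J[(t + 1) * D:(t + 2) * D, t * D:(t + 1) * D]
                         for t in range(T - 1)])
v = rng.normal(size=(T, D))
x = solve_symm_block_tridiag(J_diag, J_lower_diag, v)
print(np.allclose(np.ravel(x), np.linalg.solve(J, np.ravel(v))))  # True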
Example #41
def solve_lds(As, bs, Qi_sqrts, ms, Ri_sqrts, v):
    J_diag, J_lower_diag, _ = convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts)
    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)
    x_flat = solveh_banded(J_banded, np.ravel(v), lower=True)
    return np.reshape(x_flat, v.shape)
Example #42
File: niw.py Project: WuCPMark/svae
def natural_sample(natparam):
    A, Sigma = mniw.natural_sample(add_dims(*natparam))
    return np.ravel(A), Sigma
Example #43
 def fun(x): return to_scalar(np.ravel(x))
 d_fun = lambda x: to_scalar(grad(fun)(x))
Example #44
 def list_fun(*args, **kwargs):
     val = fun(*args, **kwargs)
     dummy.outshape = getshape(val)
     return list(np.ravel(val))