Example 1
File: nnet.py  Project: mattjj/svae
def _gresnet(mlp_type, mlp, params, inputs):
    ravel, unravel = _make_ravelers(inputs.shape)
    mlp_params, (W, b1, b2) = params

    if mlp_type == 'mean':
        # mean parameterization: add an affine residual to the MLP's mean
        # and a softplus-positive residual to its variance
        mu_mlp, sigmasq_mlp = mlp(mlp_params, inputs)
        mu_res = unravel(np.dot(ravel(inputs), W) + b1)
        sigmasq_res = log1pexp(b2)
        return make_tuple(mu_mlp + mu_res, sigmasq_mlp + sigmasq_res)
    else:
        # natural (information) parameterization: the precision-like residual
        # -1/2 * softplus(b2) is strictly negative by construction
        J_mlp, h_mlp = mlp(mlp_params, inputs)
        J_res = -1./2 * log1pexp(b2)
        h_res = unravel(np.dot(ravel(inputs), W) + b1)
        return make_tuple(J_mlp + J_res, h_mlp + h_res)
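The snippet leans on two helpers defined elsewhere in nnet.py. A minimal sketch of plausible stand-ins (assumptions; the actual svae definitions may differ in detail):

import autograd.numpy as np

# log1pexp is softplus: log(1 + exp(x)); logaddexp(0, x) computes it
# without overflowing for large x (assumed equivalent to svae's helper)
log1pexp = lambda x: np.logaddexp(0., x)

def _make_ravelers(input_shape):
    # flatten leading axes into one batch axis, and undo that reshape
    ravel = lambda x: np.reshape(x, (-1, input_shape[-1]))
    unravel = lambda x: np.reshape(x, input_shape[:-1] + (-1,))
    return ravel, unravel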
Example 2
def linear_recognize(x, psi):
    C, d = psi

    sigmasq = d**2
    mu = np.dot(x, C.T)

    J = np.tile(1. / sigmasq, (x.shape[0], 1))
    h = J * mu

    return make_tuple(-1. / 2 * J, h, np.zeros(x.shape[0]))
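A hypothetical usage sketch (names and shapes invented for illustration): x holds T observations of dimension p, C maps an n-dimensional latent to observations, and d holds per-coordinate noise scales.

import numpy.random as npr

T, p, n = 5, 4, 2
x = npr.randn(T, p)
psi = (npr.randn(n, p), npr.rand(n) + 0.1)   # (C, d)

J, h, logZ = linear_recognize(x, psi)
assert J.shape == (T, n) and h.shape == (T, n) and logZ.shape == (T,)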
Example 3
def linear_recognize(x, psi):
    C, D = psi
    # convert the pair mean parameters to natural parameters; only the
    # latent precision block J and the cross term Jzx are needed here
    J, Jzx, Jxx, _ = pair_mean_to_natural(C, np.dot(D, D.T))
    T = x.shape[0]

    J = np.tile(np.diag(J), (T, 1))   # keep the diagonal, one row per timestep
    h = np.dot(x, Jzx.T)
    logZ = np.zeros(x.shape[0])

    return make_tuple(J, h, logZ)
Example 4
def expectedstats_standard(nu, S, M, K, fudge=1e-8):
    m = M.shape[0]
    E_Sigmainv = nu*symmetrize(np.linalg.inv(S)) + fudge*np.eye(S.shape[0])
    E_Sigmainv_A = nu*np.linalg.solve(S, M)
    E_AT_Sigmainv_A = m*K + nu*symmetrize(np.dot(M.T, np.linalg.solve(S, M))) \
        + fudge*np.eye(K.shape[0])
    E_logdetSigmainv = digamma((nu-np.arange(m))/2.).sum() \
        + m*np.log(2) - np.linalg.slogdet(S)[1]

    assert is_posdef(E_Sigmainv)
    assert is_posdef(E_AT_Sigmainv_A)

    return make_tuple(
        -1./2*E_AT_Sigmainv_A, E_Sigmainv_A.T, -1./2*E_Sigmainv, 1./2*E_logdetSigmainv)
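The assertions use two small helpers not shown here; plausible one-line definitions (an assumption, svae's versions may differ):

symmetrize = lambda A: (A + A.T) / 2.                      # average away numerical asymmetry
is_posdef = lambda A: np.all(np.linalg.eigvalsh(A) > 0.)   # all eigenvalues positive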
Example 5
def natural_lds_inference_general_autograd(natparam, node_params, num_samples=None):
    init_params, pair_params = natparam

    def lds_log_normalizer(all_natparams):
        init_params, pair_params, node_params = all_natparams
        forward_messages, lognorm = natural_filter_forward_general(
            init_params, pair_params, node_params)
        return lognorm, (lognorm, forward_messages)

    all_natparams = make_tuple(init_params, pair_params, node_params)
    expected_stats, (lognorm, forward_messages) = agrad(lds_log_normalizer)(all_natparams)
    samples = natural_sample_backward_general(forward_messages, pair_params, num_samples)

    return samples, expected_stats, lognorm
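Why a single gradient call does the smoothing: for an exponential family with natural parameter \eta and sufficient statistics t(x),

\nabla_\eta \log Z(\eta) = \mathbb{E}_{p(x \mid \eta)}[t(x)],

so differentiating the filter's log normalizer with respect to all_natparams yields the expected sufficient statistics, and the auxiliary return value lets the same forward messages be reused for backward sampling.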
Example 6
def mlp_recognize(x, psi, tanh_scale=10.):
    nnet_params, ((W_h, b_h), (W_J, b_J)) = psi[:-2], psi[-2:]
    shape = x.shape[:-1] + (-1,)

    nnet = compose(tanh_layer(W, b) for W, b in nnet_params)
    h = linear_layer(W_h, b_h)
    log_J = linear_layer(W_J, b_J)

    nnet_outputs = nnet(np.reshape(x, (-1, x.shape[-1])))
    # soft-clip the raw log precision to (-tanh_scale, tanh_scale) before
    # exponentiating, keeping J bounded away from 0 and -inf
    J = -1./2 * np.exp(tanh_scale * np.tanh(log_J(nnet_outputs) / tanh_scale))
    h = h(nnet_outputs)
    logZ = np.zeros(shape[:-1])

    return make_tuple(np.reshape(J, shape), np.reshape(h, shape), logZ)
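A quick numeric check of the soft-clipping bound (plain numpy, values illustrative):

z = np.linspace(-100., 100., 5)
clipped = 10. * np.tanh(z / 10.)
assert np.all(np.abs(clipped) < 10.)   # so exp(clipped) stays within (e**-10, e**10)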
Example 7
def bind(result, step):
    next_smooth, stats = result
    J, h, (mu, ExxT, ExxnT) = step(next_smooth)
    return make_tuple(J, h, mu), [(mu, ExxT, ExxnT)] + stats
Example 8
def unit(filtered_message):
    J, h = filtered_message
    mu, Sigma = natural_to_mean(filtered_message)
    ExxT = Sigma + np.outer(mu, mu)
    return make_tuple(J, h, mu), [(mu, ExxT, 0.)]
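unit converts the final filtered message to mean parameters; a minimal natural_to_mean sketch under the convention that (J, h) parameterize exp(-1/2 x'Jx + h'x) (an assumption, svae's sign convention may differ):

def natural_to_mean(message):
    J, h = message
    Sigma = np.linalg.inv(J)   # covariance is the inverse precision
    mu = np.dot(Sigma, h)      # mean solves J mu = h
    return mu, Sigma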
Example 9
File: nnet.py  Project: mattjj/svae
def gaussian_mean(inputs, sigmoid_mean=False):
    mu_input, sigmasq_input = np.split(inputs, 2, axis=-1)
    mu = sigmoid(mu_input) if sigmoid_mean else mu_input
    sigmasq = log1pexp(sigmasq_input)
    return make_tuple(mu, sigmasq)
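A quick shape-and-positivity check (hypothetical usage; assumes sigmoid and log1pexp from the same module): the network output is split in half along the last axis, and softplus keeps every variance strictly positive.

inputs = np.zeros((3, 8))
mu, sigmasq = gaussian_mean(inputs)
assert mu.shape == sigmasq.shape == (3, 4)
assert np.all(sigmasq > 0.)   # log1pexp(0) = log 2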
Example 10
def pack_nodeparams(node_params):
    return make_tuple(*map(np.array, zip(*node_params)))
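What the zip/map dance does, on toy values: a list of per-timestep parameter tuples becomes a tuple of stacked arrays (illustrative usage).

node_params = [(1., 10.), (2., 20.), (3., 30.)]
J, h = pack_nodeparams(node_params)
# J == array([1., 2., 3.]), h == array([10., 20., 30.])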
Example 11
File: nnet.py  Project: mattjj/svae
def gaussian_info(inputs):
    J_input, h = np.split(inputs, 2, axis=-1)
    J = -1. / 2 * log1pexp(J_input)
    return make_tuple(J, h)
Esempio n. 15
0
File: nnet.py Progetto: mattjj/svae
def gaussian_mean(inputs, sigmoid_mean=False):
    mu_input, sigmasq_input = np.split(inputs, 2, axis=-1)
    mu = sigmoid(mu_input) if sigmoid_mean else mu_input
    sigmasq = log1pexp(sigmasq_input)
    return make_tuple(mu, sigmasq)
Example 12
from __future__ import division
import autograd.numpy as np
from autograd.scipy.special import multigammaln
from autograd import grad
from autograd.util import make_tuple

import mniw

# the NIW family as a special case of MNIW (the matrix mean has one column)

al2d = np.atleast_2d
add_dims = lambda A, b, c, d: (A, b[:,None], al2d(c), d)
remove_dims = lambda A, B, C, d: make_tuple(A, B.ravel(), C[0,0], d)

def standard_to_natural(nu, S, m, kappa):
    A, B, C, d = mniw.standard_to_natural(nu, S, m[:,None], al2d(kappa))
    return C, B.ravel(), A[0,0], d

def expectedstats(natparam):
    return remove_dims(*mniw.expectedstats(add_dims(*natparam)))

def logZ(natparam):
    return mniw.logZ(add_dims(*natparam))

def natural_sample(natparam):
    A, Sigma = mniw.natural_sample(add_dims(*natparam))
    return np.ravel(A), Sigma
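add_dims and remove_dims are exact inverses, shuttling between the vector-mean NIW shapes and the single-column-matrix MNIW shapes; a round-trip check on toy values:

A, b, c, d = np.eye(2), np.arange(2.), 3., 4.
A2, b2, c2, d2 = remove_dims(*add_dims(A, b, c, d))
assert np.allclose(A2, A) and np.allclose(b2, b) and c2 == c and d2 == d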
Example 13
def get_hmm_vlb(lds_global_natparam, hmm_local_natparam, lds_expected_stats):
    init_params, pair_params, _ = hmm_local_natparam
    node_params = get_arhmm_local_nodeparams(lds_global_natparam,
                                             lds_expected_stats)
    local_natparam = make_tuple(init_params, pair_params, node_params)
    return hmm_logZ(local_natparam)