def plot_contours(ax, params):
    samples = sample_obs(params, N_samples, inputs, layer_sizes)
    y_mean, y_cov = np.mean(samples, axis=0), np.cov(samples.T)

    approx_pdf = lambda x: mvn.logpdf(x, y_mean, y_cov)
    real_pdf = lambda x: mvn.logpdf(x, real_mean, real_cov)

    plot_isocontours(ax, approx_pdf, colors='r', label='approx')
    plot_isocontours(ax, real_pdf, colors='b', label='true')
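plot_isocontours is not defined in these snippets; a minimal grid-evaluation stand-in in the spirit of the autograd examples (signature assumed, legend handling omitted):

import numpy as np

def plot_isocontours(ax, func, xlim=(-4, 4), ylim=(-4, 4), numticks=101, **kwargs):
    kwargs.pop('label', None)    # legend support for contours varies by matplotlib version
    x, y = np.linspace(*xlim, numticks), np.linspace(*ylim, numticks)
    X, Y = np.meshgrid(x, y)
    zs = func(np.column_stack([X.ravel(), Y.ravel()]))   # density evaluated on the grid
    ax.contour(X, Y, zs.reshape(X.shape), **kwargs)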
Example #2
def target_lnpdf(theta):
    theta = np.atleast_2d(theta)
    target_lnpdf.counter += len(theta)
    y = np.zeros(len(theta))
    x = np.zeros(len(theta))
    # cumulative-angle forward kinematics; l holds the per-link lengths
    for i in range(num_dimensions):
        y += l[i] * np.sin(np.sum(theta[:, :i + 1], axis=1))
        x += l[i] * np.cos(np.sum(theta[:, :i + 1], axis=1))
    return normal_auto.logpdf(theta, prior_mean, prior_cov) \
        + normal_auto.logpdf(np.vstack([x, y]).transpose(),
                             likelihood_mean, likelihood_cov)
Example #3
def plot_contours(ax, params):
    samples = sample_bnn(params, N_samples, inputs, layer_sizes)
    y_mean, y_cov = np.mean(samples, axis=0), np.cov(samples.T)

    approx_pdf = lambda x: mvn.logpdf(x, y_mean, y_cov)
    real_pdf = lambda x: mvn.logpdf(x, real_mean, real_cov)

    plot_isocontours(ax, approx_pdf, colors='r', label='approx')
    plot_isocontours(ax, real_pdf, colors='b', label='true')

    gp_samples = sample_full_normal(real_mean, r, N_samples)
    ax.scatter(gp_samples[:, 0], gp_samples[:, 1], marker='x')
Example #4
def integrand(*args):
    """
    To avoid inverting a huge covariance matrix, evaluate the log
    marginal likelihood by quadrature; the observations are
    conditionally independent given the function draw f.
    """
    f = np.array(args)

    # UPDATE TO REFLECT DIMENSION
    likelihood = np.sum(mvn.logpdf(y, f, noise_variance * np.eye(t)))
    K = cov_func(cov_params, x, x)
    prior = mvn.logpdf(f, np.zeros(len(f)), K)
    return np.exp(likelihood + prior)
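For intuition, a one-dimensional self-contained version of the same idea (all names and numbers here are illustrative): integrate exp(log-likelihood + log-prior) over a grid of latent values instead of factorizing a covariance matrix.

import numpy as np
from scipy.integrate import trapezoid
from scipy.stats import norm

y_obs = np.array([0.3, -0.1, 0.2])        # observations, conditionally iid given f
noise_variance, prior_variance = 0.5, 1.0

f_grid = np.linspace(-5, 5, 2001)         # quadrature grid for the latent f
log_lik = norm.logpdf(y_obs[:, None], loc=f_grid,
                      scale=np.sqrt(noise_variance)).sum(axis=0)
log_prior = norm.logpdf(f_grid, scale=np.sqrt(prior_variance))
print(np.log(trapezoid(np.exp(log_lik + log_prior), f_grid)))   # approx. log marginal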
Example #5
def target_lnpdf(theta):
    theta = np.atleast_2d(theta)
    target_lnpdf.counter += len(theta)
    y = np.zeros(len(theta))
    x = np.zeros(len(theta))
    for i in range(num_dimensions):
        y += l[i] * np.sin(np.sum(theta[:, :i + 1], axis=1))
        x += l[i] * np.cos(np.sum(theta[:, :i + 1], axis=1))
    xy = np.vstack((x, y)).transpose()
    modes = [[0.7 * num_dimensions, 0], [-0.7 * num_dimensions, 0],
             [0, 0.7 * num_dimensions], [0, -0.7 * num_dimensions]]
    # per-sample max over the four modes (a max-mixture likelihood)
    likelihood = np.max(np.stack([normal_auto.logpdf(xy, m, likelihood_variance * np.eye(2))
                                  for m in modes]), axis=0)
    return np.squeeze(normal_auto.logpdf(theta, np.zeros(num_dimensions),
                                         prior_variance * np.eye(num_dimensions)) + likelihood)
Example #6
def log_pdf_prior(weights, prior_params, sd, type=None):
    if type is None:
        return diag_gaussian_log_density(weights, 0, np.log(sd))
    elif type == "diagonal":
        prior_mean, prior_log_std = prior_params
        return diag_gaussian_log_density(weights, prior_mean, prior_log_std)
    elif type == "full":
        prior_mean, prior_cov = prior_params
        return mvn.logpdf(weights, prior_mean, prior_cov)
    else:  # mixture-of-Gaussians prior
        prior_pis, prior_mus, prior_covs = prior_params
        # mix components in log space; a plain sum of the component
        # log-densities would multiply the densities instead
        component_lls = [np.log(pi) + mvn.logpdf(weights, mu, cov)
                         for pi, mu, cov in zip(prior_pis, prior_mus, prior_covs)]
        return logsumexp(np.vstack(component_lls), axis=0)
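As a sanity check on the mixture branch, mixing must happen in log space; a tiny standalone comparison with made-up weights and components:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal as mvn

w = np.array([0.3, 0.7])                    # illustrative mixture weights
mus = [np.zeros(2), np.ones(2)]
covs = [np.eye(2), 2 * np.eye(2)]
x = np.array([0.5, 0.5])

lls = [np.log(pi) + mvn.logpdf(x, mu, cov) for pi, mu, cov in zip(w, mus, covs)]
direct = np.log(sum(pi * mvn.pdf(x, mu, cov) for pi, mu, cov in zip(w, mus, covs)))
assert np.isclose(logsumexp(lls), direct)   # logsumexp mixes correctly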
Example #7
def target_lnpdf(theta):
    theta = np.atleast_2d(theta)
    target_lnpdf.counter += len(theta)
    cluster_lls = []
    for i in range(num_true_components):
        cluster_lls.append(np.log(1. / num_true_components)
                           + normal_auto.logpdf(theta, means[i], covs[i]))
    return np.squeeze(logsumexp(np.vstack(cluster_lls), axis=0))
Example #8
def log_likelihood(all_params):  # implement mini batches later?
    n_samples = 1
    samples = [sample_mean_cov_from_deep_gp(all_params, X, True)
               for i in range(n_samples)]
    # jitter scaled by the largest variance keeps the covariance well conditioned
    lls = np.array([mvn.logpdf(y, mean, var + 1e-6 * np.eye(len(var)) * np.max(np.diag(var)))
                    for mean, var in samples])
    return logsumexp(lls) - np.log(n_samples) + evaluate_prior(all_params)
Example #9
def test_log_likelihood(all_params, X, y, n_samples):
    rs = npr.RandomState(0)
    samples = [sample_mean_cov_from_deep_gp(all_params, X, True, rs, FITC=True)
               for i in range(n_samples)]
    return logsumexp(np.array([mvn.logpdf(y, mean, var) for mean, var in samples]))
Example #10
def log_marginal_likelihood(params, data):
    cluster_lls = []
    for log_proportion, mean, chol in zip(*unpack_params(params)):
        cov = np.dot(chol.T, chol) + 1e-6 * np.eye(D)   # jitter for stability
        cluster_log_likelihood = log_proportion + mvn.logpdf(data, mean, cov)
        cluster_lls.append(np.expand_dims(cluster_log_likelihood, axis=0))
    cluster_lls = np.concatenate(cluster_lls, axis=0)
    return np.sum(logsumexp(cluster_lls, axis=0))
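For reference, the same computation in self-contained form, with two fixed clusters in place of unpack_params (all numbers illustrative):

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal as mvn

data = np.random.randn(100, 2)
log_proportions = np.log([0.4, 0.6])
means = [np.zeros(2), np.ones(2)]
covs = [np.eye(2), 0.5 * np.eye(2)]

cluster_lls = np.stack([lp + mvn.logpdf(data, m, c)
                        for lp, m, c in zip(log_proportions, means, covs)])
print(np.sum(logsumexp(cluster_lls, axis=0)))   # log marginal likelihood of the data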
Example #11
def log_marginal_likelihood_full(params, x, y, weights=None):
    if weights is None:
        weights = np.ones(len(y))
    mean, cov_params, noise_variance = unpack_kernel_params(params)
    # a larger weight shrinks the effective noise variance for that point
    cov_y_y = cov_func(cov_params, x, x) + noise_variance * np.diag(1 / weights)
    prior_mean = mean * np.ones(len(y))
    return mvn.logpdf(y, prior_mean, cov_y_y)
Example #12
def expected_like(params, N_samples, x, layer_sizes, mean, chol):
    y = sample_obs(params, N_samples, x, layer_sizes)
    mu = np.dot(rs.randn(N_samples, mean.shape[0]), chol) + mean
    # list comprehension instead of map() so this also works under Python 3
    return np.mean(np.array([np.mean(mvn.logpdf(y, s, noise_var * np.eye(mean.shape[0])))
                             for s in mu]))
Example #13
def log_likelihood(all_params, X, y, n_samples):
    rs = npr.RandomState(0)
    samples = [sample_mean_cov_from_deep_gp(all_params, X, True, rs, FITC=True)
               for i in range(n_samples)]
    return (logsumexp(np.array([mvn.logpdf(y, mean, var) for mean, var in samples]))
            - np.log(n_samples)
            + evaluate_prior(all_params))
Example #14
def log_marginal_likelihood(params, x, y):
    """
    Computes log p(y|X) = log N(y | mean·1, K + noise_scale·I).
    """
    mean, cov_params, noise_scale = unpack_kernel_params(params)
    cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
    prior_mean = mean * np.ones(len(y))
    return mvn.logpdf(y, prior_mean, cov_y_y)
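A self-contained call of the same quantity, with a concrete squared-exponential kernel standing in for cov_func (all values illustrative):

import numpy as np
from scipy.stats import multivariate_normal as mvn

def rbf(lengthscale, xa, xb):
    # squared-exponential kernel
    d = xa[:, None] - xb[None, :]
    return np.exp(-0.5 * (d / lengthscale) ** 2)

x = np.linspace(0, 5, 20)
y = np.sin(x)
K = rbf(1.0, x, x) + 0.1 * np.eye(len(y))   # kernel plus noise on the diagonal
print(mvn.logpdf(y, np.zeros(len(y)), K))   # log p(y | X)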
Example #16
def lnq_grid(params):
    assert D == 2
    xg, yg = np.linspace(-4, 4, 50), np.linspace(-4, 4, 50)
    xx, yy = np.meshgrid(xg, yg)
    pts = np.column_stack([xx.ravel(), yy.ravel()])
    zs, ldets = feed_forward(pts, params, layers)
    # change of variables: subtract the flow's log-determinant correction
    lls = mvn.logpdf(pts, mean=np.zeros(D), cov=np.eye(D)) - ldets
    return zs[:, 0].reshape(xx.shape), zs[:, 1].reshape(yy.shape), lls.reshape(xx.shape)
Example #17
def evaluate_prior(all_params):  # clean up code so we don't compute matrices twice
    layer_params, x0, y0 = unpack_all_params(all_params)
    log_prior = 0
    for layer in range(n_layers):
        mean, cov_params, noise_scale = unpack_kernel_params(layer_params[layer])
        cov_y_y = covariance_function(cov_params, x0[layer], x0[layer]) \
            + noise_scale * np.eye(len(y0[layer]))
        log_prior += mvn.logpdf(y0[layer], np.ones(len(cov_y_y)) * mean,
                                cov_y_y + np.eye(len(cov_y_y)) * 10)
    return log_prior
Example #18
def plot_posterior_contours(mean_params, logstd_params):
    plt.clf()
    logprob_adj = lambda two_params: logprob_given_two(mean_params, two_params)
    plot_isocontours(ax, logprob_adj, cmap='Blues')
    mean_2d = mean_params[contourK, [px1, px2]]
    logstd_2d = logstd_params[contourK, [px1, px2]]
    variational_contour = lambda x: mvn.logpdf(x, mean_2d, np.diag(np.exp(2 * logstd_2d)))
    plot_isocontours(ax, variational_contour, cmap='Reds')
    plt.draw()
    plt.pause(10)
Example #19
def evaluate_prior(all_params):  # clean up code so we don't compute matrices twice
    all_layer_params = unpack_all_params(all_params)
    log_prior = 0
    deep_map = create_deep_map(all_params)
    for layer, layer_map in deep_map.items():
        for unit, gp_map in layer_map.items():
            cov_y_y = covariance_function(gp_map['cov_params'], gp_map['x0'], gp_map['x0']) \
                + gp_map['noise_scale'] * np.eye(len(gp_map['y0']))
            log_prior += mvn.logpdf(gp_map['y0'],
                                    np.ones(len(cov_y_y)) * gp_map['mean'],
                                    cov_y_y)
    return log_prior
Example #20
def evaluate_prior(all_params):  # clean up code so we don't compute matrices twice
    all_layer_params = unpack_all_params(all_params)
    log_prior = 0
    deep_map = create_deep_map(all_params)
    for layer, layer_map in deep_map.items():
        for unit, gp_map in layer_map.items():
            cov_y_y = covariance_function(gp_map['cov_params'], gp_map['x0'], gp_map['x0']) \
                + gp_map['noise_scale'] * np.eye(len(gp_map['y0']))
            log_prior += mvn.logpdf(gp_map['y0'],
                                    np.ones(len(cov_y_y)) * gp_map['mean'],
                                    cov_y_y)
    return 0  # log_prior intentionally disabled here
Example #21
def evaluate_prior(all_params):  # clean up code so we don't compute matrices twice
    all_layer_params = unpack_all_params(all_params)
    log_prior = 0
    for layer in range(n_layers):
        layer_params = all_layer_params[layer]
        layer_gp_params = unpack_layer_params[layer](layer_params)
        for dim in range(dimensions[layer + 1]):
            gp_params = layer_gp_params[dim]
            mean, cov_params, noise_scale, x0, y0 = unpack_gp_params_all[layer][dim](gp_params)
            cov_y_y = covariance_function(cov_params, x0, x0) + noise_scale * np.eye(len(y0))
            log_prior += mvn.logpdf(y0, np.ones(len(cov_y_y)) * mean,
                                    cov_y_y + np.eye(len(cov_y_y)) * 10)
    return log_prior
Example #22
def elbo(y, phi, lam, pi, psi, sigma2s, mus, Sigmas, kernel_params):
    """
    phi [N, K] sample membership (cell line cluster)
    lam [G, L] feature membership (expression cluster)
    pi [K] sample mixture weights
    psi [L] feature mixture weights
    y [N, G, T] data
    mus [K, L, T] means
    sigma2s [K, L] observation noise variances
    Sigmas [K, L, T, T] variational covariances
    kernel_params [K, L] GP kernel parameters
    """
    likelihood = 0
    # data likelihood
    for l in range(L):
        for k in range(K):
            ll = np.sum(np.nan_to_num(norm.logpdf(
                y, mus[k, l], np.sqrt(sigma2s[k, l]))), axis=-1)
            ll = ll - 0.5 * (np.trace(Sigmas[k, l] / sigma2s[k, l]))
            ll = ll * phi[:, k][:, np.newaxis]
            ll = ll * lam[:, l]
            likelihood = likelihood + np.sum(ll)

    # assignment likelihood
    likelihood = likelihood + np.sum(np.log(pi) * phi)
    likelihood = likelihood + np.sum(np.log(psi) * lam)

    # function likelihood
    for k in range(K):
        for l in range(L):
            Ker = cov_func(kernel_params[k, l], inputs, inputs)
            likelihood = likelihood \
                + mvn.logpdf(mus[k, l], np.zeros(T), Ker) \
                - 0.5 * np.trace(solve(Ker, Sigmas[k, l]))

    entropy = np.sum(list(map(multinomial_entropy, phi)) +
                     list(map(multinomial_entropy, lam)))
    for k in range(K):
        for l in range(L):
            entropy = entropy + mvn.entropy(mus[k, l], Sigmas[k, l])

    return likelihood + entropy
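The entropy term uses scipy's closed-form Gaussian entropy; a quick standalone check against the analytic formula 0.5·log det(2πeΣ):

import numpy as np
from scipy.stats import multivariate_normal as mvn

T = 3
Sigma = np.diag([1.0, 2.0, 0.5])
analytic = 0.5 * np.log(np.linalg.det(2 * np.pi * np.e * Sigma))
assert np.isclose(mvn.entropy(np.zeros(T), Sigma), analytic)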
Example #24
def log_posterior(x, inputs, len_sc, variance, t):
    """An example 2D intractable distribution:
    a Gaussian evaluated at zero with a Gaussian prior on the log-variance."""
    N = x.shape
    sum_prob = 0
    params = [0, len_sc, variance, -0.24707744]
    for i in range(N[0]):
        mu = x[i][:]
        pred_mean, pred_cov = predict(params, inputs, mu, X)
        prior = log_marginal_likelihood(params, inputs, mu)
        posterior = mvn.logpdf(y, pred_mean, pred_cov, True)
        sum_prob = sum_prob + posterior + prior   # accumulate inside the loop
    return sum_prob
Example #25
def elbo(prior_params, qa_params, X, y, f_samples, arch, act, noise):
    fs = sample_bnn(prior_params, X, f_samples, arch, act)
    m = np.mean(fs, axis=0, keepdims=True)
    qa_mean, qa_Sigma = qa_params

    # m, K_ff = prior_mean_cov(prior_params, X, f_samples, arch, act)
    # a_samples = sample_full_normal(qa_posterior_moments(m, K_ff, y, noise),1)
    # qa_mean, qa_Sigma = qa_posterior_moments(m, K_ff, y, noise)
    a_samples = sample_normal(qa_params, 1)
    mean = a_samples * (fs - m) / unbiased(fs)

    log_qy = diag_gaussian_log_density(y, mean, noise)
    log_qa = mvn.logpdf(a_samples, qa_mean, qa_Sigma)
    log_pa = diag_gaussian_log_density(a_samples, 0, 1)

    return np.mean(log_qy - log_qa + log_pa)
Example #26
def target_lnpdf(theta, without_prior=False):
    theta = np.atleast_2d(theta)
    target_lnpdf.counter += len(theta)
    weighted_sum = np.dot(theta, X.transpose())
    # numerically stable log-sigmoid via the max trick
    offset = np.maximum(weighted_sum, np.zeros(weighted_sum.shape))
    denominator = offset + np.log(np.exp(weighted_sum - offset) + np.exp(-offset))
    log_prediction = -denominator
    swapped_y = -(y - 1)
    log_prediction = log_prediction + swapped_y[np.newaxis, :] * weighted_sum

    if np.any(np.isnan(log_prediction)) or np.any(np.isinf(log_prediction)):
        print('nan')
    loglikelihood = np.sum(log_prediction, axis=1)
    if without_prior:
        return np.squeeze(loglikelihood)
    return np.squeeze(normal_auto.logpdf(theta, prior_mean, prior_cov) + loglikelihood)
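The offset trick above is a numerically stable softplus, so log_prediction is the log-sigmoid of weighted_sum; a quick check against the naive form (safe only for moderate values):

import numpy as np

z = np.array([-3.0, 0.0, 3.0])
offset = np.maximum(z, 0)
stable = offset + np.log(np.exp(z - offset) + np.exp(-offset))   # softplus(z)
assert np.allclose(stable, np.log1p(np.exp(z)))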
Example #27
def kl_estimate(params, n_samples, arch, act):
    prior_params, noise, kernel_params, x = params
    x = sample_inputs('gridbox', 100, (0, 10))   # note: overrides the unpacked x
    y = sample_bnn(prior_params, x, n_samples, arch, act, noise)  # [nf, nd]
    f = sample_bnn(prior_params, x, n_samples, arch, act)

    w = sample_weights(prior_params, 1)
    mu, log_std = prior_params
    log_prior = diag_gaussian_log_density(w, mu, log_std)
    log_likelihood = diag_gaussian_log_density(y, f, noise)

    jitter = 1e-7 * np.eye(y.shape[0])   # keeps the kernel matrix positive definite
    cov = covariance(kernel_params, x, x) + jitter
    log_pgp = mvn.logpdf(y, np.zeros(y.shape[1]), cov)

    return np.mean(log_likelihood + log_prior - log_pgp)
Example #28
def log_marginal_likelihood(params, x, y):
    mean, cov_params, noise_scale = unpack_kernel_params(params)
    cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
    prior_mean = mean * np.ones(len(y))
    return mvn.logpdf(y, prior_mean, cov_y_y, True)  # True -> allow_singular
Example #30
def logdensity(x, var_param):
    mean, beta = unpack_params(var_param)
    L = beta_to_L(beta[:, np.newaxis])
    Sigma = L @ L.T   # full covariance from its Cholesky factor
    return mvn.logpdf(x, mean, Sigma)
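beta_to_L is assumed to build a lower-triangular factor from the unconstrained vector; a hypothetical stand-in using np.tril_indices shows why L @ L.T is always a valid covariance:

import numpy as np
from scipy.stats import multivariate_normal as mvn

D = 3
beta = np.random.randn(D * (D + 1) // 2)    # unconstrained parameters
L = np.zeros((D, D))
L[np.tril_indices(D)] = beta                # hypothetical beta_to_L stand-in
L[np.diag_indices(D)] = np.exp(np.diag(L))  # positive diagonal -> full rank
Sigma = L @ L.T                             # positive definite by construction
print(mvn.logpdf(np.zeros(D), np.zeros(D), Sigma))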
Example #31
def gmm_log_likelihood(params, data):
    cluster_lls = []
    for log_proportion, mean, cov_sqrt in zip(*unpack_gmm_params(params)):
        cov = np.dot(cov_sqrt.T, cov_sqrt)
        cluster_lls.append(log_proportion + mvn.logpdf(data, mean, cov))
    return np.sum(logsumexp(np.vstack(cluster_lls), axis=0))
Example #32
def kl_estimate(params, N_samples, x, layer_sizes, mean, cov):
    y = sample_obs(params, N_samples, x, layer_sizes)
    return -entropy_estimate(y) - np.mean(mvn.logpdf(y, mean, cov))
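This estimator uses the identity KL(q‖p) = −H[q] − E_q[log p]; a standalone check on two Gaussians, with the exact entropy in place of entropy_estimate:

import numpy as np
from scipy.stats import multivariate_normal as mvn

rng = np.random.default_rng(0)
mean_q, cov_q = np.zeros(2), np.eye(2)
mean_p, cov_p = np.ones(2), 2 * np.eye(2)

y = rng.multivariate_normal(mean_q, cov_q, size=200000)
kl_mc = -mvn.entropy(mean_q, cov_q) - np.mean(mvn.logpdf(y, mean_p, cov_p))

d = mean_p - mean_q                          # closed form for comparison (k = 2)
kl_exact = 0.5 * (np.trace(np.linalg.solve(cov_p, cov_q))
                  + d @ np.linalg.solve(cov_p, d)
                  - 2 + np.log(np.linalg.det(cov_p) / np.linalg.det(cov_q)))
print(kl_mc, kl_exact)                       # should agree closely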
Example #33
def logdensity(x, var_param):
    mean, log_std = unpack_params(var_param)
    return mvn.logpdf(x, mean, np.diag(np.exp(2 * log_std)))
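With a diagonal covariance this is equivalent to summing independent univariate log-densities, which avoids building the D×D matrix:

import numpy as np
from scipy.stats import multivariate_normal as mvn, norm

mean = np.array([0.0, 1.0])
log_std = np.array([0.1, -0.3])
x = np.array([0.2, 0.8])

full = mvn.logpdf(x, mean, np.diag(np.exp(2 * log_std)))
diag = np.sum(norm.logpdf(x, mean, np.exp(log_std)))
assert np.isclose(full, diag)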
Example #34
def log_var_approx(self, z, params):
    mu, log_sigma = self.unpack_params(params)
    # jitter belongs on the diagonal only, not on every matrix entry
    sigma = np.diag(np.exp(2 * log_sigma) + 1e-6)
    return mvn.logpdf(z, mu, sigma)
Example #36
def log_marginal_likelihood(params, x, y):
    mean, cov_params, noise_scale = unpack_kernel_params(params)
    cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
    prior_mean = mean * np.ones(len(y))
    return mvn.logpdf(y, prior_mean, cov_y_y)
Example #37
def log_marginal(x, y):
    cov_y_y = RBF(x, x) + noise_scale * np.eye(len(y))
    return np.sum(mvn.logpdf(y, np.zeros(len(y)), cov_y_y, True))  # True -> allow_singular
Example #38
def log_cond(x, y, xstar, ystar):
    pred_mean, pred_cov = conditional(x, y, xstar)
    return np.sum(mvn.logpdf(ystar, np.reshape(pred_mean, -1), pred_cov))
Example #39
def _log_prior_x(self, X):
    """Return the log prior for `X`."""
    return ag_mvn.logpdf(X, self.mu_x, self.cov_x).sum()
Example #40
def make_mvn_lowrank_marginal(mean, C, s_diag, marg_idx):
    mu_marg, Sigma_marg = mvn_lowrank_params(mean, C, s_diag, marg_idx)
    return lambda x: mvn.logpdf(
        x, mean=mu_marg, cov=Sigma_marg, allow_singular=True)
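allow_singular=True matters here because a covariance restricted to a subset of indices can be rank-deficient; a toy demonstration (names made up) with a rank-1 covariance:

import numpy as np
from scipy.stats import multivariate_normal as mvn

C = np.random.randn(4, 1)                   # low-rank factor
Sigma = C @ C.T                             # rank-1, hence singular
x = C @ np.random.randn(1)                  # a point inside the support
print(mvn.logpdf(x, mean=np.zeros(4), cov=Sigma, allow_singular=True))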