Example #1
 def log_posterior(x, t):
     """An example 2D intractable distribution:
     a Gaussian evaluated at zero with a Gaussian prior on the log-variance."""
     mu, log_sigma = x[:, 0], x[:, 1]
     prior       = norm.logpdf(log_sigma, 0, 1.35)
     likelihood  = norm.logpdf(mu,        0, np.exp(log_sigma))
     return prior + likelihood
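Run in isolation, the snippet needs autograd-style imports; a minimal self-contained sketch (the imports and the two sample rows are assumptions, not part of the original project):

import autograd.numpy as np
from autograd.scipy.stats import norm

def log_posterior(x, t):
    mu, log_sigma = x[:, 0], x[:, 1]
    prior = norm.logpdf(log_sigma, 0, 1.35)
    likelihood = norm.logpdf(mu, 0, np.exp(log_sigma))
    return prior + likelihood

samples = np.array([[0.0, 0.0], [0.5, -1.0]])   # rows are (mu, log_sigma) pairs
print(log_posterior(samples, t=0))              # one log-density per row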
Example #2
 def log_density(x):
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -0.5, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 0.5, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
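The two mixture components above are combined with np.logaddexp, which evaluates log(exp(a) + exp(b)) without leaving log space; a quick standalone check (the sample point is made up):

import numpy as np
from scipy.stats import norm

x_, y_ = 0.4, -0.3
comp1 = norm.logpdf(y_, 0, 1.35) + norm.logpdf(x_, -0.5, np.exp(y_))
comp2 = norm.logpdf(y_, 0.1, 1.35) + norm.logpdf(x_, 0.5, np.exp(y_))
assert np.isclose(np.logaddexp(comp1, comp2),
                  np.log(np.exp(comp1) + np.exp(comp2)))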
Example #3
 def log_density(x, t):
     mu, log_sigma = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(log_sigma, 0, 1.35)
     mu_density = norm.logpdf(mu, -0.5, np.exp(log_sigma))
     sigma_density2 = norm.logpdf(log_sigma, 0.1, 1.35)
     mu_density2 = norm.logpdf(mu, 0.5, np.exp(log_sigma))
     return np.logaddexp(sigma_density + mu_density,
                         sigma_density2 + mu_density2)
Example #4
 def log_density(x):
     '''
     x: [n_samples, D]
     return: [n_samples]
     '''
     x_, y_ = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(y_, 0, 1.35)
     mu_density = norm.logpdf(x_, -2.2, np.exp(y_))
     sigma_density2 = norm.logpdf(y_, 0.1, 1.35)
     mu_density2 = norm.logpdf(x_, 2.2, np.exp(y_))
     return np.logaddexp(sigma_density + mu_density, sigma_density2 + mu_density2)
Example #5
    def _log_hazard(self, params, T, *Xs):
        mu_params = params[self._LOOKUP_SLICE["mu_"]]
        mu_ = np.dot(Xs[0], mu_params)

        sigma_params = params[self._LOOKUP_SLICE["sigma_"]]

        log_sigma_ = np.dot(Xs[1], sigma_params)
        sigma_ = np.exp(log_sigma_)
        Z = (np.log(T) - mu_) / sigma_

        return norm.logpdf(Z) - log_sigma_ - np.log(T) - logsf(Z)
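The return value is the log-hazard of a log-normal lifetime, log h(t) = log f(t) - log S(t), where log f(t) = norm.logpdf(Z) - log(sigma) - log(t) and log S(t) = logsf(Z). A hedged numerical check against scipy's lognorm (the parameter values are made up):

import numpy as np
from scipy.stats import norm, lognorm

mu_, sigma_, t = 0.3, 0.8, 2.0
Z = (np.log(t) - mu_) / sigma_
log_hazard = norm.logpdf(Z) - np.log(sigma_) - np.log(t) - norm.logsf(Z)

dist = lognorm(s=sigma_, scale=np.exp(mu_))
assert np.isclose(log_hazard, dist.logpdf(t) - dist.logsf(t))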
Example #6
 def log_density(x, t):
     # Y (the observations) and obs_dim are free names from the enclosing scope.
     mu, log_sigma = x[:, :obs_dim], x[:, obs_dim:]
     sigma_density = np.sum(norm.logpdf(log_sigma, 0, 1.35), axis=1)
     mu_density    = np.sum(norm.logpdf(Y, mu, np.exp(log_sigma)), axis=1)
     return sigma_density + mu_density
Example #7
File: gplvm.py (project: CamZHU/autograd)
 def objective(params):
     gp_params, latents = unpack_params(params)
     gp_likelihood = sum([log_marginal_likelihood(gp_params[i], latents, data[:, i])
                          for i in range(data_dimension)])
     latent_prior_likelihood = np.sum(norm.logpdf(latents))
     return -gp_likelihood - latent_prior_likelihood
Example #8
from __future__ import print_function
from __future__ import division
import autograd.numpy as np
from scipy.stats import norm as _scipy_norm
from autograd.scipy.stats import norm
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f
from lifelines.fitters import KnownModelParametericUnivariateFitter


logsf = primitive(_scipy_norm.logsf)

defvjp(
    logsf,
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        x, lambda g: -g * np.exp(norm.logpdf(x, loc, scale) - logsf(x, loc, scale))
    ),
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        loc, lambda g: g * np.exp(norm.logpdf(x, loc, scale) - logsf(x, loc, scale))
    ),
    lambda ans, x, loc=0.0, scale=1.0: unbroadcast_f(
        scale, lambda g: g * np.exp(norm.logpdf(x, loc, scale) - logsf(x, loc, scale)) * (x - loc) / scale
    ),
)
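The defvjp call registers gradients of the wrapped scipy logsf with respect to x, loc and scale, so autograd can differentiate through it. A quick check (not from the original project) that the x-gradient matches the analytic derivative d/dx log S(x) = -pdf(x) / S(x):

from autograd import grad

g = grad(lambda x: logsf(x))(0.7)
assert np.isclose(g, -np.exp(norm.logpdf(0.7) - logsf(0.7)))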


class LogNormalFitter(KnownModelParametericUnivariateFitter):
    r"""
    This class implements a Log Normal model for univariate data. The model has the parameterized
    form:
Example #9
def log_gaussian(params, scale):
    flat_params, _ = flatten(params)
    return np.sum(norm.logpdf(flat_params, 0, scale))
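flatten here is presumably autograd.misc.flatten, which concatenates an arbitrary nested container of arrays into one vector; a self-contained sketch (the imports and the toy params dict are assumptions):

import autograd.numpy as np
from autograd.scipy.stats import norm
from autograd.misc.flatten import flatten

def log_gaussian(params, scale):
    flat_params, _ = flatten(params)
    return np.sum(norm.logpdf(flat_params, 0, scale))

params = {'W': np.zeros((2, 3)), 'b': np.zeros(3)}
print(log_gaussian(params, scale=1.0))   # log prior over all nine parameters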
Example #10
 def log_density(x, t):
     mu, log_sigma = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(log_sigma, 0, 1.35)
     mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
     return sigma_density + mu_density
Example #11
File: vae.py (project: mwetzel7r/autocog)
def diag_gaussian_log_density(x, mu, log_std):
    return np.sum(norm.logpdf(x, mu, np.exp(log_std)), axis=-1)
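For a diagonal Gaussian the summed per-dimension log-densities equal the joint log-density; a hedged check against scipy (the test point is made up, not from vae.py):

import numpy as np
from scipy.stats import norm, multivariate_normal

def diag_gaussian_log_density(x, mu, log_std):
    return np.sum(norm.logpdf(x, mu, np.exp(log_std)), axis=-1)

x = np.array([0.3, -1.2])
mu = np.zeros(2)
log_std = np.array([0.0, 0.5])
cov = np.diag(np.exp(2 * log_std))
assert np.isclose(diag_gaussian_log_density(x, mu, log_std),
                  multivariate_normal(mu, cov).logpdf(x))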
Example #12
 def objective(params):
     gp_params, latents = unpack_params(params)
     gp_likelihood = sum([log_marginal_likelihood(gp_params[i], latents, data[:, i])
                          for i in range(data_dimension)])
     latent_prior_likelihood = np.sum(norm.logpdf(latents))
     return -gp_likelihood - latent_prior_likelihood
Example #13
 def log_density(x, t):
     mu, log_sigma = x[:, 0], x[:, 1]
     sigma_density = norm.logpdf(log_sigma, 0, 1.35)
     mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
     return sigma_density + mu_density
Example #14
 def _log_hazard(self, params, times):
     mu_, sigma_ = params
     Z = (np.log(times) - mu_) / sigma_
     return norm.logpdf(
         Z, loc=0, scale=1) - np.log(sigma_) - np.log(times) - norm.logsf(Z)
Example #15
def log_gaussian(x, mu, std):
    return np.mean(norm.logpdf(x, mu, std))
Example #16
def diag_gaussian_log_density(x, mu, log_std):
    return np.sum(norm.logpdf(x, mu, np.exp(log_std)), axis=-1)
Example #17
 def optimize_gp_params(init_params, X, y):
     log_hyperprior = lambda params: np.sum(norm.logpdf(params, 0., 100.))
     objective = lambda params: -log_marginal_likelihood(params, X, y) - log_hyperprior(params)
     return minimize(value_and_grad(objective), init_params, jac=True, method='CG').x
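The same value_and_grad plus minimize(..., jac=True) pattern works for any autograd-differentiable objective; a runnable sketch on a toy quadratic (the quadratic target is an assumption, only there to keep the example self-contained):

import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize

def objective(params):
    return np.sum((params - np.array([1.0, -2.0])) ** 2)

result = minimize(value_and_grad(objective), np.zeros(2), jac=True, method='CG')
print(result.x)   # approximately [1, -2]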
Example #18
 def logprob(weights, inputs, targets):
     # weight_scale, noise_scale and predictions() are free names from the enclosing scope.
     log_prior = np.sum(norm.logpdf(weights, 0, weight_scale))
     preds = predictions(weights, inputs)
     log_lik = np.sum(norm.logpdf(preds, targets, noise_scale))
     return log_prior + log_lik
Example #19
def logprob(weights, inputs, targets, noise_scale=0.1):
    predictions = nn_predict(weights, inputs)
    return np.sum(norm.logpdf(predictions, targets, noise_scale))
Example #20
 def log_density(self, x):
     return np.sum(norm.logpdf(x, self.mu, self.std))
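The method reads self.mu and self.std, so it needs an owning object; a minimal context sketch (the DiagGaussian class is hypothetical, only to show usage):

import autograd.numpy as np
from autograd.scipy.stats import norm

class DiagGaussian:
    def __init__(self, mu, std):
        self.mu, self.std = mu, std

    def log_density(self, x):
        return np.sum(norm.logpdf(x, self.mu, self.std))

d = DiagGaussian(mu=np.zeros(3), std=np.ones(3))
print(d.log_density(np.array([0.1, -0.2, 0.3])))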