Example #1
0
def logprior(var_par, draw, k):
    """Log prior density of a reparameterized draw for a mixture of multinomials.

    `var_par` stacks variational means and log standard deviations, `draw` is
    a standard-normal draw (reparameterization trick), and `k` is the number
    of mixture components.
    """
    # Category count per component, inferred from the flat parameter length.
    n_cat = int((len(var_par) / 2 - k) / k)
    half = int(len(var_par) / 2)
    mean, scale = var_par[:half], np.exp(var_par[half:])
    # Transform the standard draw into a draw from the variational posterior.
    sample = draw * scale + mean
    # First k entries become mixture weights; the rest become row-stochastic
    # multinomial parameter matrices, both via softmax.
    weights = softmax(sample[:k])
    probs = softmax(sample[k:].reshape([k, n_cat]), axis=1)
    # Uniform Dirichlet priors plus the log-Jacobian (ldt) of each softmax.
    per_row = [dirichlet.logpdf(row, np.ones(n_cat)) + ldt(row)
               for row in probs]
    return dirichlet.logpdf(weights, np.ones(k)) + np.sum(per_row) + ldt(weights)
Example #2
0
 def log_prior(self):
     """Sum of sticky-Dirichlet log densities over the transition-matrix rows.

     Row k's concentration is `alpha * ones` plus `kappa` added on the
     diagonal entry, encouraging self-transitions.
     """
     diag = np.arange(self.nb_states)
     total = 0
     for state in range(self.nb_states):
         concentration = (self.prior['alpha'] * np.ones(self.nb_states)
                          + self.prior['kappa'] * (diag == state))
         total += dirichlet.logpdf(self.matrix[state], concentration)
     return total
Example #3
0
def logprior(var_par, draw, k, a=None, b=None, d=None):
    """Log prior of a reparameterized draw for a mixture of factor analyzers.

    Args (as used below; `a` and `b` are accepted but never read here):
        var_par: concatenation of variational means and log-stds (length 2*l).
        draw: standard-normal draw of length l (reparameterization trick).
        k: number of mixture components.
        d: data dimensionality; required despite the None default — passing
           d=None raises a TypeError in the `q` computation below.

    Returns the summed log prior plus the log-Jacobian terms of the
    stick-breaking and exp transforms.
    """
    # Log of prior probabilities
    l = int(len(var_par) / 2)
    # Factors per component, inferred from the flat parameter count; assumes
    # the layout produced by get_pars — TODO confirm against its definition.
    q = ((l + 1) / k - 2 - d) / (d + 1)
    q = int(q)
    mu, cov = var_par[:l], np.exp(var_par[l:])
    # Reparameterized sample from the variational posterior.
    samples = draw * cov + mu
    pars = get_pars(samples, k, d, q)

    #pi = softmax(pars['pi'])
    # Mixture weights via the inverse stick-breaking transform.
    pi = stick_backward(pars['pi'])

    mus = pars['mu'].reshape([k, d])        # component means

    alphas = pars['alpha'].reshape([k, q])  # log ARD precisions per factor

    tau = pars['tau']                       # log noise precisions

    W = pars['W'].reshape([k, d * q])       # flattened factor loadings

    logp = 0
    for j, W_ in enumerate(W):
        W_ = W_.reshape([d, q])
        tau_ = tau[j]
        # Broad Gaussian prior on the mean, ARD Gaussian prior on each
        # loading column, Gamma priors on the exponentiated precisions; the
        # trailing `+ alphas[j]` / `+ tau_` terms are the log-Jacobians of
        # the exp transform applied to the unconstrained precisions.
        logp += multi_lpdf(
            mus[j], mean=np.zeros(d), cov=1e3 * np.eye(d)) + sum([
                multi_lpdf(W_[:, i], mean=np.zeros(d), cov=np.eye(d) / alpha)
                for i, alpha in enumerate(np.exp(alphas[j]))
            ]) + sum(lgamma(np.exp(alphas[j]), 1e-3, 1e3) +
                     alphas[j]) + lgamma(np.exp(tau_), 1e-3, 1e3) + tau_
    #return logp + dirichlet.logpdf(pi, np.ones(k)) + ldt(pi)
    # Uniform Dirichlet on the weights plus the stick-breaking log-Jacobian.
    return logp + dirichlet.logpdf(pi, np.ones(k)) + np.log(
        np.abs(stick_jacobian_det(pars['pi'])))
Example #4
0
    def log_prior(self):
        """Sticky-Dirichlet log prior over the rows of the transition matrix."""
        K = self.K
        # Normalize the unconstrained log weights into row-stochastic
        # transition probabilities.
        Ps = np.exp(self.log_Ps - logsumexp(self.log_Ps, axis=1, keepdims=True))

        diag = np.arange(K)
        total = 0
        for row in range(K):
            # Extra kappa mass on the diagonal favors self-transitions.
            concentration = self.alpha * np.ones(K) + self.kappa * (diag == row)
            total += dirichlet.logpdf(Ps[row], concentration)
        return total
Example #5
0
def logprior(sample, k):
    """Log prior of a flat sample for a k-component Gaussian mixture.

    Layout of `sample`: k softmax logits for the weights, then the stacked
    component means, then k log precisions.

    NOTE(review): `alpha` and `beta` are read from module globals — confirm
    they are defined where this runs.
    """
    weights = softmax(sample[:k])
    concentration = np.ones(weights.shape[0])
    rest = sample[k:]
    n_dims = int((rest.shape[0] - k) / k)
    mus = rest[:-k].reshape([k, n_dims])
    log_taus = rest[-k:]
    total = 0
    for idx, log_tau in enumerate(log_taus):
        # Inverse-gamma prior on the precision (the bare `+ log_tau` is the
        # exp log-Jacobian) and a standard-normal prior on the component mean.
        # NOTE(review): the k/2 normalizer looks like it should use the mean
        # dimension — kept as-is to preserve behavior; verify upstream.
        total += linvgamma(np.exp(log_tau), alpha, beta) + log_tau - k / 2 * np.log(
            2 * np.pi) - 0.5 * np.dot(mus[idx], mus[idx])
    # Uniform Dirichlet on the weights plus the softmax log-Jacobian (ldt).
    return total + dirichlet.logpdf(weights, concentration) + ldt(weights)
Example #6
0
 def p_log_prob(self, idx, z):
     """Scaled mixture log-likelihood of minibatch `idx` plus the log prior.

     `z` maps unconstrained parameters: 'mu' (means), 'tau' (precisions,
     mapped through softplus), 'pi' (weights, mapped through stick-breaking).
     """
     batch = self.data[idx]
     means = z['mu']
     precisions = softplus(z['tau'])
     weights = stick_breaking(z['pi'])

     # Priors, each with the log-Jacobian of its transform where applicable.
     prior = np.sum(gamma_logpdf(precisions, 1e-5, 1e-5) + np.log(jacobian_softplus(z['tau'])))
     prior += np.sum(norm.logpdf(means, 0, 1.))
     prior += dirichlet.logpdf(weights, 1e3 * np.ones(self.clusters)) + np.log(jacobian_stick_breaking(z['pi']))

     # Per-cluster joint log density of every point in the batch (K x N).
     rows = [np.log(weights[c]) + np.sum(norm.logpdf(batch, means[(c * self.D):((c + 1) * self.D)],
                         np.full([self.D], 1. / np.sqrt(precisions[c]))), 1)
             for c in range(self.clusters)]
     # Marginalize over cluster assignments, then sum over the minibatch.
     log_lik = np.sum(logsumexp(np.vstack(rows), axis=0))
     return self.scale * log_lik + prior
Example #7
0
def logprior(var_par, draw, k):
    """Log prior of a reparameterized Gaussian-mixture variational draw.

    NOTE(review): `alpha` and `beta` are read from module globals — confirm
    they are defined where this runs.
    """
    # Log of prior probabilities
    half = int(len(var_par) / 2)
    # Data dimensionality implied by the parameter layout used by get_pars.
    dim = int((half - 2 * k + 1) / k)
    mean, scale = var_par[:half], np.exp(var_par[half:])
    # Transform the standard-normal draw into a variational-posterior sample.
    sample = draw * scale + mean
    pars = get_pars(sample, k, dim)
    # Mixture weights via the inverse stick-breaking map.
    weights = stick_backward(pars['pi'])
    means = pars['mu'].reshape([k, dim])
    total = 0
    for i, log_tau in enumerate(pars['tau']):
        # Inverse-gamma prior on the precision (the bare `+ log_tau` is the
        # exp log-Jacobian) plus a Gaussian prior on the component mean.
        total += linvgamma(np.exp(log_tau), alpha, beta) + log_tau + multi_lpdf(means[i], np.zeros(dim), 1)
    # Uniform Dirichlet on the weights plus the stick-breaking log-Jacobian.
    return total + dirichlet.logpdf(weights, np.ones(k)) + np.log(np.abs(stick_jacobian_det(pars['pi'])))