def hastings_factor(self):
    """Return the log Hastings correction for the conditional-jumper proposal.

    When a conditional jumper is active, the correction is the log-density of
    the reverse jump minus that of the forward jump under the jumper's
    conditional multivariate normal (mean ``M_cond``, Cholesky ``L_cond``).
    With no jumper the proposal is treated as symmetric and the factor is 0.
    """
    jumper = self.cond_jumper.value
    # No conditional jumper in play -> symmetric proposal, zero correction.
    if jumper is None:
        return 0.
    forward = pm.mv_normal_chol_like(
        self.stochastic.value, jumper.M_cond, jumper.L_cond)
    backward = pm.mv_normal_chol_like(
        self.stochastic.last_value, jumper.M_cond, jumper.L_cond)
    return backward - forward
def propose(self):
    """Propose a new value via an autoregressive-style correlated jump.

    Draws an innovation ``delta`` from a zero-mean MVN parameterized by the
    Cholesky factor ``sig``, then mixes it with the current (mu-centered)
    value using coefficient ``beta`` so the jump stays on the prior scale.
    Caches the log Hastings correction for this asymmetric kernel in
    ``self._hastings_factor``.
    """
    parents = self.stochastic.parents
    sig = pm.utils.value(parents['sig'])
    mu = pm.utils.value(parents['mu'])
    # Innovation drawn with the prior's Cholesky-parameterized covariance.
    delta = pm.rmv_normal_chol(0 * mu, sig)
    # Mixing weight, capped at 1 so sqrt(1 - beta**2) stays real.
    beta = np.minimum(1, self.proposal_sd * self.adaptive_scale_factor)
    bsig = beta * sig
    sb2 = np.sqrt(1 - beta ** 2)
    self.stochastic.value = (self.stochastic.value - mu) * sb2 + beta * delta + mu
    xp, x = self.stochastic.value, self.stochastic.last_value
    # log q(x | xp) - log q(xp | x): correction for the asymmetric kernel.
    backward = pm.mv_normal_chol_like(x, (xp - mu) * sb2 + mu, bsig)
    forward = pm.mv_normal_chol_like(xp, (x - mu) * sb2 + mu, bsig)
    self._hastings_factor = backward - forward
def mixture_loglike(data, thetas, covs, labels):
    """Log-likelihood of labeled data under a Gaussian mixture.

    Parameters
    ----------
    data : (n, d) array of observations
    thetas : sequence of per-component means
    covs : sequence of per-component covariance matrices
    labels : (n,) integer array assigning each observation to a component

    Returns
    -------
    float
        The summed likelihood contribution of each point under its own
        component; -1e300 when the result is NaN, so a sampler treats the
        state as numerically impossible instead of propagating NaN.
    """
    n = len(data)
    likes = pdfs.mvnpdf(data, thetas, covs)
    # BUG FIX: in the original, this NaN fallback sat *after* an
    # unconditional `return loglike` and was unreachable. When any density
    # comes back NaN, recompute component-by-component with the
    # Cholesky-parameterized MVN log-likelihood, which is more robust.
    if np.isnan(likes).any():
        loglike = 0.
        for j, (theta, cov) in enumerate(zip(thetas, covs)):
            this_data = data[labels == j]
            ch = chol(cov)
            loglike += pm.mv_normal_chol_like(this_data, theta, ch)
    else:
        # Column-major ravel puts component j's densities at offsets
        # j*n .. j*n + n - 1, so this picks each row's own-label density.
        loglike = likes.ravel('F').take(labels * n + np.arange(n)).sum()
    if np.isnan(loglike):
        return -1e300
    return loglike
def mixture_loglike2(data, thetas, taus, weights):
    """Log-likelihood of data under a weighted Gaussian mixture.

    Parameters
    ----------
    data : (n, d) array of observations
    thetas : sequence of per-component means
    taus : sequence of per-component precision matrices (inverted here to
        covariances before density evaluation)
    weights : mixture weights multiplied elementwise against the densities

    Returns
    -------
    float
        The weighted density sum; -1e300 when the result is NaN, so a
        sampler rejects the state instead of propagating NaN.
    """
    covs = [inv(tau) for tau in taus]
    likes = pdfs.mvnpdf(data, thetas, covs)
    loglike = (likes * weights).sum()
    if np.isnan(loglike):
        # BUG FIX: removed a leftover `st()` debugger breakpoint that would
        # halt the sampler here, and dropped the unreachable dead code that
        # followed the final return (it referenced undefined names `labels`
        # and `ch` and could never have run correctly).
        return -1e300
    return loglike