def step(self):
    """Take one Metropolis-within-Gibbs sweep, updating each element of g in turn.

    For each index i, a new value is drawn from a truncated normal confined to
    the bounds returned by self.get_bounds(i), then accepted or rejected by a
    Metropolis-Hastings test combining the likelihood change, the Hastings
    correction for the asymmetric proposal, and the prior change on g.
    Accepted moves refresh the cached constraint right-hand sides self.rhs;
    rejected moves restore the children's saved values via self.reject.
    """
    # TODO: Propose from not the prior, and tune using the asf's.
    # The right-hand sides for the linear constraints: one entry per
    # off-diagonal constraint matrix od, holding dot(value(od), g).
    self.rhs = dict(zip(self.constraint_offdiags, [np.asarray(np.dot(pm.utils.value(od), self.g.value)).squeeze() for od in self.constraint_offdiags]))
    # Per-element outcome of this sweep: +1 accepted, -1 rejected.
    # NOTE(review): this_round is assigned but never read after the loop and
    # is not returned -- presumably debugging leftovers; confirm before removing.
    this_round = np.zeros(self.n, dtype='int')
    for i in xrange(self.n):
        self.check_constraints()
        # Jump an element of g. rhs (shadowing self.rhs inside this loop)
        # comes back from get_bounds along with the truncation bounds for i.
        lb, ub, rhs = self.get_bounds(i)
        # Propose a new value: truncated normal centred on the current value,
        # with precision set by this element's adaptive scale factor.
        curg = self.g.value[i]
        tau = 1./self.adaptive_scale_factor[i]**2
        newg = pm.rtruncnorm(curg,tau,lb,ub)[0]
        # The Hastings factor: log q(cur|new) - log q(new|cur) for the
        # asymmetric truncated-normal proposal.
        hf = pm.truncnorm_like(curg,newg,tau,lb,ub)-pm.truncnorm_like(newg,curg,tau,lb,ub)
        # The difference in prior log-probabilities of g; the form
        # .5*(cur^2 - new^2) is exact for a standard-normal prior on g[i].
        dpri = .5*(curg**2 - newg**2)
        # Get the current log-likelihood of the non-constraint children.
        lpl = self.get_likelihood_only()
        # Snapshot every off-diagonal's children values so a rejection can
        # restore them (passed to self.reject below).
        cv = {}
        for od in self.all_offdiags:
            for c in od.children:
                cv[c] = c.value.copy()
        # Enter the proposed value and get the proposed log-likelihood.
        self.set_g_value(newg, i)
        try:
            lpl_p = self.get_likelihood_only()
        except pm.ZeroProbability:
            # Proposal impossible under some child's distribution: roll back
            # the saved values and record the rejection.
            self.reject(i, cv)
            self.check_constraints()
            this_round[i] = -1
            continue
        # M-H acceptance: likelihood change + Hastings factor + prior change.
        if np.log(np.random.random()) < lpl_p - lpl + hf + dpri:
            self.accepted[i] += 1
            this_round[i] = 1
            # Refresh the cached right-hand sides with element i's accepted
            # contribution. NOTE(review): only newg's contribution is added --
            # presumably get_bounds returned rhs with element i's old
            # contribution already removed; confirm against get_bounds.
            for od in self.constraint_offdiags:
                rhs[od] += np.asarray(pm.utils.value(od))[:,i].squeeze() * newg
            self.rhs = rhs
            self.check_constraints()
        else:
            self.reject(i, cv)
            self.check_constraints()
            this_round[i] = -1
def hastings_factor(self):
    """Log Hastings correction for the asymmetric truncated-normal proposal.

    Returns log q(last | current) - log q(current | last), where q is the
    truncated-normal proposal density with the tuned precision; adding this
    term to the log acceptance ratio preserves detailed balance.
    """
    # Tuned proposal standard deviation -> precision.
    proposal_sd = self.adaptive_scale_factor * self.proposal_sd
    proposal_prec = 1.0 / proposal_sd ** 2

    current = self.stochastic.value
    previous = self.stochastic.last_value

    # Forward: density of the move previous -> current; backward: the reverse.
    lp_forward = pm.truncnorm_like(current, previous, proposal_prec,
                                   self.low_bound, self.up_bound)
    lp_backward = pm.truncnorm_like(previous, current, proposal_prec,
                                    self.low_bound, self.up_bound)

    correction = lp_backward - lp_forward
    if self.verbose > 1:
        print(self._id + ": Hastings factor %f" % correction)
    return correction
def hastings_factor(self):
    """Compute the log proposal-density correction (Hastings factor).

    The truncated-normal jump kernel is not symmetric, so the Metropolis
    acceptance rule needs the log-ratio of the reverse proposal density to
    the forward one; that log-ratio is what this method returns.
    """
    x_now = self.stochastic.value
    x_prev = self.stochastic.last_value

    # Precision implied by the adapted proposal standard deviation.
    kernel_tau = 1. / (self.adaptive_scale_factor * self.proposal_sd) ** 2
    bounds = (self.low_bound, self.up_bound)

    lp_to = pm.truncnorm_like(x_now, x_prev, kernel_tau, *bounds)
    lp_back = pm.truncnorm_like(x_prev, x_now, kernel_tau, *bounds)

    hf = lp_back - lp_to
    if self.verbose > 1:
        print(self._id + ': Hastings factor %f' % hf)
    return hf