def setup_logp(self):
    """Set up the prior, likelihood, and posterior logp functions, and their derivatives."""
    shared = make_shared_replacements(self.variables, self.model)

    self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
    self.prior_dlogp_func = logp_forw(
        [gradient(self.model.varlogpt, self.variables)], self.variables, shared
    )
    self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
    self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
    self.posterior_dlogp_func = logp_forw(
        [gradient(self.model.logpt, self.variables)], self.variables, shared
    )
    self.posterior_hessian_func = logp_forw(
        [hessian(self.model.logpt, self.variables)], self.variables, shared
    )
    self.posterior_logp_nojac = logp_forw([self.model.logp_nojact], self.variables, shared)
    self.posterior_dlogp_nojac = logp_forw(
        [gradient(self.model.logp_nojact, self.variables)], self.variables, shared
    )
    self.posterior_hessian_nojac = logp_forw(
        [hessian(self.model.logp_nojact, self.variables)], self.variables, shared
    )
def setup_logp(self):
    """Set up the prior, likelihood, and posterior logp functions and their gradients."""
    shared = make_shared_replacements(self.variables, self.model)

    self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
    self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
    self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
    self.posterior_dlogp_func = logp_forw(
        [gradient(self.model.logpt, self.variables)], self.variables, shared
    )
    self.prior_dlogp_func = logp_forw(
        [gradient(self.model.varlogpt, self.variables)], self.variables, shared
    )
    self.likelihood_dlogp_func = logp_forw(
        [gradient(self.model.datalogpt, self.variables)], self.variables, shared
    )
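Every variant above delegates to logp_forw, which joins the free variables into a single flattened input vector and compiles the requested outputs. For context, here is a minimal sketch of that helper as it exists in the PyMC3 3.x/Theano stack these snippets target (treat the exact import path as an assumption):

import theano
from pymc3.theanof import join_nonshared_inputs

def logp_forw(out_vars, vars, shared):
    """Compile a Theano function mapping one flattened parameter array to out_vars."""
    # Replace the individual free variables with slices of a single input
    # vector, so the compiled function takes one 1-D array as argument.
    out_list, inarray0 = join_nonshared_inputs(out_vars, vars, shared)
    f = theano.function([inarray0], out_list)
    f.trust_input = True  # skip input validation; callers must pass correctly typed arrays
    return f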
def _theano_hamiltonian(model_vars, shared, logpt, potential):
    """Creates a Hamiltonian with shared inputs.

    Parameters
    ----------
    model_vars : array of variables to be sampled
    shared : theano tensors that are already shared
    logpt : model log probability
    potential : hamiltonian potential

    Returns
    -------
    Hamiltonian : namedtuple with log pdf, gradient of log pdf, and potential functions
    q : Starting position variable.
    """
    dlogp = gradient(logpt, model_vars)
    (logp, dlogp), q = join_nonshared_inputs([logpt, dlogp], model_vars, shared)
    logp = CallableTensor(logp)
    dlogp = CallableTensor(dlogp)
    return Hamiltonian(logp, dlogp, potential), q
def _theano_hamiltonian(model_vars, shared, logpt, potential):
    """Creates a Hamiltonian with shared inputs.

    Parameters
    ----------
    model_vars : array of variables to be sampled
    shared : theano tensors that are already shared
    logpt : model log probability
    potential : hamiltonian potential

    Returns
    -------
    Hamiltonian : namedtuple with log pdf, gradient of log pdf, and potential functions
    q : Starting position variable.
    dlogp_func : theano function that computes the gradient of the log pdf at a point
    """
    dlogp = gradient(logpt, model_vars)
    (logp, dlogp), q = join_nonshared_inputs([logpt, dlogp], model_vars, shared)
    dlogp_func = theano.function(inputs=[q], outputs=dlogp)
    dlogp_func.trust_input = True
    logp = CallableTensor(logp)
    dlogp = CallableTensor(dlogp)
    return Hamiltonian(logp, dlogp, potential), q, dlogp_func
def _theano_hamiltonian(model_vars, shared, logpt, potential):
    """Create a Hamiltonian with shared inputs.

    Parameters
    ----------
    model_vars : array of variables to be sampled
    shared : theano tensors that are already shared
    logpt : model log probability
    potential : hamiltonian potential

    Returns
    -------
    Hamiltonian : namedtuple with log pdf, gradient of log pdf, and potential functions
    q : Initial position for Hamiltonian Monte Carlo
    dlogp_func : theano function that computes the gradient of a log pdf at a point
    """
    dlogp = gradient(logpt, model_vars)
    (logp, dlogp), q = join_nonshared_inputs([logpt, dlogp], model_vars, shared)
    dlogp_func = theano.function(inputs=[q], outputs=dlogp)
    dlogp_func.trust_input = True
    logp = CallableTensor(logp)
    dlogp = CallableTensor(dlogp)
    return Hamiltonian(logp, dlogp, potential), q, dlogp_func
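A hedged usage sketch for the final variant above, assuming a PyMC3 3.x model: make_shared_replacements, free_RVs, logpt, and QuadPotentialDiag are real PyMC3 3.x names, but the wiring shown is illustrative rather than the library's own HMC setup.

import numpy as np
import pymc3 as pm
from pymc3.theanof import make_shared_replacements
from pymc3.step_methods.hmc.quadpotential import QuadPotentialDiag

with pm.Model() as model:
    pm.Normal("mu", 0.0, 1.0)  # one free variable, so q has length 1

vars_ = model.free_RVs
shared = make_shared_replacements(vars_, model)
potential = QuadPotentialDiag(np.ones(1))

hamiltonian, q, dlogp_func = _theano_hamiltonian(vars_, shared, model.logpt, potential)
# trust_input=True means the argument must already be a float64 ndarray.
print(dlogp_func(np.zeros(1)))  # gradient of the joint log density at mu = 0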
def jacobian_det(self, x):
    # For an elementwise transform the Jacobian is diagonal, so the
    # gradient of the summed backward transform recovers its diagonal.
    grad = tt.reshape(gradient(tt.sum(self.backward(x)), [x]), x.shape)
    return tt.log(tt.abs_(grad))
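The gradient-of-a-sum trick above relies on the transform being elementwise: each output of backward depends on exactly one input, so d/dx_i sum_j g(x_j) = g'(x_i), and the log |Jacobian determinant| reduces to elementwise log|g'(x_i)| terms. A quick numeric sketch using exp as a stand-in backward transform (assumes only Theano):

import numpy as np
import theano
import theano.tensor as tt

x = tt.dvector("x")
backward = tt.exp(x)  # stand-in elementwise backward transform
# The gradient of the scalar sum is the diagonal of the Jacobian.
grad = tt.reshape(theano.grad(tt.sum(backward), x), x.shape)
log_jac = tt.log(tt.abs_(grad))
f = theano.function([x], log_jac)
print(f(np.array([0.0, 1.0, -2.0])))  # -> [ 0.  1. -2.], since log|exp(x)| == x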