def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
             model=None, blocked=True, use_single_leapfrog=False,
             potential=None, integrator="leapfrog", **theano_kwargs):
    """Superclass to implement Hamiltonian/hybrid monte carlo.

    Parameters
    ----------
    vars : list of theano variables
    scaling : array_like, ndim = {1,2}
        Scaling for momentum distribution. 1d arrays interpreted matrix
        diagonal.
    step_scale : float, default=0.25
        Size of steps to take, automatically scaled down by 1/n**(1/4)
    is_cov : bool, default=False
        Treat scaling as a covariance matrix/vector if True, else treat
        it as a precision matrix/vector
    model : pymc3 Model instance. default=Context model
    blocked : Boolean, default True
    use_single_leapfrog : Boolean, default False
        Whether leapfrog steps take a single step at a time.
    potential : Potential, optional
        An object that represents the Hamiltonian with methods `velocity`,
        `energy`, and `random` methods.
    **theano_kwargs : passed to theano functions

    Raises
    ------
    ValueError
        If both ``scaling`` and ``potential`` are given.
    """
    model = modelcontext(model)

    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    # Only fall back to the model test point when the caller supplied
    # neither an explicit scaling nor a ready-made potential.
    if scaling is None and potential is None:
        scaling = model.test_point

    if isinstance(scaling, dict):
        scaling = guess_scaling(Point(scaling, model=model), model=model, vars=vars)

    if scaling is not None and potential is not None:
        raise ValueError("Can not specify both potential and scaling.")

    # Scale step size down with dimensionality, ~ n**(-1/4).
    self.step_size = step_scale / (model.ndim ** 0.25)

    if potential is not None:
        self.potential = potential
    else:
        self.potential = quad_potential(scaling, is_cov, as_cov=False)

    shared = make_shared_replacements(vars, model)
    # FIX: removed the original `if theano_kwargs is None: theano_kwargs = {}`
    # guard — a **kwargs parameter is always bound to a dict and can never be
    # None, so that branch was unreachable dead code.
    self.H, self.compute_energy, self.compute_velocity, self.leapfrog, self.dlogp = \
        get_theano_hamiltonian_functions(vars, shared, model.logpt, self.potential,
                                         use_single_leapfrog, integrator,
                                         **theano_kwargs)

    super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
             model=None, blocked=True, potential=None, integrator="leapfrog",
             dtype=None, **theano_kwargs):
    """Set up Hamiltonian samplers with common structures.

    Parameters
    ----------
    vars : list of theano variables
    scaling : array_like, ndim = {1,2}
        Scaling for momentum distribution. 1d arrays interpreted matrix
        diagonal.
    step_scale : float, default=0.25
        Size of steps to take, automatically scaled down by 1/n**(1/4)
    is_cov : bool, default=False
        Treat scaling as a covariance matrix/vector if True, else treat
        it as a precision matrix/vector
    model : pymc3 Model instance
    blocked : bool, default=True
    potential : Potential, optional
        An object that represents the Hamiltonian with methods `velocity`,
        `energy`, and `random` methods.
    **theano_kwargs : passed to theano functions

    Raises
    ------
    ValueError
        If both ``scaling`` and ``potential`` are given.
    """
    model = modelcontext(model)
    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    super(BaseHMC, self).__init__(vars, blocked=blocked, model=model,
                                  dtype=dtype, **theano_kwargs)

    # Dimensionality of the sampling space, as seen by the logp/dlogp function.
    n_dim = self._logp_dlogp_func.size

    # With no explicit scaling or potential, start from a unit diagonal
    # mass matrix that adapts during tuning.
    if scaling is None and potential is None:
        init_mean = floatX(np.zeros(n_dim))
        init_var = floatX(np.ones(n_dim))
        potential = QuadPotentialDiagAdapt(n_dim, init_mean, init_var, 10)

    if isinstance(scaling, dict):
        start = Point(scaling, model=model)
        scaling = guess_scaling(start, model=model, vars=vars)

    if scaling is not None and potential is not None:
        raise ValueError("Can not specify both potential and scaling.")

    # Step size shrinks with dimensionality, ~ n**(-1/4).
    self.step_size = step_scale / (n_dim ** 0.25)

    if potential is None:
        self.potential = quad_potential(scaling, is_cov)
    else:
        self.potential = potential

    self.integrator = integration.CpuLeapfrogIntegrator(
        self.potential, self._logp_dlogp_func)
def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
             model=None, blocked=True, use_single_leapfrog=False, **theano_kwargs):
    """Superclass to implement Hamiltonian/hybrid monte carlo.

    Parameters
    ----------
    vars : list of theano variables
    scaling : array_like, ndim = {1,2}
        Scaling for momentum distribution. 1d arrays interpreted matrix
        diagonal.
    step_scale : float, default=0.25
        Size of steps to take, automatically scaled down by 1/n**(1/4)
    is_cov : bool, default=False
        Treat scaling as a covariance matrix/vector if True, else treat
        it as a precision matrix/vector
    model : pymc3 Model instance. default=Context model
    blocked : Boolean, default True
    use_single_leapfrog : Boolean, default False
        Whether leapfrog steps take a single step at a time.
    **theano_kwargs : passed to theano functions
    """
    model = modelcontext(model)

    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    if scaling is None:
        scaling = model.test_point

    if isinstance(scaling, dict):
        scaling = guess_scaling(Point(scaling, model=model), model=model, vars=vars)

    # Step size shrinks with dimensionality, ~ n**(-1/4).
    n = scaling.shape[0]
    self.step_size = step_scale / (n ** 0.25)
    self.potential = quad_potential(scaling, is_cov, as_cov=False)

    shared = make_shared_replacements(vars, model)
    # FIX: removed the original `if theano_kwargs is None: theano_kwargs = {}`
    # guard — a **kwargs parameter always binds a dict and can never be None,
    # so that branch was unreachable dead code.
    self.H, self.compute_energy, self.leapfrog, self._vars = get_theano_hamiltonian_functions(
        vars, shared, model.logpt, self.potential, use_single_leapfrog, **theano_kwargs)

    super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
             model=None, blocked=True, use_single_leapfrog=False,
             potential=None, integrator="leapfrog", **theano_kwargs):
    """Superclass to implement Hamiltonian/hybrid monte carlo.

    Parameters
    ----------
    vars : list of theano variables
    scaling : array_like, ndim = {1,2}
        Scaling for momentum distribution. 1d arrays interpreted matrix
        diagonal.
    step_scale : float, default=0.25
        Size of steps to take, automatically scaled down by 1/n**(1/4)
    is_cov : bool, default=False
        Treat scaling as a covariance matrix/vector if True, else treat
        it as a precision matrix/vector
    model : pymc3 Model instance. default=Context model
    blocked : Boolean, default True
    use_single_leapfrog : Boolean, default False
        Whether leapfrog steps take a single step at a time.
    potential : Potential, optional
        An object that represents the Hamiltonian with methods `velocity`,
        `energy`, and `random` methods.
    **theano_kwargs : passed to theano functions

    Raises
    ------
    ValueError
        If both ``scaling`` and ``potential`` are given.
    """
    model = modelcontext(model)

    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    # With no explicit scaling or potential, start from a unit diagonal
    # mass matrix that adapts during tuning.
    if scaling is None and potential is None:
        size = sum(np.prod(var.dshape, dtype=int) for var in vars)
        mean = floatX(np.zeros(size))
        var = floatX(np.ones(size))
        potential = QuadPotentialDiagAdapt(size, mean, var, 10)

    if isinstance(scaling, dict):
        point = Point(scaling, model=model)
        scaling = guess_scaling(point, model=model, vars=vars)

    if scaling is not None and potential is not None:
        raise ValueError("Can not specify both potential and scaling.")

    # Step size shrinks with dimensionality, ~ n**(-1/4).
    self.step_size = step_scale / (model.ndim ** 0.25)

    if potential is not None:
        self.potential = potential
    else:
        self.potential = quad_potential(scaling, is_cov)

    shared = make_shared_replacements(vars, model)
    # FIX: removed the original `if theano_kwargs is None: theano_kwargs = {}`
    # guard — a **kwargs parameter always binds a dict and can never be None,
    # so that branch was unreachable dead code.
    self.H, self.compute_energy, self.compute_velocity, self.leapfrog, self.dlogp = \
        get_theano_hamiltonian_functions(vars, shared, model.logpt, self.potential,
                                         use_single_leapfrog, integrator,
                                         **theano_kwargs)

    super(BaseHMC, self).__init__(vars, shared, blocked=blocked)
def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False,
             model=None, blocked=True, potential=None, dtype=None, Emax=1000,
             target_accept=0.8, gamma=0.05, k=0.75, t0=10,
             adapt_step_size=True, step_rand=None, **theano_kwargs):
    """Set up Hamiltonian samplers with common structures.

    Parameters
    ----------
    vars : list of theano variables
    scaling : array_like, ndim = {1,2}
        Scaling for momentum distribution. 1d arrays interpreted matrix
        diagonal.
    step_scale : float, default=0.25
        Size of steps to take, automatically scaled down by 1/n**(1/4)
    is_cov : bool, default=False
        Treat scaling as a covariance matrix/vector if True, else treat
        it as a precision matrix/vector
    model : pymc3 Model instance
    blocked : bool, default=True
    potential : Potential, optional
        An object that represents the Hamiltonian with methods `velocity`,
        `energy`, and `random` methods.
    **theano_kwargs : passed to theano functions

    Raises
    ------
    ValueError
        If both ``scaling`` and ``potential`` are given.
    """
    self._model = modelcontext(model)

    if vars is None:
        vars = self._model.cont_vars
    vars = inputvars(vars)

    super().__init__(vars, blocked=blocked, model=model, dtype=dtype,
                     **theano_kwargs)

    self.adapt_step_size = adapt_step_size
    self.Emax = Emax
    self.iter_count = 0

    # Dimensionality of the sampling space; step size shrinks ~ n**(-1/4).
    n_dim = self._logp_dlogp_func.size
    self.step_size = step_scale / (n_dim ** 0.25)

    # Dual-averaging adaptation of the step size toward target_accept.
    self.step_adapt = step_sizes.DualAverageAdaptation(
        self.step_size, target_accept, gamma, k, t0)
    self.target_accept = target_accept
    self.tune = True

    # With no explicit scaling or potential, start from a unit diagonal
    # mass matrix that adapts during tuning.
    if scaling is None and potential is None:
        init_mean = floatX(np.zeros(n_dim))
        init_var = floatX(np.ones(n_dim))
        potential = QuadPotentialDiagAdapt(n_dim, init_mean, init_var, 10)

    if isinstance(scaling, dict):
        start = Point(scaling, model=model)
        scaling = guess_scaling(start, model=model, vars=vars)

    if scaling is not None and potential is not None:
        raise ValueError("Can not specify both potential and scaling.")

    if potential is None:
        self.potential = quad_potential(scaling, is_cov)
    else:
        self.potential = potential

    self.integrator = integration.CpuLeapfrogIntegrator(
        self.potential, self._logp_dlogp_func)

    self._step_rand = step_rand
    self._warnings = []
    self._samples_after_tune = 0
    self._num_divs_sample = 0