def __init__(self, model=None, potential_fn=None, kinetic_fn=None, step_size=1.0,
             adapt_step_size=True, adapt_mass_matrix=True, dense_mass=False,
             target_accept_prob=0.8, trajectory_length=2 * math.pi,
             init_strategy=init_to_uniform()):
    """Configure an HMC kernel.

    Exactly one of `model` or `potential_fn` must be provided; the target
    potential is taken from whichever one is given. All remaining arguments
    are stored verbatim as sampler settings.
    """
    # The two ways of specifying the target density are mutually exclusive.
    if (model is None) == (potential_fn is None):
        raise ValueError(
            'Only one of `model` or `potential_fn` must be specified.')
    self.model = model
    self.potential_fn = potential_fn
    # Fall back to the Euclidean kinetic energy when none is supplied.
    self.kinetic_fn = euclidean_kinetic_energy if kinetic_fn is None else kinetic_fn
    self.step_size = step_size
    self.adapt_step_size = adapt_step_size
    self.adapt_mass_matrix = adapt_mass_matrix
    self.dense_mass = dense_mass
    self.target_accept_prob = target_accept_prob
    self.trajectory_length = trajectory_length
    self.init_strategy = init_strategy
    # Internal state / fixed settings for the plain-HMC variant.
    self._sample_fn = None
    self.algo = 'HMC'
    self.max_tree_depth = 10
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform(), init_scale=0.1):
    """Validate the initial scale of the guide distribution, then delegate
    construction to the base auto-guide."""
    # A non-positive scale would yield a degenerate/invalid distribution.
    if init_scale <= 0:
        raise ValueError(f"Expected init_scale > 0. but got {init_scale}")
    self._init_scale = init_scale
    super().__init__(model, prefix, init_strategy)
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform(),
             num_flows=3, **arn_kwargs):
    """Set up an IAF-based normal auto-guide.

    Keyword arguments for the autoregressive networks are pulled out of
    `arn_kwargs`; unspecified options fall back to the defaults used in the
    IAF paper (https://arxiv.org/abs/1606.04934) and the NeuTra paper
    (https://arxiv.org/abs/1903.03704): 2-layer, stax.Elu,
    skip_connections=False.
    """
    self.num_flows = num_flows
    # None means "let the network choose its own hidden sizes".
    hidden_dims = arn_kwargs.get('hidden_dims')
    skip_connections = arn_kwargs.get('skip_connections', False)
    nonlinearity = arn_kwargs.get('nonlinearity', stax.Elu)
    self._hidden_dims = hidden_dims
    self._skip_connections = skip_connections
    self._nonlinearity = nonlinearity
    super(AutoIAFNormal, self).__init__(model, prefix=prefix,
                                        init_strategy=init_strategy)
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform(),
             num_flows=1, hidden_factors=None):
    """Set up a BNAF-based normal auto-guide.

    :param model: the model to build the guide for.
    :param str prefix: prefix for guide parameter names.
    :param init_strategy: initialization strategy for latent sites.
    :param int num_flows: number of BNAF flow layers.
    :param list hidden_factors: per-layer hidden-size multipliers;
        defaults to ``[8, 8]``.
    """
    # Use a None sentinel instead of a mutable default argument: a literal
    # `[8, 8]` default would be a single list shared by every call/instance.
    if hidden_factors is None:
        hidden_factors = [8, 8]
    self.num_flows = num_flows
    self._hidden_factors = hidden_factors
    super(AutoBNAFNormal, self).__init__(model, prefix=prefix,
                                         init_strategy=init_strategy)
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform(),
             init_scale=0.1, rank=None):
    """Validate and record the initial scale and the covariance rank, then
    delegate construction to the base auto-guide."""
    # A non-positive scale would yield a degenerate/invalid distribution.
    if init_scale <= 0:
        raise ValueError(f"Expected init_scale > 0. but got {init_scale}")
    self._init_scale = init_scale
    # rank=None lets the guide pick a default factor dimension later.
    self.rank = rank
    super(AutoLowRankMultivariateNormal, self).__init__(
        model, prefix=prefix, init_strategy=init_strategy)
def __init__(self, model=None, potential_fn=None, kinetic_fn=None,
             step_size=1.0, adapt_step_size=True, adapt_mass_matrix=True,
             dense_mass=False, target_accept_prob=0.8, trajectory_length=None,
             max_tree_depth=10, init_strategy=init_to_uniform()):
    """Configure a NUTS kernel: forward all shared HMC settings to the base
    class, then record the NUTS-specific options."""
    super(NUTS, self).__init__(
        model=model,
        potential_fn=potential_fn,
        kinetic_fn=kinetic_fn,
        step_size=step_size,
        adapt_step_size=adapt_step_size,
        adapt_mass_matrix=adapt_mass_matrix,
        dense_mass=dense_mass,
        target_accept_prob=target_accept_prob,
        trajectory_length=trajectory_length,
        init_strategy=init_strategy,
    )
    # Overwrite the HMC defaults with the NUTS-specific settings.
    self.algo = 'NUTS'
    self.max_tree_depth = max_tree_depth
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform()):
    """Record the initialization strategy and delegate to the base guide."""
    # Base distribution is built lazily, not at construction time.
    self._base_dist = None
    self.init_strategy = init_strategy
    super(AutoContinuous, self).__init__(model, prefix=prefix)
def __init__(self, model, prefix="auto", init_strategy=init_to_uniform(), rank=None):
    """Record the requested covariance-factor rank, then delegate
    construction to the base auto-guide."""
    # rank=None lets the guide pick a default factor dimension later.
    self.rank = rank
    super(AutoLowRankMultivariateNormal, self).__init__(model, prefix=prefix,
                                                        init_strategy=init_strategy)
base_inv_transforms = {'x': biject_to(x_prior.support), 'y': biject_to(y_prior.base_dist.support)} actual_samples = constrain_fn( handlers.seed(model, random.PRNGKey(0)), (), {}, base_inv_transforms, params) actual_potential_energy = potential_energy(model, (), {}, base_inv_transforms, params) assert_allclose(expected_samples['x'], actual_samples['x']) assert_allclose(expected_samples['y'], actual_samples['y']) assert_allclose(actual_potential_energy, expected_potential_energy) @pytest.mark.parametrize('init_strategy', [ init_to_feasible(), init_to_median(num_samples=2), init_to_prior(), init_to_uniform(), ]) def test_initialize_model_change_point(init_strategy): def model(data): alpha = 1 / np.mean(data) lambda1 = numpyro.sample('lambda1', dist.Exponential(alpha)) lambda2 = numpyro.sample('lambda2', dist.Exponential(alpha)) tau = numpyro.sample('tau', dist.Uniform(0, 1)) lambda12 = np.where(np.arange(len(data)) < tau * len(data), lambda1, lambda2) numpyro.sample('obs', dist.Poisson(lambda12), obs=data) count_data = np.array([ 13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57, 11, 19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13, 19, 23, 27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2, 15, 15, 19, 70, 49, 7, 53, 22, 21, 31, 19, 11, 18, 20,
def __init__(self, fn, init_strategy=init_to_uniform()):
    """Wrap `fn` and remember the chosen initialization strategy; initial
    parameters are resolved lazily."""
    self._init_params = None
    self.init_strategy = init_strategy
    self.fn = fn
def __init__(self, model, *, prefix='auto', init_strategy=init_to_uniform(),
             create_plates=None):
    """Set up a MAP (Delta) auto-guide: record the initialization strategy
    and delegate the remaining setup to the base guide."""
    # Parameter map and initial params are populated lazily, not here.
    self._init_params = None
    self._param_map = None
    self.init_strategy = init_strategy
    super(AutoDelta, self).__init__(model, prefix=prefix,
                                    create_plates=create_plates)