def test_half_normal_ordered(self, sd, shape):
    """Chain(log, ordered) on a HalfNormal should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.randn(*shape)))
    chain = tr.Chain([tr.log, tr.ordered])
    model = self.build_model(
        pm.HalfNormal,
        {'sd': sd},
        shape=shape,
        testval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_exponential_ordered(self, lam, shape):
    """Chain(log, ordered) on an Exponential should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.randn(*shape)))
    chain = tr.Chain([tr.log, tr.ordered])
    model = self.build_model(
        pm.Exponential,
        {'lam': lam},
        shape=shape,
        testval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_chain_jacob_det():
    """Jacobian determinant of a logodds->ordered chain is checked non-elementwise."""
    chained = tr.Chain([tr.logodds, tr.ordered])
    check_jacobian_det(chained, Vector(R, 4), at.dvector, np.zeros(4), elemwise=False)
def test_chain():
    """Logodds->ordered chain: round-trip, jacobian, and ordering of drawn values."""
    chained = tr.Chain([tr.logodds, tr.ordered])
    # Round-trip / transform consistency on a unit sorted vector.
    check_vector_transform(chained, UnitSortedVector(3))
    # Jacobian determinant is a scalar for the whole vector, not elementwise.
    check_jacobian_det(chained, Vector(R, 4), aet.dvector, np.zeros(4), elemwise=False)
    # Values produced through the chain must be non-decreasing.
    vals = get_values(chained, Vector(R, 5), aet.dvector, np.zeros(5))
    close_to_logical(np.diff(vals) >= 0, True, tol)
def test_half_normal_ordered(self, sd, size):
    """Chain(log, ordered) on a HalfNormal should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.randn(*size)))
    chain = tr.Chain([tr.log, tr.ordered])
    model = self.build_model(
        pm.HalfNormal,
        {"sd": sd},
        size=size,
        initval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_vonmises_ordered(self, mu, kappa, shape):
    """Chain(circular, ordered) on a VonMises should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.rand(*shape)))
    chain = tr.Chain([tr.circular, tr.ordered])
    model = self.build_model(
        pm.VonMises,
        {'mu': mu, 'kappa': kappa},
        shape=shape,
        testval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_exponential_ordered(self, lam, size):
    """Chain(log, ordered) on an Exponential should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.randn(*size)))
    chain = tr.Chain([tr.log, tr.ordered])
    model = self.build_model(
        pm.Exponential,
        {"lam": lam},
        size=size,
        initval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_beta_ordered(self, a, b, shape):
    """Chain(logodds, ordered) on a Beta should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.rand(*shape)))
    chain = tr.Chain([tr.logodds, tr.ordered])
    model = self.build_model(
        pm.Beta,
        {'alpha': a, 'beta': b},
        shape=shape,
        testval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_uniform_ordered(self, lower, upper, shape):
    """Chain(interval, ordered) on a Uniform should give a valid elementwise logp."""
    bounded = tr.Interval(lower, upper)
    sorted_start = np.sort(np.abs(np.random.rand(*shape)))
    model = self.build_model(
        pm.Uniform,
        {'lower': lower, 'upper': upper},
        shape=shape,
        testval=sorted_start,
        transform=tr.Chain([bounded, tr.ordered]),
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_vonmises_ordered(self, mu, kappa, size):
    """Chain(circular, ordered) on a VonMises should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.rand(*size)))
    chain = tr.Chain([tr.circular, tr.ordered])
    model = self.build_model(
        pm.VonMises,
        {"mu": mu, "kappa": kappa},
        size=size,
        initval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_beta_ordered(self, a, b, size):
    """Chain(logodds, ordered) on a Beta should give a valid elementwise logp."""
    sorted_start = np.sort(np.abs(np.random.rand(*size)))
    chain = tr.Chain([tr.logodds, tr.ordered])
    model = self.build_model(
        pm.Beta,
        {"alpha": a, "beta": b},
        size=size,
        initval=sorted_start,
        transform=chain,
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=0)
def test_uniform_ordered(self, lower, upper, size):
    """Chain(interval, ordered) on a Uniform should give a valid elementwise logp."""

    def transform_params(rv_var):
        # Uniform RV inputs are (rng, size, dtype, lower, upper); pull the bounds.
        _, _, _, lower, upper = rv_var.owner.inputs
        lower = at.as_tensor_variable(lower) if lower is not None else None
        upper = at.as_tensor_variable(upper) if upper is not None else None
        return lower, upper

    bounded = tr.Interval(transform_params)
    sorted_start = np.sort(np.abs(np.random.rand(*size)))
    model = self.build_model(
        pm.Uniform,
        {"lower": lower, "upper": upper},
        size=size,
        initval=sorted_start,
        transform=tr.Chain([bounded, tr.ordered]),
    )
    self.check_vectortransform_elementwise_logp(model, vect_opt=1)
class TestElementWiseLogp(SeededTest):
    """Check that transformed free RVs produce consistent elementwise logp values.

    Each test builds a one-variable model with an explicit transform and verifies
    that logp computed through the transform (plus its Jacobian determinant)
    matches the untransformed distribution's logp.
    """

    def build_model(self, distfam, params, shape, transform, testval=None):
        """Return a Model holding one RV 'x' of `distfam` with the given transform."""
        if testval is not None:
            testval = pm.floatX(testval)
        with pm.Model() as m:
            distfam('x', shape=shape, transform=transform, testval=testval, **params)
        return m

    def check_transform_elementwise_logp(self, model):
        """Elementwise transforms: transformed logp == logp(backward(v)) + |J|."""
        untransformed = model.deterministics[0]
        rv = model.free_RVs[0]
        assert rv.ndim == rv.logp_elemwiset.ndim
        point = model.test_point
        draw = np.random.randn(*point[rv.name].shape)
        point[rv.name] = draw
        dist = rv.distribution
        logp_nojac = untransformed.distribution.logp(dist.transform_used.backward(draw))
        jacob_det = dist.transform_used.jacobian_det(theano.shared(draw))
        assert rv.logp_elemwiset.ndim == jacob_det.ndim
        elementwiselogp = logp_nojac + jacob_det
        close_to(rv.logp_elemwise(point), elementwiselogp.eval(), tol)

    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        """Vector transforms: the transform collapses the last axis of the logp."""
        untransformed = model.deterministics[0]
        rv = model.free_RVs[0]
        assert (rv.ndim - 1) == rv.logp_elemwiset.ndim
        point = model.test_point
        draw = np.random.randn(*point[rv.name].shape)
        point[rv.name] = draw
        dist = rv.distribution
        logp_nojac = untransformed.distribution.logp(dist.transform_used.backward(draw))
        jacob_det = dist.transform_used.jacobian_det(theano.shared(draw))
        assert rv.logp_elemwiset.ndim == jacob_det.ndim
        if vect_opt == 0:
            # Underlying distribution is univariate: sum its logp over the last axis.
            elementwiselogp = logp_nojac.sum(axis=-1) + jacob_det
        else:
            elementwiselogp = logp_nojac + jacob_det
        # Compare with a relative tolerance scaled by the values themselves.
        actual = rv.logp_elemwise(point)
        expected = elementwiselogp.eval()
        close_to(actual, expected, np.abs(0.5 * (actual + expected) * tol))

    @pytest.mark.parametrize('sd,shape', [
        (2.5, 2),
        (5., (2, 3)),
        (np.ones(3) * 10., (4, 3)),
    ])
    def test_half_normal(self, sd, shape):
        model = self.build_model(pm.HalfNormal, {'sd': sd}, shape=shape, transform=tr.log)
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize('lam,shape', [(2.5, 2), (5., (2, 3)), (np.ones(3), (4, 3))])
    def test_exponential(self, lam, shape):
        model = self.build_model(pm.Exponential, {'lam': lam}, shape=shape, transform=tr.log)
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize('a,b,shape', [
        (1., 1., 2),
        (.5, .5, (2, 3)),
        (np.ones(3), np.ones(3), (4, 3)),
    ])
    def test_beta(self, a, b, shape):
        model = self.build_model(
            pm.Beta, {'alpha': a, 'beta': b}, shape=shape, transform=tr.logodds
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize('lower,upper,shape', [
        (0., 1., 2),
        (.5, 5.5, (2, 3)),
        (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3)),
    ])
    def test_uniform(self, lower, upper, shape):
        bounded = tr.Interval(lower, upper)
        model = self.build_model(
            pm.Uniform, {'lower': lower, 'upper': upper}, shape=shape, transform=bounded
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize('mu,kappa,shape', [
        (0., 1., 2),
        (-.5, 5.5, (2, 3)),
        (np.zeros(3), np.ones(3), (4, 3)),
    ])
    def test_vonmises(self, mu, kappa, shape):
        model = self.build_model(
            pm.VonMises, {'mu': mu, 'kappa': kappa}, shape=shape, transform=tr.circular
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize('a,shape', [
        (np.ones(2), 2),
        (np.ones((2, 3)) * .5, (2, 3)),
        (np.ones(3), (4, 3)),
    ])
    def test_dirichlet(self, a, shape):
        model = self.build_model(pm.Dirichlet, {'a': a}, shape=shape, transform=tr.stick_breaking)
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)

    def test_normal_ordered(self):
        model = self.build_model(
            pm.Normal,
            {'mu': 0., 'sd': 1.},
            shape=3,
            testval=np.asarray([-1., 1., 4.]),
            transform=tr.ordered,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('sd,shape', [
        (2.5, (2, )),
        (np.ones(3), (4, 3)),
    ])
    @pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
    def test_half_normal_ordered(self, sd, shape):
        testval = np.sort(np.abs(np.random.randn(*shape)))
        model = self.build_model(
            pm.HalfNormal,
            {'sd': sd},
            shape=shape,
            testval=testval,
            transform=tr.Chain([tr.log, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('lam,shape', [(2.5, (2, )), (np.ones(3), (4, 3))])
    def test_exponential_ordered(self, lam, shape):
        testval = np.sort(np.abs(np.random.randn(*shape)))
        model = self.build_model(
            pm.Exponential,
            {'lam': lam},
            shape=shape,
            testval=testval,
            transform=tr.Chain([tr.log, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('a,b,shape', [
        (1., 1., (2, )),
        (np.ones(3), np.ones(3), (4, 3)),
    ])
    def test_beta_ordered(self, a, b, shape):
        testval = np.sort(np.abs(np.random.rand(*shape)))
        model = self.build_model(
            pm.Beta,
            {'alpha': a, 'beta': b},
            shape=shape,
            testval=testval,
            transform=tr.Chain([tr.logodds, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('lower,upper,shape', [
        (0., 1., (2, )),
        (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3)),
    ])
    def test_uniform_ordered(self, lower, upper, shape):
        bounded = tr.Interval(lower, upper)
        testval = np.sort(np.abs(np.random.rand(*shape)))
        model = self.build_model(
            pm.Uniform,
            {'lower': lower, 'upper': upper},
            shape=shape,
            testval=testval,
            transform=tr.Chain([bounded, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('mu,kappa,shape', [
        (0., 1., (2, )),
        (np.zeros(3), np.ones(3), (4, 3)),
    ])
    def test_vonmises_ordered(self, mu, kappa, shape):
        testval = np.sort(np.abs(np.random.rand(*shape)))
        model = self.build_model(
            pm.VonMises,
            {'mu': mu, 'kappa': kappa},
            shape=shape,
            testval=testval,
            transform=tr.Chain([tr.circular, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('lower,upper,shape,transform', [
        (0., 1., (2, ), tr.stick_breaking),
        (.5, 5.5, (2, 3), tr.stick_breaking),
        (np.zeros(3), np.ones(3), (4, 3), tr.Chain([tr.sum_to_1, tr.logodds])),
    ])
    def test_uniform_other(self, lower, upper, shape, transform):
        # Start on the simplex so simplex-style transforms get a valid point.
        testval = np.ones(shape) / shape[-1]
        model = self.build_model(
            pm.Uniform,
            {'lower': lower, 'upper': upper},
            shape=shape,
            testval=testval,
            transform=transform,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize('mu,cov,shape', [
        (np.zeros(2), np.diag(np.ones(2)), (2, )),
        (np.zeros(3), np.diag(np.ones(3)), (4, 3)),
    ])
    def test_mvnormal_ordered(self, mu, cov, shape):
        testval = np.sort(np.random.randn(*shape))
        model = self.build_model(
            pm.MvNormal,
            {'mu': mu, 'cov': cov},
            shape=shape,
            testval=testval,
            transform=tr.ordered,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)
name = "unitdisk" def backward(self, y): return tt.stack([y[0], y[1] * tt.sqrt(1 - y[0]**2)]) def forward(self, x): return tt.stack([x[0], x[1] / tt.sqrt(1 - x[0]**2)]) def forward_val(self, x, point=None): return np.array([x[0], x[1] / np.sqrt(1 - x[0]**2)]) def jacobian_det(self, y): return tt.stack((tt.zeros_like(y[0]), 0.5 * tt.log(1 - y[0]**2))) unit_disk = tr.Chain([UnitDiskTransform(), tr.Interval(-1, 1)]) class AngleTransform(tr.Transform): """An angle transformation The variable is augmented to sample an isotropic 2D normal and the angle is given by the arctan of the ratio of the two coordinates. This will have a uniform distribution between -pi and pi. Args: regularized: The amplitude of the regularization term. If ``None``, no regularization is applied. This has no effect on the distribution over the transformed parameter, but it can make sampling more efficient in some cases.
class PoissionProcess(pm.Discrete):
    """Counts whose Poisson rate is selected by a per-observation hidden state.

    NOTE(review): the class name keeps the original "Poission" spelling so any
    external references continue to work; the correct spelling is "Poisson".

    Parameters
    ----------
    state : tensor of int
        Hidden-state index for each observation, used to index into `lambdas`.
    lambdas : tensor
        One Poisson rate per hidden state.
    """

    def __init__(self, state=None, lambdas=None, *args, **kwargs):
        super(PoissionProcess, self).__init__(*args, **kwargs)
        self.state = state
        self.lambdas = lambdas
        # Housekeeping: give samplers a sensible integer mode.
        self.mode = tt.cast(1, dtype='int64')

    def logp(self, x):
        """Summed Poisson log-likelihood of `x` under each observation's state rate."""
        lambd = self.lambdas[self.state]
        llike = pm.Poisson.dist(lambd).logp_sum(x)
        return llike


# Keep the two rates ordered so the states are identifiable.
chain_tran = tr.Chain([tr.ordered])

with pm.Model() as m:
    lambdas = pm.Gamma('lam0', mu=10, sd=100, shape=2, transform=chain_tran,
                       testval=np.asarray([1., 1.5]))
    init_probs = pm.Dirichlet('init_probs', a=tt.ones(2), shape=2)
    state_trans = pm.Dirichlet('state_trans', a=tt.ones(2), shape=(2, 2))
    states = StateTransitions('states', state_trans, init_probs, shape=len(vals_simple))
    y = PoissionProcess('Output', states, lambdas, observed=vals_simple)
    # BUG FIX: the original passed `sample=1000`, which is not a pm.sample
    # parameter — it was swallowed by **kwargs and the default number of draws
    # was used. The keyword for the number of kept draws is `draws`.
    trace = pm.sample(tune=2000, draws=1000, chains=2)
def test_chain_values():
    """Values drawn through a logodds->ordered chain must be non-decreasing."""
    chained = tr.Chain([tr.logodds, tr.ordered])
    vals = get_values(chained, Vector(R, 5), at.dvector, np.zeros(5))
    close_to_logical(np.diff(vals) >= 0, True, tol)
def test_chain_vector_transform():
    """Round-trip consistency of a logodds->ordered chain on a unit sorted vector."""
    chained = tr.Chain([tr.logodds, tr.ordered])
    check_vector_transform(chained, UnitSortedVector(3))
# First distribution is strictly before the other
# BUG FIX: the original assigned `lamda_selected` twice in a row — the switch on
# changepoint_1 was a dead store, immediately overwritten by the switch on
# changepoint_2. Only the assignment that actually took effect is kept here.
# TODO(review): confirm intent — if both changepoints were meant to partition
# the series into three segments, a nested switch (and a third rate) is needed.
lamda_selected = tt.switch(timesteps <= changepoint_2, lambda_1, lambda_2)

# Observations come from Poisson distributions with one of the priors
obs = pm.Poisson('obs', mu=lamda_selected, observed=y)


def logistic(L, x0, k=500, t_=np.linspace(0., 1., 1000)):
    """Logistic step of height L centred at x0 over the time grid t_.

    NOTE: the array default for t_ is shared across calls; it is only read,
    never mutated, so this is safe as written.
    """
    return L / (1 + tt.exp(-k * (t_ - x0)))


import pymc3.distributions.transforms as tr

with pm.Model() as m2:
    lambda0 = pm.Normal('lambda0', mu, sd=sd)
    lambdad = pm.Normal('lambdad', 0, sd=sd, shape=n_cpt)
    # Ordered changepoint locations in (0, 1).
    xform = tr.Chain([tr.LogOdds(), tr.Ordered()])
    tv = np.random.uniform(low=0.0, high=1.0, size=n_cpt)
    b = pm.Beta('b', 1., 1., shape=n_cpt, transform=xform, testval=tv)
    # Accumulate one logistic step per changepoint on the log scale.
    xx = lambda0
    for i in range(n_cpt):
        xx += logistic(lambdad[i], b[i])
    dx = tt.exp(xx)
    theta_ = pm.Deterministic('theta', dx)
    obs = pm.Poisson('obs', theta_, observed=y)

# sample
step_method = pm.NUTS(target_accept=0.90, max_treedepth=15)
cpt_trace = pm.sample(1000, chains=None, step=step_method, tune=1000)
cpt_smry = pm.summary(cpt_trace)
pm.traceplot(cpt_trace)
class TestElementWiseLogp(SeededTest):
    """Check that transformed free RVs produce consistent elementwise logp values.

    Each test builds a one-variable model with an explicit transform and verifies
    that logp on the transformed space (minus the Jacobian term) matches the logp
    of the untransformed distribution.
    """

    def build_model(self, distfam, params, size, transform, initval=None):
        """Return a Model holding one RV 'x' of `distfam` with the given transform."""
        if initval is not None:
            initval = pm.floatX(initval)
        with pm.Model() as m:
            distfam("x", size=size, transform=transform, initval=initval, **params)
        return m

    def check_transform_elementwise_logp(self, model):
        """Elementwise transforms: non-Jacobian logp equals untransformed logp."""
        rv = model.free_RVs[0]
        value_var = rv.tag.value_var
        assert rv.ndim == logpt(rv).ndim
        point = model.initial_point
        draw = np.random.randn(*point[value_var.name].shape)
        transform = value_var.tag.transform
        logp_notrans = logpt(rv, transform.backward(rv, draw), transformed=False)
        jacob_det = transform.jacobian_det(rv, aesara.shared(draw))
        assert logpt(rv).ndim == jacob_det.ndim
        actual = logpt(rv, draw, jacobian=False).eval()
        expected = logp_notrans.eval()
        close_to(actual, expected, tol)

    def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
        """Vector transforms: the transform collapses the last axis of the logp.

        NOTE(review): `vect_opt` is accepted for signature compatibility with
        the callers but is not used by this version of the check.
        """
        rv = model.free_RVs[0]
        value_var = rv.tag.value_var
        assert (rv.ndim - 1) == logpt(rv).ndim
        point = model.initial_point
        draw = np.random.randn(*point[value_var.name].shape)
        transform = value_var.tag.transform
        logp_nojac = logpt(rv, transform.backward(rv, draw), transformed=False)
        jacob_det = transform.jacobian_det(rv, aesara.shared(draw))
        assert logpt(rv).ndim == jacob_det.ndim
        # Compare with a relative tolerance scaled by the values themselves.
        actual = logpt(rv, draw.astype(aesara.config.floatX), jacobian=False).eval()
        expected = logp_nojac.eval()
        close_to(actual, expected, np.abs(0.5 * (actual + expected) * tol))

    @pytest.mark.parametrize(
        "sd,size",
        [
            (2.5, 2),
            (5.0, (2, 3)),
            (np.ones(3) * 10.0, (4, 3)),
        ],
    )
    def test_half_normal(self, sd, size):
        model = self.build_model(pm.HalfNormal, {"sd": sd}, size=size, transform=tr.log)
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize("lam,size", [(2.5, 2), (5.0, (2, 3)), (np.ones(3), (4, 3))])
    def test_exponential(self, lam, size):
        model = self.build_model(pm.Exponential, {"lam": lam}, size=size, transform=tr.log)
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize(
        "a,b,size",
        [
            (1.0, 1.0, 2),
            (0.5, 0.5, (2, 3)),
            (np.ones(3), np.ones(3), (4, 3)),
        ],
    )
    def test_beta(self, a, b, size):
        model = self.build_model(
            pm.Beta, {"alpha": a, "beta": b}, size=size, transform=tr.logodds
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize(
        "lower,upper,size",
        [
            (0.0, 1.0, 2),
            (0.5, 5.5, (2, 3)),
            (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3)),
        ],
    )
    def test_uniform(self, lower, upper, size):
        def transform_params(rv_var):
            # Uniform RV inputs are (rng, size, dtype, lower, upper).
            _, _, _, lower, upper = rv_var.owner.inputs
            lower = at.as_tensor_variable(lower) if lower is not None else None
            upper = at.as_tensor_variable(upper) if upper is not None else None
            return lower, upper

        bounded = tr.Interval(transform_params)
        model = self.build_model(
            pm.Uniform, {"lower": lower, "upper": upper}, size=size, transform=bounded
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize(
        "lower, c, upper, size",
        [
            (0.0, 1.0, 2.0, 2),
            (-10, 0, 200, (2, 3)),
            (np.zeros(3), np.ones(3), np.ones(3), (4, 3)),
        ],
    )
    def test_triangular(self, lower, c, upper, size):
        def transform_params(rv_var):
            # Triangular RV inputs are (rng, size, dtype, lower, c, upper).
            _, _, _, lower, _, upper = rv_var.owner.inputs
            lower = at.as_tensor_variable(lower) if lower is not None else None
            upper = at.as_tensor_variable(upper) if upper is not None else None
            return lower, upper

        bounded = tr.Interval(transform_params)
        model = self.build_model(
            pm.Triangular, {"lower": lower, "c": c, "upper": upper}, size=size, transform=bounded
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize(
        "mu,kappa,size",
        [(0.0, 1.0, 2), (-0.5, 5.5, (2, 3)), (np.zeros(3), np.ones(3), (4, 3))],
    )
    def test_vonmises(self, mu, kappa, size):
        model = self.build_model(
            pm.VonMises, {"mu": mu, "kappa": kappa}, size=size, transform=tr.circular
        )
        self.check_transform_elementwise_logp(model)

    @pytest.mark.parametrize(
        "a,size",
        [(np.ones(2), None), (np.ones((2, 3)) * 0.5, None), (np.ones(3), (4, ))],
    )
    def test_dirichlet(self, a, size):
        model = self.build_model(pm.Dirichlet, {"a": a}, size=size, transform=tr.stick_breaking)
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)

    def test_normal_ordered(self):
        model = self.build_model(
            pm.Normal,
            {"mu": 0.0, "sd": 1.0},
            size=3,
            initval=np.asarray([-1.0, 1.0, 4.0]),
            transform=tr.ordered,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize(
        "sd,size",
        [
            (2.5, (2, )),
            (np.ones(3), (4, 3)),
        ],
    )
    @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
    def test_half_normal_ordered(self, sd, size):
        initval = np.sort(np.abs(np.random.randn(*size)))
        model = self.build_model(
            pm.HalfNormal,
            {"sd": sd},
            size=size,
            initval=initval,
            transform=tr.Chain([tr.log, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize("lam,size", [(2.5, (2, )), (np.ones(3), (4, 3))])
    def test_exponential_ordered(self, lam, size):
        initval = np.sort(np.abs(np.random.randn(*size)))
        model = self.build_model(
            pm.Exponential,
            {"lam": lam},
            size=size,
            initval=initval,
            transform=tr.Chain([tr.log, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize(
        "a,b,size",
        [
            (1.0, 1.0, (2, )),
            (np.ones(3), np.ones(3), (4, 3)),
        ],
    )
    def test_beta_ordered(self, a, b, size):
        initval = np.sort(np.abs(np.random.rand(*size)))
        model = self.build_model(
            pm.Beta,
            {"alpha": a, "beta": b},
            size=size,
            initval=initval,
            transform=tr.Chain([tr.logodds, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize(
        "lower,upper,size",
        [(0.0, 1.0, (2, )), (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3))],
    )
    def test_uniform_ordered(self, lower, upper, size):
        def transform_params(rv_var):
            # Uniform RV inputs are (rng, size, dtype, lower, upper).
            _, _, _, lower, upper = rv_var.owner.inputs
            lower = at.as_tensor_variable(lower) if lower is not None else None
            upper = at.as_tensor_variable(upper) if upper is not None else None
            return lower, upper

        bounded = tr.Interval(transform_params)
        initval = np.sort(np.abs(np.random.rand(*size)))
        model = self.build_model(
            pm.Uniform,
            {"lower": lower, "upper": upper},
            size=size,
            initval=initval,
            transform=tr.Chain([bounded, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)

    @pytest.mark.parametrize(
        "mu,kappa,size",
        [(0.0, 1.0, (2, )), (np.zeros(3), np.ones(3), (4, 3))],
    )
    def test_vonmises_ordered(self, mu, kappa, size):
        initval = np.sort(np.abs(np.random.rand(*size)))
        model = self.build_model(
            pm.VonMises,
            {"mu": mu, "kappa": kappa},
            size=size,
            initval=initval,
            transform=tr.Chain([tr.circular, tr.ordered]),
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=0)

    @pytest.mark.parametrize(
        "lower,upper,size,transform",
        [
            (0.0, 1.0, (2, ), tr.stick_breaking),
            (0.5, 5.5, (2, 3), tr.stick_breaking),
            (np.zeros(3), np.ones(3), (4, 3), tr.Chain([tr.sum_to_1, tr.logodds])),
        ],
    )
    def test_uniform_other(self, lower, upper, size, transform):
        # Start on the simplex so simplex-style transforms get a valid point.
        initval = np.ones(size) / size[-1]
        model = self.build_model(
            pm.Uniform,
            {"lower": lower, "upper": upper},
            size=size,
            initval=initval,
            transform=transform,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)

    @pytest.mark.parametrize(
        "mu,cov,size,shape",
        [
            (np.zeros(2), np.diag(np.ones(2)), None, (2, )),
            (np.zeros(3), np.diag(np.ones(3)), (4, ), (4, 3)),
        ],
    )
    def test_mvnormal_ordered(self, mu, cov, size, shape):
        initval = np.sort(np.random.randn(*shape))
        model = self.build_model(
            pm.MvNormal,
            {"mu": mu, "cov": cov},
            size=size,
            initval=initval,
            transform=tr.ordered,
        )
        self.check_vectortransform_elementwise_logp(model, vect_opt=1)