Example #1
def test_logp_helper_exceptions():
    with pytest.raises(TypeError, match="When RV is not a pure distribution"):
        logp(at.exp(Normal.dist()), [1, 2])

    with pytest.raises(NotImplementedError,
                       match="PyMC could not infer logp of input variable"):
        logp(at.cos(Normal.dist()), 1)
Example #2
def marginal_mixture_logprob(op, values, rng, weights, *components, **kwargs):
    (value,) = values

    # single component
    if len(components) == 1:
        # Need to broadcast value across mixture axis
        mix_axis = -components[0].owner.op.ndim_supp - 1
        components_logp = logp(components[0], at.expand_dims(value, mix_axis))
    else:
        components_logp = at.stack(
            [logp(component, value) for component in components],
            axis=-1,
        )

    mix_logp = at.logsumexp(at.log(weights) + components_logp, axis=-1)

    mix_logp = check_parameters(
        mix_logp,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )

    return mix_logp
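The marginalization above is just a logsumexp over the mixture axis: log p(x) = logsumexp_i(log w_i + log p_i(x)). A minimal NumPy/SciPy sketch of the same computation for two hypothetical Normal components (weights and parameters are made up for illustration):

import numpy as np
import scipy.stats as sp
from scipy.special import logsumexp

w = np.array([0.3, 0.7])  # mixture weights, sum to 1
x = 1.5  # value at which the mixture logp is evaluated
components_logp = np.array([sp.norm(0, 1).logpdf(x), sp.norm(2, 0.5).logpdf(x)])
mix_logp = logsumexp(np.log(w) + components_logp, axis=-1)

# Same result as summing the weighted densities directly
assert np.isclose(np.exp(mix_logp), w[0] * sp.norm(0, 1).pdf(x) + w[1] * sp.norm(2, 0.5).pdf(x))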
Example #3
    def test_single_multivariate_component_deterministic_weights(self, weights, component, size):
        # This test needs seeding to avoid repetitions
        rngs = [
            aesara.shared(np.random.default_rng(seed))
            for seed in self.get_random_state().randint(2**30, size=2)
        ]
        mix = Mixture.dist(weights, component, size=size, rngs=rngs)
        mix_eval = mix.eval()

        # Test shape
        # component shape is either (4, 2, 3) or (2, 3)
        # weights shape is either (4, 2) or (2,)
        if size is not None:
            expected_shape = size + (3,)
        elif component.ndim == 3 or weights.ndim == 2:
            expected_shape = (4, 3)
        else:
            expected_shape = (3,)
        assert mix_eval.shape == expected_shape

        # Test draws
        totals = mix_eval.sum(-1)
        expected_large_count = (weights == 1)[..., 1]
        assert np.all((totals == 10_000) == expected_large_count)
        repetitions = np.unique(mix_eval[..., 0]).size < totals.size
        assert not repetitions

        # Test logp
        mix_logp_eval = logp(mix, mix_eval).eval()
        expected_logp_shape = expected_shape[:-1]
        assert mix_logp_eval.shape == expected_logp_shape
        bcast_weights = np.broadcast_to(weights, (*expected_logp_shape, 2))
        expected_logp = logp(component, mix_eval[..., None, :]).eval()[bcast_weights == 1]
        expected_logp = expected_logp.reshape(expected_logp_shape)
        assert np.allclose(mix_logp_eval, expected_logp)
Example #4
def ar_logp(op, values, rhos, sigma, init_dist, steps, noise_rng, **kwargs):
    (value,) = values

    ar_order = op.ar_order
    constant_term = op.constant_term

    # Convolve rhos with values
    if constant_term:
        expectation = at.add(
            rhos[..., 0, None],
            *(
                rhos[..., i + 1, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            ),
        )
    else:
        expectation = at.add(
            *(
                rhos[..., i, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            )
        )
    # Compute and collapse logp across time dimension
    innov_logp = at.sum(
        logp(Normal.dist(0, sigma[..., None]), value[..., ar_order:] - expectation), axis=-1
    )
    init_logp = logp(init_dist, value[..., :ar_order])
    if init_dist.owner.op.ndim_supp == 0:
        init_logp = at.sum(init_logp, axis=-1)
    return init_logp + innov_logp
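The slicing in the convolution above aligns rhos so that the lag-1 coefficient multiplies the previous value, the lag-2 coefficient the value before that, and so on. A small NumPy sketch checking that alignment for a hypothetical AR(2) without constant term (coefficients and series are made up):

import numpy as np

rhos = np.array([0.5, -0.3])  # lag-1 and lag-2 coefficients
value = np.arange(6, dtype=float)  # toy series of length 6
ar_order = 2

expectation = sum(
    rhos[i] * value[ar_order - (i + 1) : -(i + 1)] for i in range(ar_order)
)
# expectation[k] is the conditional mean of value[k + ar_order]:
# rhos[0] * value[k + ar_order - 1] + rhos[1] * value[k + ar_order - 2]
manual = np.array(
    [rhos[0] * value[t - 1] + rhos[1] * value[t - 2] for t in range(ar_order, len(value))]
)
assert np.allclose(expectation, manual)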
Example #5
def marginal_mixture_logprob(op, values, rng, weights, *components, **kwargs):
    (value,) = values

    # single component
    if len(components) == 1:
        # Need to broadcast value across mixture axis
        mix_axis = -components[0].owner.op.ndim_supp - 1
        components_logp = logp(components[0], at.expand_dims(value, mix_axis))
    else:
        components_logp = at.stack(
            [logp(component, value) for component in components],
            axis=-1,
        )

    mix_logp = at.logsumexp(at.log(weights) + components_logp, axis=-1, keepdims=True)

    # Squeeze stack dimension
    # There is an Aesara bug in squeeze with negative axis
    # https://github.com/aesara-devs/aesara/issues/830
    # mix_logp = at.squeeze(mix_logp, axis=-1)
    mix_logp = at.squeeze(mix_logp, axis=mix_logp.ndim - 1)

    mix_logp = check_parameters(
        mix_logp,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )

    return mix_logp
Example #6
def test_logp_helper():
    value = at.vector("value")
    x = Normal.dist(0, 1)

    x_logp = logp(x, value)
    np.testing.assert_almost_equal(x_logp.eval({value: [0, 1]}), sp.norm(0, 1).logpdf([0, 1]))

    x_logp = logp(x, [0, 1])
    np.testing.assert_almost_equal(x_logp.eval(), sp.norm(0, 1).logpdf([0, 1]))
Example #7
    def test_multivariate_init_dist(self):
        init_dist = Dirichlet.dist(a=np.full((5, 2), [1, 10]))
        x = AR.dist(rho=[0, 0], init_dist=init_dist, steps=0)

        x_eval = x.eval()
        assert x_eval.shape == (5, 2)

        init_dist_eval = init_dist.eval()
        init_dist_logp_eval = logp(init_dist, init_dist_eval).eval()
        x_logp_eval = logp(x, init_dist_eval).eval()
        assert x_logp_eval.shape == (5,)
        assert np.allclose(x_logp_eval, init_dist_logp_eval)
Example #8
    def test_with_mvnormal(self):
        # 10 batch, 3-variate Gaussian
        mu = np.random.randn(self.mixture_comps, 3)
        mat = np.random.randn(3, 3)
        cov = mat @ mat.T
        chol = np.linalg.cholesky(cov)
        w = np.ones(self.mixture_comps) / self.mixture_comps

        with Model() as model:
            comp_dists = MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
            mixture = Mixture("mixture", w=w, comp_dists=comp_dists, shape=(3,))
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(1, 3))
        log_sum_exp = logsumexp(comp_logp.eval() + np.log(w), axis=0, keepdims=True).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #9
    def test_with_multinomial(self, batch_shape):
        p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
        n = 100 * np.ones((*batch_shape, 1))
        w = np.ones(self.mixture_comps) / self.mixture_comps
        mixture_axis = len(batch_shape)
        with Model() as model:
            comp_dists = Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
            mixture = Mixture(
                "mixture",
                w=w,
                comp_dists=comp_dists,
                shape=(*batch_shape, 3),
            )
            prior = sample_prior_predictive(samples=self.n_samples, return_inferencedata=False)

        assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
        assert draw(mixture, draws=self.size).shape == (self.size, *batch_shape, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        initial_point = model.compute_initial_point()
        comp_logp = logp(comp_dists, initial_point["mixture"].reshape(*batch_shape, 1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w), axis=mixture_axis, keepdims=True
        ).sum()
        assert_allclose(
            model.compile_logp()(initial_point),
            log_sum_exp,
            rtol,
        )
Example #10
    def logp(value, distribution, lower, upper):
        """
        Calculate log-probability of Bounded distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value for which log-probability is calculated.
        distribution: TensorVariable
            Distribution which is being bounded.
        lower: numeric
            Lower bound for the distribution being bounded.
        upper: numeric
            Upper bound for the distribution being bounded.

        Returns
        -------
        TensorVariable
        """
        res = at.switch(
            at.or_(at.lt(value, lower), at.gt(value, upper)),
            -np.inf,
            logp(distribution, value),
        )

        return check_parameters(
            res,
            lower <= upper,
            msg="lower <= upper",
        )
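The switch above simply masks out-of-bounds values with -inf before the parameter check runs. A rough NumPy sketch of that masking, with a made-up base logp standing in for logp(distribution, value):

import numpy as np

value = np.array([-2.0, 0.5, 3.0])
lower, upper = 0.0, 1.0
base_logp = np.array([-1.0, -0.9, -1.2])  # stand-in for logp(distribution, value)

res = np.where((value < lower) | (value > upper), -np.inf, base_logp)
# Only the in-bounds entry keeps its log-probability
assert np.array_equal(np.isneginf(res), np.array([True, False, True]))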
Example #11
    def test_list_univariate_components_deterministic_weights(self, weights, components, size):
        # Size can't be smaller than what is implied by replication dimensions
        if size is not None and len(size) < max(components[0].ndim, weights.ndim - 1):
            return

        mix = Mixture.dist(weights, components, size=size)
        mix_eval = mix.eval()

        # Test shape
        # components[0] shape is either (4, 3), (3,) or ()
        # weights shape is either (3, 2) or (2,)
        if size is not None:
            expected_shape = size
        elif components[0].ndim == 2:
            expected_shape = (4, 3)
        elif components[0].ndim == 1 or weights.ndim == 2:
            expected_shape = (3,)
        else:
            expected_shape = ()
        assert mix_eval.shape == expected_shape

        # Test draws
        expected_positive = np.zeros_like(mix_eval)
        if expected_positive.ndim > 0:
            expected_positive[..., :] = (weights == 1)[..., 1]
        assert np.all((mix_eval > 0) == expected_positive)
        repetitions = np.unique(mix_eval).size < mix_eval.size
        assert not repetitions

        # Test logp
        mix_logp_eval = logp(mix, mix_eval).eval()
        assert mix_logp_eval.shape == expected_shape
        bcast_weights = np.broadcast_to(weights, (*expected_shape, 2))
        expected_logp = np.stack(
            (
                logp(components[0], mix_eval).eval(),
                logp(components[1], mix_eval).eval(),
            ),
            axis=-1,
        )[bcast_weights == 1]
        expected_logp = expected_logp.reshape(expected_shape)
        assert np.allclose(mix_logp_eval, expected_logp)
Example #12
def test_logcdf_transformed_argument():
    with Model() as m:
        sigma = HalfFlat("sigma")
        x = Normal("x", 0, sigma)
        Potential("norm_term", -logcdf(x, 1.0))

    sigma_value_log = -1.0
    sigma_value = np.exp(sigma_value_log)
    x_value = 0.5

    observed = m.logp_nojac({"sigma_log__": sigma_value_log, "x": x_value})
    expected = logp(TruncatedNormal.dist(0, sigma_value, lower=None, upper=1.0), x_value).eval()
    assert np.isclose(observed, expected)
Example #13
    def test_list_multivariate_components_deterministic_weights(self, weights, components, size):
        mix = Mixture.dist(weights, components, size=size)
        mix_eval = mix.eval()

        # Test shape
        # components[0] shape is either (4, 3) or (3,)
        # weights shape is either (4, 2) or (2,)
        if size is not None:
            expected_shape = size + (3,)
        elif components[0].ndim == 2 or weights.ndim == 2:
            expected_shape = (4, 3)
        else:
            expected_shape = (3,)
        assert mix_eval.shape == expected_shape

        # Test draws
        expected_positive = np.zeros_like(mix_eval)
        expected_positive[..., :] = (weights == 1)[..., 1, None]
        assert np.all((mix_eval > 0) == expected_positive)
        repetitions = np.unique(mix_eval).size < mix_eval.size
        assert not repetitions

        # Test logp
        # MvNormal logp is currently limited to 2d values
        expectation = pytest.raises(ValueError) if mix_eval.ndim > 2 else does_not_raise()
        with expectation:
            mix_logp_eval = logp(mix, mix_eval).eval()
            assert mix_logp_eval.shape == expected_shape[:-1]
            bcast_weights = np.broadcast_to(weights, (*expected_shape[:-1], 2))
            expected_logp = np.stack(
                (
                    logp(components[0], mix_eval).eval(),
                    logp(components[1], mix_eval).eval(),
                ),
                axis=-1,
            )[bcast_weights == 1]
            expected_logp = expected_logp.reshape(expected_shape[:-1])
            assert np.allclose(mix_logp_eval, expected_logp)
Example #14
    def logp(
        value: at.Variable,
        mu: at.Variable,
        sigma: at.Variable,
        init: at.Variable,
        steps: at.Variable,
    ) -> at.TensorVariable:
        """Calculate log-probability of Gaussian Random Walk distribution at specified value."""

        # Calculate initialization logp
        init_logp = logp(init, value[..., 0])

        # Make time series stationary around the mean value
        stationary_series = value[..., 1:] - value[..., :-1]
        # Add one dimension to the right, so that mu and sigma broadcast safely along
        # the steps dimension
        series_logp = logp(Normal.dist(mu[..., None], sigma[..., None]), stationary_series)

        return check_parameters(
            init_logp + series_logp.sum(axis=-1),
            steps > 0,
            msg="steps > 0",
        )
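The random-walk logp therefore factors into the init distribution evaluated at the first point plus independent Normal logps of the successive differences. A short NumPy/SciPy sketch of that decomposition, assuming a standard Normal init (all numbers are illustrative):

import numpy as np
import scipy.stats as sp

mu, sigma = 0.5, 1.2
value = np.array([0.0, 0.3, 1.1, 1.4])

init_logp = sp.norm(0, 1).logpdf(value[0])  # assumed standard Normal init
series_logp = sp.norm(mu, sigma).logpdf(np.diff(value)).sum()
grw_logp = init_logp + series_logp  # mirrors init_logp + series_logp.sum(axis=-1) above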
Example #15
def test_logp_helper_derived_rv():
    assert np.isclose(
        logp(at.exp(Normal.dist()), 5).eval(),
        logp(LogNormal.dist(), 5).eval(),
    )
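The equality tested above follows from the change of variables y = exp(x): log p_Y(y) = log p_X(log y) - log y. A hedged SciPy check of that identity outside of PyMC:

import numpy as np
import scipy.stats as sp

y = 5.0
lognormal_logp = sp.lognorm(s=1).logpdf(y)
change_of_vars = sp.norm(0, 1).logpdf(np.log(y)) - np.log(y)
assert np.isclose(lognormal_logp, change_of_vars)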