Example #1
    def test_order1_logp(self):
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.99])
        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=len(data))
            z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)

        with Model() as t_constant:
            y = AR(
                "y",
                np.hstack((0.3, phi)),
                sigma=1,
                init_dist=Flat.dist(),
                shape=len(data),
                constant=True,
            )
            z = Normal("z",
                       mu=0.3 + phi * data[:-1],
                       sigma=1,
                       shape=len(data) - 1)
        ar_like = t_constant.compile_logp(y)({"y": data})
        reg_like = t_constant.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)
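Note on Example #1: the equivalence being tested follows directly from the AR(1) definition. Conditional on the first value, each step is a Gaussian regression on its predecessor,

    y_t = \phi\, y_{t-1} + \epsilon_t, \qquad \epsilon_t \sim \mathcal{N}(0, \sigma^2),

so with a Flat init_dist the AR logp reduces to the Normal logp of y[1:] given mu = phi * y[:-1], which is exactly what the z variable encodes.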
Example #2
    def test_batched_rhos(self):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order)
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            AR("y", beta, sigma=1.0, shape=(batch_size, steps), initval=y_tp)
        with Model() as t1:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            for i in range(batch_size):
                AR(f"y_{i}", beta[i], sigma=1.0, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp[1] = 0  # Should always be close to zero
        y_eval = t0["y"].eval({t0["beta"]: beta_tp})
        assert y_eval.shape == (batch_size, steps)
        assert np.all(abs(y_eval[1]) < 5)
Example #3
def test_logp_helper_exceptions():
    with pytest.raises(TypeError, match="When RV is not a pure distribution"):
        logp(at.exp(Normal.dist()), [1, 2])

    with pytest.raises(NotImplementedError,
                       match="PyMC could not infer logp of input variable"):
        logp(at.cos(Normal.dist()), 1)
Example #4
def test_missing_logp():
    with Model() as m:
        theta1 = Normal("theta1", 0, 5, observed=[0, 1, 2, 3, 4])
        theta2 = Normal("theta2", mu=theta1, observed=[0, 1, 2, 3, 4])
    m_logp = m.logp()

    with Model() as m_missing:
        theta1 = Normal("theta1", 0, 5, observed=np.array([0, 1, np.nan, 3, np.nan]))
        theta2 = Normal("theta2", mu=theta1, observed=np.array([np.nan, np.nan, 2, np.nan, 4]))
    m_missing_logp = m_missing.logp({"theta1_missing": [2, 4], "theta2_missing": [0, 1, 3]})

    assert m_logp == m_missing_logp
Example #5
    def test_batched_sigma(self):
        ar_order, steps, batch_size = 4, 100, (7, 5)
        # The AR order cannot be inferred from beta_tp because it is a shared
        # variable rather than a fixed-length constant, so we pass ar_order
        # explicitly below.
        beta_tp = aesara.shared(np.random.randn(ar_order))
        sigma_tp = np.abs(np.random.randn(*batch_size))
        y_tp = np.random.randn(*batch_size, steps)
        with Model() as t0:
            sigma = HalfNormal("sigma",
                               1.0,
                               shape=batch_size,
                               initval=sigma_tp)
            AR(
                "y",
                beta_tp,
                sigma=sigma,
                init_dist=Normal.dist(0, sigma[..., None]),
                size=batch_size,
                steps=steps,
                initval=y_tp,
                ar_order=ar_order,
            )
        with Model() as t1:
            sigma = HalfNormal("beta", 1.0, shape=batch_size, initval=sigma_tp)
            for i in range(batch_size[0]):
                for j in range(batch_size[1]):
                    AR(
                        f"y_{i}{j}",
                        beta_tp,
                        sigma=sigma[i][j],
                        init_dist=Normal.dist(0, sigma[i][j]),
                        shape=steps,
                        initval=y_tp[i, j],
                        ar_order=ar_order,
                    )

        # Check logp shape
        sigma_logp, y_logp = t0.compile_logp(sum=False)(t0.initial_point())
        assert tuple(y_logp.shape) == batch_size

        np.testing.assert_allclose(
            sigma_logp.sum() + y_logp.sum(),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp.set_value(np.zeros((ar_order,)))  # Should always be close to zero
        sigma_tp = np.full(batch_size, [0.01, 0.1, 1, 10, 100])
        y_eval = t0["y"].eval({t0["sigma"]: sigma_tp})
        assert y_eval.shape == (*batch_size, steps + ar_order)
        assert np.allclose(y_eval.std(axis=(0, 2)), [0.01, 0.1, 1, 10, 100],
                           rtol=0.1)
Example #6
def test_missing(data):

    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            _ = Normal("y", x, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #7
    def test_batched_init_dist(self):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = aesara.shared(np.random.randn(ar_order), shape=(3, ))
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            init_dist = Normal.dist(0.0, 0.01, size=(batch_size, ar_order))
            AR("y",
               beta_tp,
               sigma=0.01,
               init_dist=init_dist,
               steps=steps,
               initval=y_tp)
        with Model() as t1:
            for i in range(batch_size):
                AR(f"y_{i}", beta_tp, sigma=0.01, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        # Next values should keep close to previous ones
        beta_tp.set_value(np.full((ar_order, ), 1 / ar_order))
        # The init dist is cloned when the AR is created, so the original variable is
        # not part of the AR graph; retrieve the one actually used by hand.
        init_dist = t0["y"].owner.inputs[2]
        init_dist_tp = np.full((batch_size, ar_order),
                               (np.arange(batch_size) * 100)[:, None])
        y_eval = t0["y"].eval({init_dist: init_dist_tp})
        assert y_eval.shape == (batch_size, steps + ar_order)
        assert np.allclose(y_eval[:, -10:].mean(-1),
                           np.arange(batch_size) * 100,
                           rtol=0.1,
                           atol=0.5)
Example #8
def ar_logp(op, values, rhos, sigma, init_dist, steps, noise_rng, **kwargs):
    (value,) = values

    ar_order = op.ar_order
    constant_term = op.constant_term

    # Convolve rhos with values
    if constant_term:
        expectation = at.add(
            rhos[..., 0, None],
            *(
                rhos[..., i + 1, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            ),
        )
    else:
        expectation = at.add(
            *(
                rhos[..., i, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            )
        )
    # Compute and collapse logp across time dimension
    innov_logp = at.sum(
        logp(Normal.dist(0, sigma[..., None]), value[..., ar_order:] - expectation), axis=-1
    )
    init_logp = logp(init_dist, value[..., :ar_order])
    if init_dist.owner.op.ndim_supp == 0:
        init_logp = at.sum(init_logp, axis=-1)
    return init_logp + innov_logp
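For reference, ar_logp implements the standard AR(p) likelihood factorization: the first ar_order values are scored under init_dist, and every later value is scored as a Gaussian innovation around the convolution of the rhos with the lagged values,

    \log p(y_{1:T}) = \log p_{\mathrm{init}}(y_{1:p}) + \sum_{t=p+1}^{T} \log \mathcal{N}\!\left(y_t \mid c + \sum_{i=1}^{p} \rho_i\, y_{t-i},\ \sigma\right),

where c = rhos[..., 0] when constant_term is set and c = 0 otherwise.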
Example #9
def test_logpt_basic():
    """Make sure we can compute a log-likelihood for a hierarchical model with transforms."""

    with Model() as m:
        a = Uniform("a", 0.0, 1.0)
        c = Normal("c")
        b_l = c * a + 2.0
        b = Uniform("b", b_l, b_l + 1.0)

    a_value_var = m.rvs_to_values[a]
    assert a_value_var.tag.transform

    b_value_var = m.rvs_to_values[b]
    assert b_value_var.tag.transform

    c_value_var = m.rvs_to_values[c]

    b_logp = logpt(b, b_value_var, sum=False)

    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert len(res_rv_ancestors) == 0
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
Example #10
    def logp(self, value):
        """
        Calculate log-probability of AR distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        if self.constant:
            x = at.add(*(self.rho[i + 1] * value[self.p - (i + 1):-(i + 1)]
                         for i in range(self.p)))
            eps = value[self.p:] - self.rho[0] - x
        else:
            if self.p == 1:
                x = self.rho * value[:-1]
            else:
                x = at.add(*(self.rho[i] * value[self.p - (i + 1):-(i + 1)]
                             for i in range(self.p)))
            eps = value[self.p:] - x

        innov_like = Normal.dist(mu=0.0, tau=self.tau).logp(eps)
        init_like = self.init.logp(value[:self.p])

        return at.sum(innov_like) + at.sum(init_like)
Example #11
def test_logpt_incsubtensor(indices, size):
    """Make sure we can compute a log-likelihood for ``Y[idx] = data`` where ``Y`` is univariate."""

    mu = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    data = mu[indices]
    sigma = 0.001
    rng = np.random.RandomState(232)
    a_val = rng.normal(mu, sigma, size=size).astype(aesara.config.floatX)

    rng = aesara.shared(rng, borrow=False)
    a = Normal.dist(mu, sigma, size=size, rng=rng)
    a_value_var = a.type()
    a.name = "a"

    a_idx = at.set_subtensor(a[indices], data)

    assert isinstance(a_idx.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1))

    a_idx_value_var = a_idx.type()
    a_idx_value_var.name = "a_idx_value"

    a_idx_logp = logpt(a_idx, {a_idx: a_value_var}, sum=False)

    logp_vals = a_idx_logp.eval({a_value_var: a_val})

    # The indices that were set should all have the same log-likelihood values,
    # because the values they were set to correspond to the unique means along
    # that dimension.  This helps us confirm that the log-likelihood is
    # associating the assigned values with their correct parameters.
    a_val_idx = a_val.copy()
    a_val_idx[indices] = data
    exp_obs_logps = sp.norm.logpdf(a_val_idx, mu, sigma)
    np.testing.assert_almost_equal(logp_vals, exp_obs_logps)
Example #12
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal("beta1", 1, 1)
        beta2 = Normal("beta2", 2, 1)
        latent = Normal("theta", size=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal("o1", mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive(return_inferencedata=False)
        assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys())
        # TODO: Assert something
        trace = sample(chains=1, draws=50)
Example #13
def test_missing_with_predictors():
    predictors = array([0.5, 1, 0.5, 2, 0.3])
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            y = Normal("y", x * predictors, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #14
def test_AR_nd():
    # Multidimensional AR (p=3)
    p, T, n = 3, 100, 5
    beta_tp = np.random.randn(p, n)
    y_tp = np.random.randn(T, n)
    with Model() as t0:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)

    with Model() as t1:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        for i in range(n):
            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])

    np.testing.assert_allclose(t0.logp(t0.recompute_initial_point()),
                               t1.logp(t1.recompute_initial_point()))
Example #15
    def dist(
        cls, mu=0.0, sigma=1.0, *, init=None, steps=None, size=None, **kwargs
    ) -> at.TensorVariable:

        mu = at.as_tensor_variable(floatX(mu))
        sigma = at.as_tensor_variable(floatX(sigma))

        steps = get_steps(
            steps=steps,
            shape=kwargs.get("shape", None),
            step_shape_offset=1,
        )
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps))

        # If no init distribution is passed, default to a standard Normal
        if init is None:
            init = Normal.dist(0, 1)
        else:
            if not (
                isinstance(init, at.TensorVariable)
                and init.owner is not None
                and isinstance(init.owner.op, RandomVariable)
                and init.owner.op.ndim_supp == 0
            ):
                raise TypeError("init must be a univariate distribution variable")
            check_dist_not_registered(init)

        # Ignores logprob of init var because that's accounted for in the logp method
        init = ignore_logprob(init)

        return super().dist([mu, sigma, init, steps], size=size, **kwargs)
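This dist() signature, with step_shape_offset=1 so the series is one element longer than steps, matches a random-walk style distribution. A minimal sketch of calling it, assuming the classmethod belongs to pymc's GaussianRandomWalk:

    from pymc import GaussianRandomWalk

    # 10 values in total: the init draw plus steps=9 Gaussian increments
    grw = GaussianRandomWalk.dist(mu=0.0, sigma=1.0, steps=9)
    assert grw.eval().shape == (10,)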
Example #16
def test_ignore_logprob_model():
    # logp that does not depend on input
    def logp(value, x):
        return value

    with Model() as m:
        x = Normal.dist()
        y = DensityDist("y", x, logp=logp)
    # Aeppl raises a KeyError when it finds an unexpected RV
    with pytest.raises(KeyError):
        joint_logp([y], {y: y.type()})

    with Model() as m:
        x = ignore_logprob(Normal.dist())
        y = DensityDist("y", x, logp=logp)
    assert joint_logp([y], {y: y.type()})
Example #17
def test_unexpected_rvs():
    with Model() as model:
        x = Normal("x")
        y = DensityDist("y", logp=lambda *args: x)

    with pytest.raises(ValueError,
                       match="^Random variables detected in the logp graph"):
        model.logp()
Example #18
def step(*args):
    *prev_xs, reversed_rhos, sigma, rng = args
    if constant_term:
        mu = reversed_rhos[-1] + at.sum(prev_xs * reversed_rhos[:-1], axis=0)
    else:
        mu = at.sum(prev_xs * reversed_rhos, axis=0)
    next_rng, new_x = Normal.dist(mu=mu, sigma=sigma, rng=rng).owner.outputs
    return new_x, {rng: next_rng}
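A minimal NumPy sketch of what a single scan step computes, with hypothetical numbers: prev_xs holds the ar_order previous states oldest-first, and reversed_rhos holds the coefficients in reverse so the elementwise product pairs rho_i with x_{t-i} (the constant term, if any, sits in the last slot):

    import numpy as np

    rng = np.random.default_rng(0)
    sigma = 0.1
    prev_xs = np.array([0.7, 1.2])        # (x_{t-2}, x_{t-1}), oldest tap first
    reversed_rhos = np.array([0.3, 0.5])  # (rho_2, rho_1), reversed to align with the taps
    mu = np.sum(prev_xs * reversed_rhos)  # rho_1 * x_{t-1} + rho_2 * x_{t-2}
    new_x = rng.normal(mu, sigma)         # one innovation draw, as in the scan step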
Example #19
def test_joint_logp_subtensor():
    """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables."""

    size = 5

    mu_base = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    rng = aesara.shared(np.random.RandomState(232), borrow=True)

    A_rv = Normal.dist(mu, sigma, rng=rng)
    A_rv.name = "A"

    p = 0.5

    I_rv = Bernoulli.dist(p, size=size, rng=rng)
    I_rv.name = "I"

    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1]:]]

    assert isinstance(A_idx.owner.op,
                      (Subtensor, AdvancedSubtensor, AdvancedSubtensor1))

    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"

    I_value_var = I_rv.type()
    I_value_var.name = "I_value"

    A_idx_logps = joint_logp(
        A_idx,
        {A_idx: A_idx_value_var, I_rv: I_value_var},
        sum=False,
    )
    A_idx_logp = at.add(*A_idx_logps)

    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)

    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])

    decimals = select_by_precision(float64=6, float32=4)

    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size).astype(I_rv.dtype)

        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1]:]], sigma)
        A_idx_value = norm_sp.rvs().astype(A_idx.dtype)

        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)

        logp_vals = logp_vals_fn(A_idx_value, I_value)

        np.testing.assert_almost_equal(logp_vals,
                                       exp_obs_logps,
                                       decimal=decimals)
Example #20
def test_logcdf_helper():
    value = at.vector("value")
    x = Normal.dist(0, 1, size=2)

    x_logcdf = logcdf(x, value, sum=False)
    np.testing.assert_almost_equal(x_logcdf.eval({value: [0, 1]}), sp.norm(0, 1).logcdf([0, 1]))

    x_logcdf = logcdf(x, [0, 1], sum=False)
    np.testing.assert_almost_equal(x_logcdf.eval(), sp.norm(0, 1).logcdf([0, 1]))
Example #21
    def __init__(self, w, mu, sigma=None, tau=None, sd=None, comp_shape=(), *args, **kwargs):
        if sd is not None:
            sigma = sd
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)

        self.mu = mu = at.as_tensor_variable(mu)
        self.sigma = self.sd = sigma = at.as_tensor_variable(sigma)

        super().__init__(w, Normal.dist(mu, sigma=sigma, shape=comp_shape), *args, **kwargs)
Example #22
def test_logcdf_helper():
    value = at.vector("value")
    x = Normal.dist(0, 1)

    x_logcdf = logcdf(x, value)
    np.testing.assert_almost_equal(x_logcdf.eval({value: [0, 1]}), sp.norm(0, 1).logcdf([0, 1]))

    x_logcdf = logcdf(x, [0, 1])
    np.testing.assert_almost_equal(x_logcdf.eval(), sp.norm(0, 1).logcdf([0, 1]))
Example #23
    def dist(
        cls,
        rho,
        sigma=None,
        tau=None,
        *,
        init_dist=None,
        steps=None,
        constant=False,
        ar_order=None,
        **kwargs,
    ):
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        sigma = at.as_tensor_variable(floatX(sigma))
        rhos = at.atleast_1d(at.as_tensor_variable(floatX(rho)))

        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        ar_order = cls._get_ar_order(rhos=rhos, constant=constant, ar_order=ar_order)
        steps = get_steps(steps=steps, shape=kwargs.get("shape", None), step_shape_offset=ar_order)
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps), ndim=0)

        if init_dist is not None:
            if not isinstance(init_dist, TensorVariable) or not isinstance(
                init_dist.owner.op, RandomVariable
            ):
                raise ValueError(
                    f"Init dist must be a distribution created via the `.dist()` API, "
                    f"got {type(init_dist)}"
                )
            check_dist_not_registered(init_dist)
            if init_dist.owner.op.ndim_supp > 1:
                raise ValueError(
                    "Init distribution must have a scalar or vector support dimension, "
                    f"got ndim_supp={init_dist.owner.op.ndim_supp}."
                )
        else:
            warnings.warn(
                "Initial distribution not specified, defaulting to "
                "`Normal.dist(0, 100, shape=...)`. You can specify an init_dist "
                "manually to suppress this warning.",
                UserWarning,
            )
            init_dist = Normal.dist(0, 100, shape=(*sigma.shape, ar_order))

        # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term
        init_dist = ignore_logprob(init_dist)

        return super().dist([rhos, sigma, init_dist, steps, ar_order, constant], **kwargs)
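A minimal direct call into this classmethod, mirroring the keywords used in Example #27 (the values are illustrative):

    from pymc import AR, Normal

    x = AR.dist(
        rho=[0.0, 0.9],              # leading entry is the constant term when constant=True
        sigma=1.0,
        init_dist=Normal.dist(0, 1),
        constant=True,
        steps=20,
    )
    draw = x.eval()                  # one forward simulation of length ar_order + steps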
Example #24
def test_AR():
    # AR1
    data = np.array([0.3, 1, 2, 3, 4])
    phi = np.array([0.99])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR1 and AR(1)
    with Model() as t:
        rho = Normal("rho", 0.0, 1.0)
        y1 = AR1("y1", rho, 1.0, observed=data)
        y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
    initial_point = t.recompute_initial_point()
    np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

    # AR1 + constant
    with Model() as t:
        y = AR("y",
               np.hstack((0.3, phi)),
               sigma=1,
               shape=len(data),
               constant=True)
        z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR2
    phi = np.array([0.84, 0.10])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z",
                   mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                   sigma=1,
                   shape=len(data) - 2)
    ar_like = t["y"].logp({"z": data[2:], "y": data})
    reg_like = t["z"].logp({"z": data[2:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)
Example #25
def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed_interval__" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert isinstance(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, interval
        )

        prior_trace = sample_prior_predictive(return_inferencedata=False)

        # Make sure the observed + missing combined deterministics have the
        # same shape as the original observation vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing parts of the combined deterministic match the
        # sampled missing and observed variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(
            chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False
        )

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed and deterministic parts match
        pp_trace = sample_posterior_predictive(trace, return_inferencedata=False)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
Example #26
def test_model_unchanged_logprob_access():
    # Issue #5007
    with Model() as model:
        a = Normal("a")
        c = Uniform("c", lower=a - 1, upper=1)

    original_inputs = set(aesara.graph.graph_inputs([c]))
    # Accessing model.logpt should not mutate the graph
    model.logpt
    new_inputs = set(aesara.graph.graph_inputs([c]))
    assert original_inputs == new_inputs
Example #27
    def test_constant_random(self):
        x = AR.dist(
            rho=[100, 0, 0],
            sigma=0.1,
            init_dist=Normal.dist(-100.0, sigma=0.1),
            constant=True,
            shape=(6,),
        )
        x_eval = x.eval()
        assert np.allclose(x_eval[:2], -100, rtol=0.1)
        assert np.allclose(x_eval[2:], 100, rtol=0.1)
Example #28
    def __new__(cls,
                name,
                w,
                mu,
                sigma=None,
                tau=None,
                comp_shape=(),
                **kwargs):
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)

        return Mixture(name, w, Normal.dist(mu, sigma=sigma, size=comp_shape),
                       **kwargs)
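A minimal usage sketch for this wrapper, building a two-component mixture (names and numbers are illustrative):

    from pymc import Model, NormalMixture

    with Model() as m:
        nm = NormalMixture("nm", w=[0.3, 0.7], mu=[-1.0, 1.0], sigma=[1.0, 1.0])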
Example #29
    def test_order2_logp(self):
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.84, 0.10])
        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=len(data))
            z = Normal("z",
                       mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                       sigma=1,
                       shape=len(data) - 2)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[2:]})
        np.testing.assert_allclose(ar_like, reg_like)
Example #30
def test_logcdf_transformed_argument():
    with Model() as m:
        sigma = HalfFlat("sigma")
        x = Normal("x", 0, sigma)
        Potential("norm_term", -logcdf(x, 1.0))

    sigma_value_log = -1.0
    sigma_value = np.exp(sigma_value_log)
    x_value = 0.5

    observed = m.logp_nojac({"sigma_log__": sigma_value_log, "x": x_value})
    expected = logp(TruncatedNormal.dist(0, sigma_value, lower=None, upper=1.0), x_value).eval()
    assert np.isclose(observed, expected)
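The expected value used here follows from truncation algebra: adding a Potential of -logcdf(x, 1.0) renormalizes the Normal to the region x <= 1,

    \log p_{\mathrm{trunc}}(x) = \log \mathcal{N}(x \mid 0, \sigma) - \log \Phi\!\left(\tfrac{1}{\sigma}\right),

which is exactly the logp of TruncatedNormal.dist(0, sigma_value, lower=None, upper=1.0) evaluated at x, as the final assertion checks.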