Example #1
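These snippets come from PyMC's test suite and span several 3.x/4.x API revisions (compile_logp vs. .logp(point), initial_point vs. recompute_initial_point). They assume roughly the following imports; exact names and module paths shifted between releases, so treat this as a sketch:

import aesara
import numpy as np
import numpy.ma as ma
import pytest

from pymc import (
    AR, DensityDist, EulerMaruyama, Flat, GARCH11, HalfFlat, Model, Normal,
    Potential, TruncatedNormal, Uniform, logcdf, logp, sample,
    sample_posterior_predictive, sample_prior_predictive,
)
from pymc.exceptions import ImputationWarning

# A few other names used below (AR1, floatX, interval, logpt, walk_model,
# RandomVariable, select_by_precision) come from version-specific internals
# and test helpers.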
    def test_order1_logp(self):
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.99])
        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=len(data))
            z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)

        with Model() as t_constant:
            y = AR(
                "y",
                np.hstack((0.3, phi)),
                sigma=1,
                init_dist=Flat.dist(),
                shape=len(data),
                constant=True,
            )
            z = Normal("z",
                       mu=0.3 + phi * data[:-1],
                       sigma=1,
                       shape=len(data) - 1)
        ar_like = t_constant.compile_logp(y)({"y": data})
        reg_like = t_constant.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)
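The equivalence holds because, with a flat init_dist on y[0], the AR(1) joint density reduces to the product of one-step conditionals y[t] | y[t-1] ~ Normal(phi * y[t-1], 1). The regression side can be reproduced standalone with SciPy (a sketch, not part of the test):

import numpy as np
from scipy import stats

data = np.array([0.3, 1, 2, 3, 4])
phi = 0.99
# Sum of one-step-ahead Gaussian log-densities N(phi * y[t-1], 1), t = 1..4
manual_logp = stats.norm.logpdf(data[1:], loc=phi * data[:-1], scale=1).sum()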
Example #2
    def test_batched_rhos(self):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order)
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            AR("y", beta, sigma=1.0, shape=(batch_size, steps), initval=y_tp)
        with Model() as t1:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            for i in range(batch_size):
                AR(f"y_{i}", beta[i], sigma=1.0, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp[1] = 0  # with zero AR coefficients the second series is white noise
        y_eval = t0["y"].eval({t0["beta"]: beta_tp})
        assert y_eval.shape == (batch_size, steps)
        assert np.all(abs(y_eval[1]) < 5)
Example #3
def test_missing_logp():
    with Model() as m:
        theta1 = Normal("theta1", 0, 5, observed=[0, 1, 2, 3, 4])
        theta2 = Normal("theta2", mu=theta1, observed=[0, 1, 2, 3, 4])
    m_logp = m.logp()

    with Model() as m_missing:
        theta1 = Normal("theta1", 0, 5, observed=np.array([0, 1, np.nan, 3, np.nan]))
        theta2 = Normal("theta2", mu=theta1, observed=np.array([np.nan, np.nan, 2, np.nan, 4]))
    m_missing_logp = m_missing.logp({"theta1_missing": [2, 4], "theta2_missing": [0, 1, 3]})

    assert m_logp == m_missing_logp
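The imputed values passed to logp ({"theta1_missing": [2, 4], "theta2_missing": [0, 1, 3]}) are exactly the entries that were replaced by np.nan, so evaluating the imputed model at them reconstructs the complete data [0, 1, 2, 3, 4] and the two log-probabilities coincide.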
Example #4
def test_missing(data):
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            _ = Normal("y", x, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #5
def test_logpt_basic():
    """Make sure we can compute a log-likelihood for a hierarchical model with transforms."""

    with Model() as m:
        a = Uniform("a", 0.0, 1.0)
        c = Normal("c")
        b_l = c * a + 2.0
        b = Uniform("b", b_l, b_l + 1.0)

    a_value_var = m.rvs_to_values[a]
    assert a_value_var.tag.transform

    b_value_var = m.rvs_to_values[b]
    assert b_value_var.tag.transform

    c_value_var = m.rvs_to_values[c]

    b_logp = logpt(b, b_value_var, sum=False)

    res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
    res_rv_ancestors = [
        v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable)
    ]

    # There shouldn't be any `RandomVariable`s in the resulting graph
    assert len(res_rv_ancestors) == 0
    assert b_value_var in res_ancestors
    assert c_value_var in res_ancestors
    assert a_value_var in res_ancestors
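logpt builds the log-density graph by replacing each random variable with its (possibly transformed) value variable, which is why the assertions expect no RandomVariable nodes among the ancestors of b_logp, only the three value variables.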
Example #6
def test_AR_nd():
    # multidimensional AR (order p = 3)
    p, T, n = 3, 100, 5
    beta_tp = np.random.randn(p, n)
    y_tp = np.random.randn(T, n)
    with Model() as t0:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)

    with Model() as t1:
        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
        for i in range(n):
            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])

    np.testing.assert_allclose(t0.logp(t0.recompute_initial_point()),
                               t1.logp(t1.recompute_initial_point()))
Example #7
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal("beta1", 1, 1)
        beta2 = Normal("beta2", 2, 1)
        latent = Normal("theta", size=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal("o1", mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive(return_inferencedata=False)
        assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys())
        # TODO: Assert something
        trace = sample(chains=1, draws=50)
Example #8
def test_missing_with_predictors():
    predictors = np.array([0.5, 1, 0.5, 2, 0.3])
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            y = Normal("y", x * predictors, 1, observed=data)

    assert "y_missing" in model.named_vars

    test_point = model.initial_point
    assert not np.isnan(model.logp(test_point))

    with model:
        prior_trace = sample_prior_predictive(return_inferencedata=False)
    assert {"x", "y"} <= set(prior_trace.keys())
Example #9
def test_unexpected_rvs():
    with Model() as model:
        x = Normal("x")
        y = DensityDist("y", logp=lambda *args: x)

    with pytest.raises(ValueError,
                       match="^Random variables detected in the logp graph"):
        model.logp()
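The lambda above closes over the model variable x directly, which leaves a RandomVariable in the logp graph. A sketch of the supported pattern, assuming the PyMC v4 DensityDist API where distribution parameters are passed positionally and forwarded to logp:

with Model() as model:
    x = Normal("x")
    # Pass x as an explicit parameter; logp receives (value, *dist_params),
    # and x is substituted by its value variable in the logp graph.
    y = DensityDist("y", x, logp=lambda value, x: x)

model.logp()  # no ValueError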
Example #10
def test_AR():
    # AR1
    data = np.array([0.3, 1, 2, 3, 4])
    phi = np.array([0.99])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # dedicated AR1 distribution vs. AR of order 1
    with Model() as t:
        rho = Normal("rho", 0.0, 1.0)
        y1 = AR1("y1", rho, 1.0, observed=data)
        y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
    initial_point = t.recompute_initial_point()
    np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

    # AR1 + constant
    with Model() as t:
        y = AR("y",
               np.hstack((0.3, phi)),
               sigma=1,
               shape=len(data),
               constant=True)
        z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR2
    phi = np.array([0.84, 0.10])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z",
                   mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                   sigma=1,
                   shape=len(data) - 2)
    ar_like = t["y"].logp({"z": data[2:], "y": data})
    reg_like = t["z"].logp({"z": data[2:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)
Example #11
def test_model_unchanged_logprob_access():
    # Issue #5007
    with Model() as model:
        a = Normal("a")
        c = Uniform("c", lower=a - 1, upper=1)

    original_inputs = set(aesara.graph.graph_inputs([c]))
    # Accessing model.logpt should not modify the graph
    model.logpt
    new_inputs = set(aesara.graph.graph_inputs([c]))
    assert original_inputs == new_inputs
Example #12
def test_interval_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)

        rng = aesara.shared(np.random.RandomState(2323), borrow=True)

        with pytest.warns(ImputationWarning):
            theta1 = Uniform("theta1", 0, 5, observed=obs1, rng=rng)
        with pytest.warns(ImputationWarning):
            theta2 = Normal("theta2", mu=theta1, observed=obs2, rng=rng)

        assert "theta1_observed_interval__" in model.named_vars
        assert "theta1_missing_interval__" in model.named_vars
        assert isinstance(
            model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, interval
        )

        prior_trace = sample_prior_predictive(return_inferencedata=False)

        # Make sure the combined observed + missing deterministics have the
        # same shape as the original observation vectors
        assert prior_trace["theta1"].shape[-1] == obs1.shape[0]
        assert prior_trace["theta2"].shape[-1] == obs2.shape[0]

        # Make sure that the observed values are newly generated samples
        assert np.all(np.var(prior_trace["theta1_observed"], 0) > 0.0)
        assert np.all(np.var(prior_trace["theta2_observed"], 0) > 0.0)

        # Make sure the missing and observed parts of the combined deterministic
        # match the corresponding sampled variable values
        assert np.mean(prior_trace["theta1"][:, obs1.mask] - prior_trace["theta1_missing"]) == 0.0
        assert np.mean(prior_trace["theta1"][:, ~obs1.mask] - prior_trace["theta1_observed"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, obs2.mask] - prior_trace["theta2_missing"]) == 0.0
        assert np.mean(prior_trace["theta2"][:, ~obs2.mask] - prior_trace["theta2_observed"]) == 0.0

        assert {"theta1", "theta2"} <= set(prior_trace.keys())

        trace = sample(
            chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False
        )

        assert np.all(0 < trace["theta1_missing"].mean(0))
        assert np.all(0 < trace["theta2_missing"].mean(0))
        assert "theta1" not in trace.varnames
        assert "theta2" not in trace.varnames

        # Make sure that the observed values are newly generated samples and that
        # the observed parts and the deterministic match
        pp_trace = sample_posterior_predictive(trace, return_inferencedata=False)
        assert np.all(np.var(pp_trace["theta1"], 0) > 0.0)
        assert np.all(np.var(pp_trace["theta2"], 0) > 0.0)
        assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0
        assert np.mean(pp_trace["theta2"][:, ~obs2.mask] - pp_trace["theta2_observed"]) == 0.0
Example #13
    def test_order2_logp(self):
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.84, 0.10])
        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=len(data))
            z = Normal("z",
                       mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                       sigma=1,
                       shape=len(data) - 2)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[2:]})
        np.testing.assert_allclose(ar_like, reg_like)
Example #14
def test_logcdf_transformed_argument():
    with Model() as m:
        sigma = HalfFlat("sigma")
        x = Normal("x", 0, sigma)
        Potential("norm_term", -logcdf(x, 1.0))

    sigma_value_log = -1.0
    sigma_value = np.exp(sigma_value_log)
    x_value = 0.5

    observed = m.logp_nojac({"sigma_log__": sigma_value_log, "x": x_value})
    expected = logp(TruncatedNormal.dist(0, sigma_value, lower=None, upper=1.0), x_value).eval()
    assert np.isclose(observed, expected)
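Subtracting logcdf(x, 1.0) via the Potential renormalizes the Normal to the region x <= 1, which is exactly an upper-truncated Normal; hence the TruncatedNormal reference. The identity can be checked standalone with SciPy (a sketch):

import numpy as np
from scipy import stats

sigma, x = np.exp(-1.0), 0.5
lhs = stats.norm.logpdf(x, 0, sigma) - stats.norm.logcdf(1.0, 0, sigma)
rhs = stats.truncnorm.logpdf(x, a=-np.inf, b=1.0 / sigma, loc=0, scale=sigma)
assert np.isclose(lhs, rhs)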
Example #15
def test_GARCH11():
    # test data ~ N(0, 1)
    data = np.array([
        -1.35078362, -0.81254164, 0.28918551, -2.87043544, -0.94353337,
        0.83660719, -0.23336562, -0.58586298, -1.36856736, -1.60832975,
        -1.31403141, 0.05446936, -0.97213128, -0.18928725, 1.62011258,
        -0.95978616, -2.06536047, 0.6556103, -0.27816645, -1.26413397,
    ])
    omega = 0.6
    alpha_1 = 0.4
    beta_1 = 0.5
    initial_vol = np.float64(0.9)
    vol = np.empty_like(data)
    vol[0] = initial_vol
    for i in range(len(data) - 1):
        vol[i + 1] = np.sqrt(omega + beta_1 * vol[i]**2 + alpha_1 * data[i]**2)

    with Model() as t:
        y = GARCH11(
            "y",
            omega=omega,
            alpha_1=alpha_1,
            beta_1=beta_1,
            initial_vol=initial_vol,
            shape=data.shape,
        )
        z = Normal("z", mu=0, sigma=vol, shape=data.shape)
    garch_like = t["y"].logp({"z": data, "y": data})
    reg_like = t["z"].logp({"z": data, "y": data})
    decimal = select_by_precision(float64=7, float32=4)
    np.testing.assert_allclose(garch_like, reg_like, 10**(-decimal))
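The loop implements the GARCH(1,1) volatility recursion vol[t+1]**2 = omega + alpha_1 * data[t]**2 + beta_1 * vol[t]**2, so conditional on the realized path the series is Normal(0, vol[t]) at each step, which is exactly the z likelihood the test compares against.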
Example #16
def test_linear():
    lam = -0.78
    sig2 = 5e-3
    N = 300
    dt = 1e-1
    sde = lambda x, lam: (lam * x, sig2)
    x = floatX(_gen_sde_path(sde, (lam, ), dt, N, 5.0))
    z = x + np.random.randn(x.size) * sig2
    # build model
    with Model() as model:
        lamh = Flat("lamh")
        xh = EulerMaruyama("xh", dt, sde, (lamh, ), shape=N + 1, initval=x)
        Normal("zh", mu=xh, sigma=sig2, observed=z)
    # invert
    with model:
        trace = sample(init="advi+adapt_diag", chains=1)

    ppc = sample_posterior_predictive(trace, model=model)

    p95 = [2.5, 97.5]
    lo, hi = np.percentile(trace[lamh], p95, axis=0)
    assert (lo < lam) and (lam < hi)
    lo, hi = np.percentile(ppc["zh"], p95, axis=0)
    assert ((lo < z) * (z < hi)).mean() > 0.95
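_gen_sde_path is a test helper that is not shown here; a minimal sketch of what it presumably does, simulating one Euler-Maruyama path x[t+1] = x[t] + f(x[t])*dt + g*sqrt(dt)*w[t] for the drift/diffusion pair returned by sde:

def _gen_sde_path(sde, pars, dt, n, x0):
    # Hypothetical reconstruction; the actual helper lives in the test module.
    xs = [x0]
    for _ in range(n):
        f, g = sde(xs[-1], *pars)
        xs.append(xs[-1] + dt * f + np.sqrt(dt) * g * np.random.randn())
    return np.array(xs)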