Example #1
    def test_density_scaling_with_generator(self):
        # We have different size generators

        def true_dens():
            g = gen1()
            for i, point in enumerate(g):
                yield stats.norm.logpdf(point).sum() * 10

        t = true_dens()
        # We have same size models
        with pm.Model() as model1:
            Normal("n", observed=gen1(), total_size=100)
            p1 = aesara.function([], model1.logpt)

        with pm.Model() as model2:
            gen_var = generator(gen2())
            Normal("n", observed=gen_var, total_size=100)
            p2 = aesara.function([], model2.logpt)

        for i in range(10):
            _1, _2, _t = p1(), p2(), next(t)
            decimals = select_by_precision(float64=7, float32=2)
            np.testing.assert_almost_equal(
                _1, _t, decimal=decimals)  # Value O(-50,000)
            np.testing.assert_almost_equal(_1, _2)
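The gen1 and gen2 fixtures are defined elsewhere in the test module and are not shown here. The test only needs them to stream minibatches with different row counts over the same notional 100-row dataset, so that total_size=100 rescales both models to the full-data log-density (the * 10 factor in true_dens matches a 10-row minibatch scaled up to 100 rows). A plausible sketch of such fixtures, with assumed shapes rather than the test suite's actual definitions:

import numpy as np

def gen1():
    # Hypothetical stand-in: endless minibatches of 10 rows
    i = 0
    while True:
        yield np.ones((10, 100)) * i
        i += 1

def gen2():
    # Hypothetical stand-in: endless minibatches of 20 rows over the same data
    i = 0
    while True:
        yield np.ones((20, 100)) * i
        i += 1

With total_size=100 the minibatch log-density is scaled by 100/10 in model1 and 100/20 in model2, which is why, for generators like these, the two models (and true_dens) agree up to the selected precision.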
Example #2
def test_logpt_subtensor():
    """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables."""

    size = 5

    mu_base = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    rng = aesara.shared(np.random.RandomState(232), borrow=True)

    A_rv = Normal.dist(mu, sigma, rng=rng)
    A_rv.name = "A"

    p = 0.5

    I_rv = Bernoulli.dist(p, size=size, rng=rng)
    I_rv.name = "I"

    # Mixed advanced indexing: for each column, pick the row of A_rv selected by the Bernoulli draw in I_rv
    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1]:]]

    assert isinstance(A_idx.owner.op,
                      (Subtensor, AdvancedSubtensor, AdvancedSubtensor1))

    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"

    I_value_var = I_rv.type()
    I_value_var.name = "I_value"

    A_idx_logp = logpt(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var})

    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)

    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])

    decimals = select_by_precision(float64=6, float32=4)

    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size).astype(I_rv.dtype)

        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1]:]], sigma)
        A_idx_value = norm_sp.rvs().astype(A_idx.dtype)

        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)

        logp_vals = logp_vals_fn(A_idx_value, I_value)

        np.testing.assert_almost_equal(logp_vals,
                                       exp_obs_logps,
                                       decimal=decimals)
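The expected value checked against logp_vals is the per-element joint log-probability of the indexed draw and its index, which factorizes as

    logp(A[I] = a, I = i)[j] = log Normal(a[j] | mu[i[j], j], sigma) + log Bernoulli(i[j] | p)

and those are exactly the two scipy terms accumulated into exp_obs_logps in the loop above.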
Example #3
def test_GARCH11():
    # test data ~ N(0, 1)
    data = np.array(
        [
            -1.35078362,
            -0.81254164,
            0.28918551,
            -2.87043544,
            -0.94353337,
            0.83660719,
            -0.23336562,
            -0.58586298,
            -1.36856736,
            -1.60832975,
            -1.31403141,
            0.05446936,
            -0.97213128,
            -0.18928725,
            1.62011258,
            -0.95978616,
            -2.06536047,
            0.6556103,
            -0.27816645,
            -1.26413397,
        ]
    )
    omega = 0.6
    alpha_1 = 0.4
    beta_1 = 0.5
    initial_vol = np.float64(0.9)
    vol = np.empty_like(data)
    vol[0] = initial_vol
    for i in range(len(data) - 1):
        vol[i + 1] = np.sqrt(omega + beta_1 * vol[i] ** 2 + alpha_1 * data[i] ** 2)

    with Model() as t:
        y = GARCH11(
            "y",
            omega=omega,
            alpha_1=alpha_1,
            beta_1=beta_1,
            initial_vol=initial_vol,
            shape=data.shape,
        )
        z = Normal("z", mu=0, sigma=vol, shape=data.shape)
    garch_like = t["y"].logp({"z": data, "y": data})
    reg_like = t["z"].logp({"z": data, "y": data})
    decimal = select_by_precision(float64=7, float32=4)
    np.testing.assert_allclose(garch_like, reg_like, 10 ** (-decimal))
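For reference, the hand-rolled loop above is the standard GARCH(1,1) volatility recursion

    vol[t + 1] ** 2 = omega + alpha_1 * data[t] ** 2 + beta_1 * vol[t] ** 2

so a Normal with sigma=vol assigns the same log-probability to the data as the GARCH11 distribution, which is what the assert_allclose at the end verifies.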
Example #4
def test_matrix_multiplication():
    # Check matrix multiplication works between RVs, transformed RVs,
    # Deterministics, and numpy arrays
    with pm.Model() as linear_model:
        matrix = pm.Normal("matrix", shape=(2, 2))
        transformed = pm.Gamma("transformed", alpha=2, beta=1, shape=2)
        rv_rv = pm.Deterministic("rv_rv", matrix @ transformed)
        np_rv = pm.Deterministic("np_rv", np.ones((2, 2)) @ transformed)
        rv_np = pm.Deterministic("rv_np", matrix @ np.ones(2))
        rv_det = pm.Deterministic("rv_det", matrix @ rv_rv)
        det_rv = pm.Deterministic("det_rv", rv_rv @ transformed)

        posterior = pm.sample(10,
                              tune=0,
                              compute_convergence_checks=False,
                              progressbar=False)
        decimal = select_by_precision(7, 5)
        for point in posterior.points():
            npt.assert_almost_equal(
                point["matrix"] @ point["transformed"],
                point["rv_rv"],
                decimal=decimal,
            )
            npt.assert_almost_equal(
                np.ones((2, 2)) @ point["transformed"],
                point["np_rv"],
                decimal=decimal,
            )
            npt.assert_almost_equal(
                point["matrix"] @ np.ones(2),
                point["rv_np"],
                decimal=decimal,
            )
            npt.assert_almost_equal(
                point["matrix"] @ point["rv_rv"],
                point["rv_det"],
                decimal=decimal,
            )
            npt.assert_almost_equal(
                point["rv_rv"] @ point["transformed"],
                point["det_rv"],
                decimal=decimal,
            )
Example #5
    def test_density_scaling_with_generator(self):
        # We have different size generators

        def true_dens():
            g = gen1()
            for i, point in enumerate(g):
                yield stats.norm.logpdf(point).sum() * 10
        t = true_dens()
        # We have same size models
        with pm.Model() as model1:
            Normal('n', observed=gen1(), total_size=100)
            p1 = theano.function([], model1.logpt)

        with pm.Model() as model2:
            gen_var = generator(gen2())
            Normal('n', observed=gen_var, total_size=100)
            p2 = theano.function([], model2.logpt)

        for i in range(10):
            _1, _2, _t = p1(), p2(), next(t)
            decimals = select_by_precision(float64=7, float32=2)
            np.testing.assert_almost_equal(_1, _t, decimal=decimals)  # Value O(-50,000)
            np.testing.assert_almost_equal(_1, _2)
Example #6
def test_accuracy_non_normal():
    _, model, (mu, _) = non_normal(4)
    with model:
        newstart = find_MAP(Point(x=[0.5, 0.01, 0.95, 0.99]))
        close_to(newstart["x"], mu,
                 select_by_precision(float64=1e-5, float32=1e-4))
Example #7
def test_accuracy_normal():
    _, model, (mu, _) = simple_model()
    with model:
        newstart = find_MAP(Point(x=[-10.5, 100.5]))
        close_to(newstart["x"], [mu, mu],
                 select_by_precision(float64=1e-5, float32=1e-4))
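Both MAP accuracy tests lean on two helpers from the shared test utilities, select_by_precision and close_to, which are not shown here. A rough, hypothetical re-implementation of what they do (the theano-era snippets would consult theano.config.floatX instead):

import numpy as np
import aesara

def select_by_precision(float64, float32):
    # Pick the tighter tolerance in double precision, the looser one in float32 mode.
    return float64 if aesara.config.floatX == "float64" else float32

def close_to(x, v, bound):
    # Assert that x is elementwise within `bound` of v.
    assert np.all(np.abs(np.asarray(x) - np.asarray(v)) < bound)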