Example No. 1
def test_logp_helper_exceptions():
    with pytest.raises(TypeError, match="When RV is not a pure distribution"):
        logp(at.exp(Normal.dist()), [1, 2])

    with pytest.raises(NotImplementedError,
                       match="PyMC could not infer logp of input variable"):
        logp(at.cos(Normal.dist()), 1)
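
A minimal supplementary sketch (not part of the original test, assuming PyMC v4-style imports): pm.logp works directly on a pure distribution variable, and the result can be checked against SciPy.

# Illustrative sketch of the supported path for the logp helper.
import numpy as np
import pymc as pm
import scipy.stats as st

x = pm.Normal.dist(mu=0.0, sigma=1.0)
x_logp = pm.logp(x, 1.5)
np.testing.assert_allclose(x_logp.eval(), st.norm(0, 1).logpdf(1.5))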
Example No. 2
    def test_batched_sigma(self):
        ar_order, steps, batch_size = 4, 100, (7, 5)
        # AR order cannot be inferred from beta_tp because it is not fixed.
        # We specify it manually below
        beta_tp = aesara.shared(np.random.randn(ar_order))
        sigma_tp = np.abs(np.random.randn(*batch_size))
        y_tp = np.random.randn(*batch_size, steps)
        with Model() as t0:
            sigma = HalfNormal("sigma",
                               1.0,
                               shape=batch_size,
                               initval=sigma_tp)
            AR(
                "y",
                beta_tp,
                sigma=sigma,
                init_dist=Normal.dist(0, sigma[..., None]),
                size=batch_size,
                steps=steps,
                initval=y_tp,
                ar_order=ar_order,
            )
        with Model() as t1:
            sigma = HalfNormal("beta", 1.0, shape=batch_size, initval=sigma_tp)
            for i in range(batch_size[0]):
                for j in range(batch_size[1]):
                    AR(
                        f"y_{i}{j}",
                        beta_tp,
                        sigma=sigma[i][j],
                        init_dist=Normal.dist(0, sigma[i][j]),
                        shape=steps,
                        initval=y_tp[i, j],
                        ar_order=ar_order,
                    )

        # Check logp shape
        sigma_logp, y_logp = t0.compile_logp(sum=False)(t0.initial_point())
        assert tuple(y_logp.shape) == batch_size

        np.testing.assert_allclose(
            sigma_logp.sum() + y_logp.sum(),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp.set_value(np.zeros(
            (ar_order, )))  # Should always be close to zero
        sigma_tp = np.full(batch_size, [0.01, 0.1, 1, 10, 100])
        y_eval = t0["y"].eval({t0["sigma"]: sigma_tp})
        assert y_eval.shape == (*batch_size, steps + ar_order)
        assert np.allclose(y_eval.std(axis=(0, 2)), [0.01, 0.1, 1, 10, 100],
                           rtol=0.1)
Example No. 3
def test_ignore_logprob_model():
    # logp that does not depend on input
    def logp(value, x):
        return value

    with Model() as m:
        x = Normal.dist()
        y = DensityDist("y", x, logp=logp)
    # Aeppl raises a KeyError when it finds an unexpected RV
    with pytest.raises(KeyError):
        joint_logp([y], {y: y.type()})

    with Model() as m:
        x = ignore_logprob(Normal.dist())
        y = DensityDist("y", x, logp=logp)
    assert joint_logp([y], {y: y.type()})
Example No. 4
    def logp(self, value):
        """
        Calculate log-probability of AR distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        if self.constant:
            x = at.add(*(self.rho[i + 1] * value[self.p - (i + 1):-(i + 1)]
                         for i in range(self.p)))
            eps = value[self.p:] - self.rho[0] - x
        else:
            if self.p == 1:
                x = self.rho * value[:-1]
            else:
                x = at.add(*(self.rho[i] * value[self.p - (i + 1):-(i + 1)]
                             for i in range(self.p)))
            eps = value[self.p:] - x

        innov_like = Normal.dist(mu=0.0, tau=self.tau).logp(eps)
        init_like = self.init.logp(value[:self.p])

        return at.sum(innov_like) + at.sum(init_like)
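
To make the innovation terms above concrete, here is an illustrative NumPy/SciPy recomputation of the p=1, no-constant branch (the values and the standard-Normal init are assumptions, not taken from the class):

# eps = x[1:] - rho * x[:-1]; score the innovations and the first value separately.
import numpy as np
import scipy.stats as st

rho, sigma_innov = 0.7, 1.0
x = np.array([0.3, 1.0, 2.0, 3.0, 4.0])

innov_like = st.norm(0.0, sigma_innov).logpdf(x[1:] - rho * x[:-1]).sum()
init_like = st.norm(0.0, 1.0).logpdf(x[0])       # assumed standard-Normal init
total_logp = innov_like + init_like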
Example No. 5
    def test_batched_init_dist(self):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = aesara.shared(np.random.randn(ar_order), shape=(3, ))
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            init_dist = Normal.dist(0.0, 0.01, size=(batch_size, ar_order))
            AR("y",
               beta_tp,
               sigma=0.01,
               init_dist=init_dist,
               steps=steps,
               initval=y_tp)
        with Model() as t1:
            for i in range(batch_size):
                AR(f"y_{i}", beta_tp, sigma=0.01, shape=steps, initval=y_tp[i])

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        # Next values should stay close to previous ones
        beta_tp.set_value(np.full((ar_order, ), 1 / ar_order))
        # Init dist is cloned when creating the AR, so the original variable is not
        # part of the AR graph. We retrieve the one actually used manually
        init_dist = t0["y"].owner.inputs[2]
        init_dist_tp = np.full((batch_size, ar_order),
                               (np.arange(batch_size) * 100)[:, None])
        y_eval = t0["y"].eval({init_dist: init_dist_tp})
        assert y_eval.shape == (batch_size, steps + ar_order)
        assert np.allclose(y_eval[:, -10:].mean(-1),
                           np.arange(batch_size) * 100,
                           rtol=0.1,
                           atol=0.5)
Example No. 6
def test_logpt_incsubtensor(indices, size):
    """Make sure we can compute a log-likelihood for ``Y[idx] = data`` where ``Y`` is univariate."""

    mu = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    data = mu[indices]
    sigma = 0.001
    rng = np.random.RandomState(232)
    a_val = rng.normal(mu, sigma, size=size).astype(aesara.config.floatX)

    rng = aesara.shared(rng, borrow=False)
    a = Normal.dist(mu, sigma, size=size, rng=rng)
    a_value_var = a.type()
    a.name = "a"

    a_idx = at.set_subtensor(a[indices], data)

    assert isinstance(a_idx.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1))

    a_idx_value_var = a_idx.type()
    a_idx_value_var.name = "a_idx_value"

    a_idx_logp = logpt(a_idx, {a_idx: a_value_var}, sum=False)

    logp_vals = a_idx_logp.eval({a_value_var: a_val})

    # The indices that were set should all have the same log-likelihood values,
    # because the values they were set to correspond to the unique means along
    # that dimension.  This helps us confirm that the log-likelihood is
    # associating the assigned values with their correct parameters.
    a_val_idx = a_val.copy()
    a_val_idx[indices] = data
    exp_obs_logps = sp.norm.logpdf(a_val_idx, mu, sigma)
    np.testing.assert_almost_equal(logp_vals, exp_obs_logps)
Example No. 7
    def dist(
        cls, mu=0.0, sigma=1.0, *, init=None, steps=None, size=None, **kwargs
    ) -> at.TensorVariable:

        mu = at.as_tensor_variable(floatX(mu))
        sigma = at.as_tensor_variable(floatX(sigma))

        steps = get_steps(
            steps=steps,
            shape=kwargs.get("shape", None),
            step_shape_offset=1,
        )
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps))

        # If no init distribution is passed, default to a standard Normal
        if init is None:
            init = Normal.dist(0, 1)
        else:
            if not (
                isinstance(init, at.TensorVariable)
                and init.owner is not None
                and isinstance(init.owner.op, RandomVariable)
                and init.owner.op.ndim_supp == 0
            ):
                raise TypeError("init must be a univariate distribution variable")
            check_dist_not_registered(init)

        # Ignores logprob of init var because that's accounted for in the logp method
        init = ignore_logprob(init)

        return super().dist([mu, sigma, init, steps], size=size, **kwargs)
Example No. 8
def ar_logp(op, values, rhos, sigma, init_dist, steps, noise_rng, **kwargs):
    (value,) = values

    ar_order = op.ar_order
    constant_term = op.constant_term

    # Convolve rhos with values
    if constant_term:
        expectation = at.add(
            rhos[..., 0, None],
            *(
                rhos[..., i + 1, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            ),
        )
    else:
        expectation = at.add(
            *(
                rhos[..., i, None] * value[..., ar_order - (i + 1) : -(i + 1)]
                for i in range(ar_order)
            )
        )
    # Compute and collapse logp across time dimension
    innov_logp = at.sum(
        logp(Normal.dist(0, sigma[..., None]), value[..., ar_order:] - expectation), axis=-1
    )
    init_logp = logp(init_dist, value[..., :ar_order])
    if init_dist.owner.op.ndim_supp == 0:
        init_logp = at.sum(init_logp, axis=-1)
    return init_logp + innov_logp
Example No. 9
    def step(*args):
        *prev_xs, reversed_rhos, sigma, rng = args
        if constant_term:
            mu = reversed_rhos[-1] + at.sum(prev_xs * reversed_rhos[:-1], axis=0)
        else:
            mu = at.sum(prev_xs * reversed_rhos, axis=0)
        next_rng, new_x = Normal.dist(mu=mu, sigma=sigma, rng=rng).owner.outputs
        return new_x, {rng: next_rng}
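
Stripping away the Aesara RNG plumbing, the update this step performs can be sketched in plain NumPy (all names below are illustrative, not from the source):

# One AR step: dot the lagged values with the reversed coefficients, then sample.
import numpy as np

def numpy_ar_step(prev_xs, reversed_rhos, sigma, constant_term, rng):
    prev_xs = np.asarray(prev_xs)        # oldest lag first, matching reversed_rhos
    if constant_term:
        mu = reversed_rhos[-1] + np.sum(prev_xs * reversed_rhos[:-1])
    else:
        mu = np.sum(prev_xs * reversed_rhos)
    return rng.normal(mu, sigma)

rng = np.random.default_rng(0)
new_x = numpy_ar_step([0.1, -0.2], np.array([0.5, 0.3]), 1.0, False, rng)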
Example No. 10
def test_joint_logp_subtensor():
    """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables."""

    size = 5

    mu_base = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size)
    mu = np.stack([mu_base, -mu_base])
    sigma = 0.001
    rng = aesara.shared(np.random.RandomState(232), borrow=True)

    A_rv = Normal.dist(mu, sigma, rng=rng)
    A_rv.name = "A"

    p = 0.5

    I_rv = Bernoulli.dist(p, size=size, rng=rng)
    I_rv.name = "I"

    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1]:]]

    assert isinstance(A_idx.owner.op,
                      (Subtensor, AdvancedSubtensor, AdvancedSubtensor1))

    A_idx_value_var = A_idx.type()
    A_idx_value_var.name = "A_idx_value"

    I_value_var = I_rv.type()
    I_value_var.name = "I_value"

    A_idx_logps = joint_logp(A_idx, {
        A_idx: A_idx_value_var,
        I_rv: I_value_var
    },
                             sum=False)
    A_idx_logp = at.add(*A_idx_logps)

    logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)

    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])

    decimals = select_by_precision(float64=6, float32=4)

    for i in range(10):
        bern_sp = sp.bernoulli(p)
        I_value = bern_sp.rvs(size=size).astype(I_rv.dtype)

        norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1]:]], sigma)
        A_idx_value = norm_sp.rvs().astype(A_idx.dtype)

        exp_obs_logps = norm_sp.logpdf(A_idx_value)
        exp_obs_logps += bern_sp.logpmf(I_value)

        logp_vals = logp_vals_fn(A_idx_value, I_value)

        np.testing.assert_almost_equal(logp_vals,
                                       exp_obs_logps,
                                       decimal=decimals)
Example No. 11
def test_logcdf_helper():
    value = at.vector("value")
    x = Normal.dist(0, 1, size=2)

    x_logp = logcdf(x, value, sum=False)
    np.testing.assert_almost_equal(x_logp.eval({value: [0, 1]}), sp.norm(0, 1).logcdf([0, 1]))

    x_logp = logcdf(x, [0, 1], sum=False)
    np.testing.assert_almost_equal(x_logp.eval(), sp.norm(0, 1).logcdf([0, 1]))
Example No. 12
    def __init__(self, w, mu, sigma=None, tau=None, sd=None, comp_shape=(), *args, **kwargs):
        if sd is not None:
            sigma = sd
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)

        self.mu = mu = at.as_tensor_variable(mu)
        self.sigma = self.sd = sigma = at.as_tensor_variable(sigma)

        super().__init__(w, Normal.dist(mu, sigma=sigma, shape=comp_shape), *args, **kwargs)
Example No. 13
def test_logcdf_helper():
    value = at.vector("value")
    x = Normal.dist(0, 1)

    x_logcdf = logcdf(x, value)
    np.testing.assert_almost_equal(x_logcdf.eval({value: [0, 1]}), sp.norm(0, 1).logcdf([0, 1]))

    x_logcdf = logcdf(x, [0, 1])
    np.testing.assert_almost_equal(x_logcdf.eval(), sp.norm(0, 1).logcdf([0, 1]))
Example No. 14
    def dist(
        cls,
        rho,
        sigma=None,
        tau=None,
        *,
        init_dist=None,
        steps=None,
        constant=False,
        ar_order=None,
        **kwargs,
    ):
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        sigma = at.as_tensor_variable(floatX(sigma))
        rhos = at.atleast_1d(at.as_tensor_variable(floatX(rho)))

        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        ar_order = cls._get_ar_order(rhos=rhos, constant=constant, ar_order=ar_order)
        steps = get_steps(steps=steps, shape=kwargs.get("shape", None), step_shape_offset=ar_order)
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps), ndim=0)

        if init_dist is not None:
            if not isinstance(init_dist, TensorVariable) or not isinstance(
                init_dist.owner.op, RandomVariable
            ):
                raise ValueError(
                    f"Init dist must be a distribution created via the `.dist()` API, "
                    f"got {type(init_dist)}"
                )
            check_dist_not_registered(init_dist)
            if init_dist.owner.op.ndim_supp > 1:
                raise ValueError(
                    "Init distribution must have a scalar or vector support dimension, "
                    f"got ndim_supp={init_dist.owner.op.ndim_supp}."
                )
        else:
            warnings.warn(
                "Initial distribution not specified, defaulting to "
                "`Normal.dist(0, 100, shape=...)`. You can specify an init_dist "
                "manually to suppress this warning.",
                UserWarning,
            )
            init_dist = Normal.dist(0, 100, shape=(*sigma.shape, ar_order))

        # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term
        init_dist = ignore_logprob(init_dist)

        return super().dist([rhos, sigma, init_dist, steps, ar_order, constant], **kwargs)
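
A hedged usage sketch of this constructor (the shape expectation follows the batched tests elsewhere in this listing, which assert an output length of steps + ar_order):

# Sketch; assumes the PyMC v4 AR API exercised by the surrounding tests.
import pymc as pm

x = pm.AR.dist(
    rho=[0.9],                          # ar_order inferred as 1
    sigma=1.0,
    init_dist=pm.Normal.dist(0, 1),
    steps=10,
)
assert x.eval().shape == (11,)          # steps + ar_order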
Example No. 15
    def test_batched_rhos(self):
        ar_order, steps, batch_size = 3, 100, 5
        beta_tp = np.random.randn(batch_size, ar_order)
        y_tp = np.random.randn(batch_size, steps)
        with Model() as t0:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            AR(
                "y",
                beta,
                sigma=1.0,
                init_dist=Normal.dist(0, 1),
                shape=(batch_size, steps),
                initval=y_tp,
            )
        with Model() as t1:
            beta = Normal("beta",
                          0.0,
                          1.0,
                          shape=(batch_size, ar_order),
                          initval=beta_tp)
            for i in range(batch_size):
                AR(
                    f"y_{i}",
                    beta[i],
                    init_dist=Normal.dist(0, 1),
                    sigma=1.0,
                    shape=steps,
                    initval=y_tp[i],
                )

        np.testing.assert_allclose(
            t0.compile_logp()(t0.initial_point()),
            t1.compile_logp()(t1.initial_point()),
        )

        beta_tp[1] = 0  # Should always be close to zero
        y_eval = t0["y"].eval({t0["beta"]: beta_tp})
        assert y_eval.shape == (batch_size, steps)
        assert np.all(abs(y_eval[1]) < 5)
Example No. 16
    def test_constant_random(self):
        x = AR.dist(
            rho=[100, 0, 0],
            sigma=0.1,
            init_dist=Normal.dist(-100.0, sigma=0.1),
            constant=True,
            shape=(6, ),
        )
        x_eval = x.eval()
        assert np.allclose(x_eval[:2], -100, rtol=0.1)
        assert np.allclose(x_eval[2:], 100, rtol=0.1)
Example No. 17
    def __new__(cls,
                name,
                w,
                mu,
                sigma=None,
                tau=None,
                comp_shape=(),
                **kwargs):
        _, sigma = get_tau_sigma(tau=tau, sigma=sigma)

        return Mixture(name, w, Normal.dist(mu, sigma=sigma, size=comp_shape),
                       **kwargs)
Example No. 18
    def logp(self, x):
        """
        Calculate log-probability of AR1 distribution at specified value.

        Parameters
        ----------
        x: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        k = self.k
        tau_e = self.tau_e  # innovation precision
        tau = tau_e * (1 - k**2)  # ar1 precision

        x_im1 = x[:-1]
        x_i = x[1:]
        boundary = Normal.dist(0.0, tau=tau).logp

        innov_like = Normal.dist(k * x_im1, tau=tau_e).logp(x_i)
        return boundary(x[0]) + at.sum(innov_like)
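
The boundary term uses the stationary precision of an AR(1) process: with innovation variance 1/tau_e, the stationary variance is 1/(tau_e * (1 - k**2)), hence tau = tau_e * (1 - k**2), so the first observation is scored under the marginal distribution rather than the innovation distribution. An illustrative simulation check (not from the library):

# Simulate an AR(1) chain and compare its variance to the stationary formula.
import numpy as np

k, tau_e = 0.6, 4.0
sigma_e = 1.0 / np.sqrt(tau_e)
rng = np.random.default_rng(123)

x, samples = 0.0, []
for _ in range(200_000):
    x = k * x + rng.normal(0.0, sigma_e)
    samples.append(x)

stationary_var = np.var(samples[1_000:])        # drop burn-in
assert np.isclose(stationary_var, 1.0 / (tau_e * (1 - k**2)), rtol=0.05)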
Example No. 19
def test_ignore_logprob_basic():
    x = Normal.dist()
    (measurable_x_out, ) = get_measurable_outputs(x.owner.op, x.owner)
    assert measurable_x_out is x.owner.outputs[1]

    new_x = ignore_logprob(x)
    assert new_x is not x
    assert isinstance(new_x.owner.op, Normal)
    assert type(new_x.owner.op).__name__ == "UnmeasurableNormalRV"
    # Confirm that it does not have a measurable output
    assert get_measurable_outputs(new_x.owner.op, new_x.owner) is None

    # Test that it will not clone a variable that is already unmeasurable
    new_new_x = ignore_logprob(new_x)
    assert new_new_x is new_x
Example No. 20
    def logp(self, x):
        """
        Calculate log-probability of GARCH(1, 1) distribution at specified value.

        Parameters
        ----------
        x: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        vol = self.get_volatility(x)
        return at.sum(Normal.dist(0.0, sigma=vol).logp(x))
Example No. 21
    def logp(self, x):
        """
        Calculate log-probability of EulerMaruyama distribution at specified value.

        Parameters
        ----------
        x: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        xt = x[:-1]
        f, g = self.sde_fn(x[:-1], *self.sde_pars)
        mu = xt + self.dt * f
        sigma = at.sqrt(self.dt) * g
        return at.sum(Normal.dist(mu=mu, sigma=sigma).logp(x[1:]))
Example No. 22
    def dist(cls, mu=0.0, sigma=1.0, *, init_dist=None, steps=None, **kwargs) -> at.TensorVariable:

        mu = at.as_tensor_variable(floatX(mu))
        sigma = at.as_tensor_variable(floatX(sigma))

        steps = get_steps(
            steps=steps,
            shape=kwargs.get("shape"),
            step_shape_offset=1,
        )
        if steps is None:
            raise ValueError("Must specify steps or shape parameter")
        steps = at.as_tensor_variable(intX(steps))

        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        # If no init distribution is passed, default to Normal(0, 100) and warn
        if init_dist is None:
            warnings.warn(
                "Initial distribution not specified, defaulting to `Normal.dist(0, 100)`."
                "You can specify an init_dist manually to suppress this warning.",
                UserWarning,
            )
            init_dist = Normal.dist(0, 100)
        else:
            if not (
                isinstance(init_dist, at.TensorVariable)
                and init_dist.owner is not None
                and isinstance(init_dist.owner.op, RandomVariable)
                and init_dist.owner.op.ndim_supp == 0
            ):
                raise TypeError("init must be a univariate distribution variable")
            check_dist_not_registered(init_dist)

        # Ignores logprob of init var because that's accounted for in the logp method
        init_dist = ignore_logprob(init_dist)

        return super().dist([mu, sigma, init_dist, steps], **kwargs)
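
A hedged usage sketch of this constructor (the steps + 1 output length follows the size checks in the GaussianRandomWalk tests further below):

# Sketch; assumes the init_dist keyword accepted by the snippet above.
import pymc as pm

grw = pm.GaussianRandomWalk.dist(
    mu=0.0,
    sigma=1.0,
    init_dist=pm.Normal.dist(0, 100),
    steps=4,
)
assert tuple(grw.shape.eval()) == (5,)          # steps + 1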
Example No. 23
def test_AR():
    # AR1
    data = np.array([0.3, 1, 2, 3, 4])
    phi = np.array([0.99])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z", mu=phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR1 and AR(1)
    with Model() as t:
        rho = Normal("rho", 0.0, 1.0)
        y1 = AR1("y1", rho, 1.0, observed=data)
        y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
    initial_point = t.recompute_initial_point()
    np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

    # AR1 + constant
    with Model() as t:
        y = AR("y",
               np.hstack((0.3, phi)),
               sigma=1,
               shape=len(data),
               constant=True)
        z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=len(data) - 1)
    ar_like = t["y"].logp({"z": data[1:], "y": data})
    reg_like = t["z"].logp({"z": data[1:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)

    # AR2
    phi = np.array([0.84, 0.10])
    with Model() as t:
        y = AR("y", phi, sigma=1, shape=len(data))
        z = Normal("z",
                   mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
                   sigma=1,
                   shape=len(data) - 2)
    ar_like = t["y"].logp({"z": data[2:], "y": data})
    reg_like = t["z"].logp({"z": data[2:], "y": data})
    np.testing.assert_allclose(ar_like, reg_like)
Example No. 24
    def logp(self, x):
        """
        Calculate log-probability of Gaussian Random Walk distribution at specified value.

        Parameters
        ----------
        x: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        if x.ndim > 0:
            x_im1 = x[:-1]
            x_i = x[1:]
            mu, sigma = self._mu_and_sigma(self.mu, self.sigma)
            innov_like = Normal.dist(mu=x_im1 + mu, sigma=sigma).logp(x_i)
            return self.init.logp(x[0]) + at.sum(innov_like)
        return self.init.logp(x)
Example No. 25
    def logp(
        value: at.Variable,
        mu: at.Variable,
        sigma: at.Variable,
        init: at.Variable,
        steps: at.Variable,
    ) -> at.TensorVariable:
        """Calculate log-probability of Gaussian Random Walk distribution at specified value."""

        # Calculate initialization logp
        init_logp = logp(init, value[..., 0])

        # Make time series stationary around the mean value
        stationary_series = value[..., 1:] - value[..., :-1]
        # Add one dimension to the right, so that mu and sigma broadcast safely along
        # the steps dimension
        series_logp = logp(Normal.dist(mu[..., None], sigma[..., None]), stationary_series)

        return check_parameters(
            init_logp + series_logp.sum(axis=-1),
            steps > 0,
            msg="steps > 0",
        )
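
An illustrative SciPy recomputation of this decomposition (the Normal(0, 100) init mirrors the default used in the dist constructor above; the values are made up):

# Random-walk logp = init logpdf at the first value + Normal logpdf of the diffs.
import numpy as np
import scipy.stats as st

mu, sigma = 0.5, 1.0
value = np.array([0.0, 0.7, 1.0, 1.8])

init_logp = st.norm(0, 100).logpdf(value[0])
series_logp = st.norm(mu, sigma).logpdf(np.diff(value)).sum()
total_logp = init_logp + series_logp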
Example No. 26
    def test_init_deprecated_arg(self):
        with pytest.warns(FutureWarning,
                          match="init parameter is now called init_dist"):
            pm.GaussianRandomWalk.dist(init=Normal.dist(), shape=(10, ))
Example No. 27
    def test_init_deprecated_arg(self):
        with pytest.warns(FutureWarning,
                          match="init parameter is now called init_dist"):
            pm.AR.dist(rho=[1, 2, 3], init=Normal.dist(), shape=(10, ))
Example No. 28
class TestGaussianRandomWalk:
    class TestGaussianRandomWalkRandom(BaseTestDistributionRandom):
        # Override default size for test class
        size = None

        pymc_dist = pm.GaussianRandomWalk
        pymc_dist_params = {
            "mu": 1.0,
            "sigma": 2,
            "init": pm.Constant.dist(0),
            "steps": 4
        }
        expected_rv_op_params = {
            "mu": 1.0,
            "sigma": 2,
            "init": pm.Constant.dist(0),
            "steps": 4
        }

        checks_to_run = [
            "check_pymc_params_match_rv_op",
            "check_rv_inferred_size",
        ]

        def check_rv_inferred_size(self):
            steps = self.pymc_dist_params["steps"]
            sizes_to_check = [None, (), 1, (1, )]
            sizes_expected = [(steps + 1, ), (steps + 1, ), (1, steps + 1),
                              (1, steps + 1)]

            for size, expected in zip(sizes_to_check, sizes_expected):
                pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params,
                                              size=size)
                expected_symbolic = tuple(pymc_rv.shape.eval())
                assert expected_symbolic == expected

        def test_steps_scalar_check(self):
            with pytest.raises(ValueError,
                               match="steps must be an integer scalar"):
                self.pymc_dist.dist(steps=[1])

    def test_gaussianrandomwalk_inference(self):
        mu, sigma, steps = 2, 1, 1000
        obs = np.concatenate([[0],
                              np.random.normal(mu, sigma,
                                               size=steps)]).cumsum()

        with pm.Model():
            _mu = pm.Uniform("mu", -10, 10)
            _sigma = pm.Uniform("sigma", 0, 10)

            obs_data = pm.MutableData("obs_data", obs)
            grw = GaussianRandomWalk("grw",
                                     _mu,
                                     _sigma,
                                     steps=steps,
                                     observed=obs_data)

            trace = pm.sample(chains=1)

        recovered_mu = trace.posterior["mu"].mean()
        recovered_sigma = trace.posterior["sigma"].mean()
        np.testing.assert_allclose([mu, sigma],
                                   [recovered_mu, recovered_sigma],
                                   atol=0.2)

    @pytest.mark.parametrize("init", [None, pm.Normal.dist()])
    def test_gaussian_random_walk_init_dist_shape(self, init):
        """Test that init_dist is properly resized"""
        grw = pm.GaussianRandomWalk.dist(mu=0, sigma=1, steps=1, init=init)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == ()

        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=1,
                                         steps=1,
                                         init=init,
                                         size=(5, ))
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (5, )

        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=1,
                                         steps=1,
                                         init=init,
                                         shape=2)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == ()

        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=1,
                                         steps=1,
                                         init=init,
                                         shape=(5, 2))
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (5, )

        grw = pm.GaussianRandomWalk.dist(mu=[0, 0],
                                         sigma=1,
                                         steps=1,
                                         init=init)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (2, )

        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=[1, 1],
                                         steps=1,
                                         init=init)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (2, )

        grw = pm.GaussianRandomWalk.dist(mu=np.zeros((3, 1)),
                                         sigma=[1, 1],
                                         steps=1,
                                         init=init)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (3, 2)

    def test_shape_ellipsis(self):
        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=1,
                                         steps=5,
                                         init=pm.Normal.dist(),
                                         shape=(3, ...))
        assert tuple(grw.shape.eval()) == (3, 6)
        assert tuple(grw.owner.inputs[-2].shape.eval()) == (3, )

    def test_gaussianrandomwalk_broadcasted_by_init_dist(self):
        grw = pm.GaussianRandomWalk.dist(mu=0,
                                         sigma=1,
                                         steps=4,
                                         init=pm.Normal.dist(size=(2, 3)))
        assert tuple(grw.shape.eval()) == (2, 3, 5)
        assert grw.eval().shape == (2, 3, 5)

    @pytest.mark.parametrize("shape", ((6, ), (3, 6)))
    def test_inferred_steps_from_shape(self, shape):
        x = GaussianRandomWalk.dist(shape=shape)
        steps = x.owner.inputs[-1]
        assert steps.eval() == 5

    @pytest.mark.parametrize("shape", (None, (5, ...)))
    def test_missing_steps(self, shape):
        with pytest.raises(ValueError,
                           match="Must specify steps or shape parameter"):
            GaussianRandomWalk.dist(shape=shape)

    def test_inconsistent_steps_and_shape(self):
        with pytest.raises(AssertionError,
                           match="Steps do not match last shape dimension"):
            x = GaussianRandomWalk.dist(steps=12, shape=45)

    def test_inferred_steps_from_dims(self):
        with pm.Model(coords={"batch": range(5), "steps": range(20)}):
            x = GaussianRandomWalk("x", dims=("batch", "steps"))
        steps = x.owner.inputs[-1]
        assert steps.eval() == 19

    def test_inferred_steps_from_observed(self):
        with pm.Model():
            x = GaussianRandomWalk("x", observed=np.zeros(10))
        steps = x.owner.inputs[-1]
        assert steps.eval() == 9

    @pytest.mark.parametrize(
        "init",
        [
            pm.HalfNormal.dist(sigma=2),
            pm.StudentT.dist(nu=4, mu=1, sigma=0.5),
        ],
    )
    def test_gaussian_random_walk_init_dist_logp(self, init):
        grw = pm.GaussianRandomWalk.dist(init=init, steps=1)
        assert np.isclose(
            pm.logp(grw, [0, 0]).eval(),
            pm.logp(init, 0).eval() + scipy.stats.norm.logpdf(0),
        )

    @pytest.mark.parametrize(
        "mu, sigma, init, steps, size, expected",
        [
            (0, 1, Normal.dist(1), 10, None, np.ones((11, ))),
            (1, 1, Normal.dist(0), 10, (2, ), np.full((2, 11), np.arange(11))),
            (1, 1, Normal.dist([0, 1]), 10, None,
             np.vstack((np.arange(11), np.arange(11) + 1))),
            (0, [1, 1], Normal.dist(0), 10, None, np.zeros((2, 11))),
            (
                [1, -1],
                1,
                Normal.dist(0),
                10,
                (4, 2),
                np.full((4, 2, 11), np.vstack(
                    (np.arange(11), -np.arange(11)))),
            ),
        ],
    )
    def test_moment(self, mu, sigma, init, steps, size, expected):
        with Model() as model:
            GaussianRandomWalk("x",
                               mu=mu,
                               sigma=sigma,
                               init=init,
                               steps=steps,
                               size=size)
        assert_moment_is_expected(model, expected)
Example No. 29
def test_logp_helper_derived_rv():
    assert np.isclose(
        logp(at.exp(Normal.dist()), 5).eval(),
        logp(LogNormal.dist(), 5).eval(),
    )
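
The equality follows from the change of variables y = exp(x): logp_Y(y) = logp_X(log y) - log y, which is exactly the LogNormal density. A quick SciPy check of the same identity (illustrative):

# LogNormal(0, 1) logpdf equals Normal(0, 1) logpdf at log(y) minus the Jacobian term log(y).
import numpy as np
import scipy.stats as st

value = 5.0
lognormal_logp = st.lognorm(s=1.0, scale=1.0).logpdf(value)
transformed_logp = st.norm(0, 1).logpdf(np.log(value)) - np.log(value)
np.testing.assert_allclose(lognormal_logp, transformed_logp)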