Beispiel #1
0
    def test_order1_logp(self):
        """Check that the AR(1) logp equals the matching lag-1 linear regression."""
        data = np.array([0.3, 1, 2, 3, 4])
        phi = np.array([0.99])
        n = len(data)

        with Model() as t:
            y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=n)
            z = Normal("z", mu=phi * data[:-1], sigma=1, shape=n - 1)
        ar_like = t.compile_logp(y)({"y": data})
        reg_like = t.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)

        # Same comparison, now with an explicit constant term in the AR process.
        with Model() as t_constant:
            y = AR(
                "y",
                np.hstack((0.3, phi)),
                sigma=1,
                init_dist=Flat.dist(),
                shape=n,
                constant=True,
            )
            z = Normal("z", mu=0.3 + phi * data[:-1], sigma=1, shape=n - 1)
        ar_like = t_constant.compile_logp(y)({"y": data})
        reg_like = t_constant.compile_logp(z)({"z": data[1:]})
        np.testing.assert_allclose(ar_like, reg_like)
Beispiel #2
0
    def __init__(self, rho, sigma=None, tau=None, constant=False, init=None, *args, **kwargs):
        """Set up an AR(p) process from the coefficient vector ``rho``.

        The AR order ``p`` is inferred from ``rho``; when ``constant`` is
        true, the first entry of ``rho`` is treated as an intercept and
        excluded from the order count.
        """
        super().__init__(*args, **kwargs)
        tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
        self.sigma = at.as_tensor_variable(sigma)
        self.tau = at.as_tensor_variable(tau)
        self.mean = at.as_tensor_variable(0.0)

        # Infer the order p from rho: a plain list gives it directly,
        # otherwise fall back to the tensor's (test-value) shape.
        if isinstance(rho, list):
            p = len(rho)
        else:
            try:
                shape_ = rho.shape.tag.test_value
            except AttributeError:
                shape_ = rho.shape
            # A 0-d shape means a scalar coefficient, i.e. AR(1).
            p = 1 if (hasattr(shape_, "size") and shape_.size == 0) else shape_[0]

        # The intercept entry does not count toward the AR order.
        self.p = p - 1 if constant else p
        self.constant = constant
        self.rho = rho = at.as_tensor_variable(rho)
        # Default to a flat initial distribution when none is supplied.
        self.init = init or Flat.dist()
Beispiel #3
0
    def __init__(
        self, mu=0.0, cov=None, tau=None, chol=None, lower=True, init=None, *args, **kwargs
    ):
        """Random walk whose innovations are multivariate normal.

        The covariance may be given as ``cov``, ``tau`` or a Cholesky
        factor ``chol`` (with ``lower`` selecting the triangle).
        """
        super().__init__(*args, **kwargs)
        self.mean = at.as_tensor_variable(0.0)
        # Default to a flat initial distribution when none is supplied.
        self.init = init or Flat.dist()
        self.innovArgs = (mu, cov, tau, chol, lower)
        self.innov = multivariate.MvNormal.dist(*self.innovArgs, shape=self.shape)
Beispiel #4
0
 def test_order2_logp(self):
     """Check that the AR(2) logp equals the matching lag-2 linear regression."""
     data = np.array([0.3, 1, 2, 3, 4])
     phi = np.array([0.84, 0.10])
     n = len(data)
     with Model() as t:
         y = AR("y", phi, sigma=1, init_dist=Flat.dist(), shape=n)
         # Regression target: each point as a function of its two predecessors.
         z = Normal(
             "z",
             mu=phi[0] * data[1:-1] + phi[1] * data[:-2],
             sigma=1,
             shape=n - 2,
         )
     ar_like = t.compile_logp(y)({"y": data})
     reg_like = t.compile_logp(z)({"z": data[2:]})
     np.testing.assert_allclose(ar_like, reg_like)
Beispiel #5
0
 def __init__(self, tau=None, init=None, sigma=None, mu=0.0, *args, **kwargs):
     """Gaussian random walk with drift ``mu`` and innovation scale ``sigma``/``tau``.

     Raises
     ------
     TypeError
         If the (defaulted) shape sums to zero.
     """
     kwargs.setdefault("shape", 1)
     super().__init__(*args, **kwargs)
     # A zero-sized walk has no steps to assign a density to.
     if sum(self.shape) == 0:
         raise TypeError("GaussianRandomWalk must be supplied a non-zero shape argument!")
     tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
     self.tau = at.as_tensor_variable(tau)
     self.sigma = at.as_tensor_variable(sigma)
     self.mu = at.as_tensor_variable(mu)
     # Default to a flat initial distribution when none is supplied.
     self.init = init or Flat.dist()
     self.mean = at.as_tensor_variable(0.0)
Beispiel #6
0
def test_linear():
    """Fit the linear SDE dx = lam*x*dt + noise and recover lam from noisy data."""
    lam = -0.78
    sig2 = 5e-3
    N = 300
    dt = 1e-1

    def sde(x, lam):
        # Drift and (constant) diffusion of the linear SDE.
        return lam * x, sig2

    x = floatX(_gen_sde_path(sde, (lam,), dt, N, 5.0))
    z = x + np.random.randn(x.size) * sig2

    # Build the model.
    with Model() as model:
        lamh = Flat("lamh")
        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, initval=x)
        Normal("zh", mu=xh, sigma=sig2, observed=z)

    # Invert.
    with model:
        trace = sample(init="advi+adapt_diag", chains=1)

    ppc = sample_posterior_predictive(trace, model=model)

    # The 95% interval should bracket the true lam and most observations.
    p95 = [2.5, 97.5]
    lo, hi = np.percentile(trace[lamh], p95, axis=0)
    assert lo < lam < hi
    lo, hi = np.percentile(ppc["zh"], p95, axis=0)
    assert ((lo < z) * (z < hi)).mean() > 0.95