Example #1
def test_missing():
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal('x', 1, 1)
        with pytest.warns(ImputationWarning):
            Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2,)

    model.logp(model.test_point)

    with model:
        prior_trace = sample_prior_predictive()
    assert {'x', 'y'} <= set(prior_trace.keys())
Example #2
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal('beta1', 1, 1)
        beta2 = Normal('beta2', 2, 1)
        latent = Normal('theta', shape=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal('o1', mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal('o2', mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive()
        assert {'beta1', 'beta2', 'theta', 'o1', 'o2'} <= set(prior_trace.keys())
        sample()
Example #3
def test_missing_pandas():
    data = pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan])
    with Model() as model:
        x = Normal('x', 1, 1)
        with pytest.warns(ImputationWarning):
            Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2,)

    model.logp(model.test_point)

    with model:
        prior_trace = sample_prior_predictive()
    assert {'x', 'y'} <= set(prior_trace.keys())
Example #4
def get_garch_model():
    r = np.array([28, 8, -3, 7, -1, 1, 18, 12])
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18])
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18])
    shape = r.shape

    with Model() as garch:
        alpha1 = Normal('alpha1', mu=0., sd=1., shape=shape)
        BoundedNormal = Bound(Normal, upper=(1 - alpha1))
        beta1 = BoundedNormal('beta1', mu=0., sd=1e6, shape=shape)
        mu = Normal('mu', mu=0., sd=1e6, shape=shape)
        theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) +
                        beta1 * tt.pow(sigma1, 2))
        Normal('obs', mu, sd=theta, observed=r)
    return garch
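A possible usage sketch for the factory above, assuming `sample` is imported from pymc3 as in the surrounding examples (the draw and tune counts are illustrative, not from the original code):

# Sketch only: draw posterior samples from the GARCH model built above.
with get_garch_model():
    trace = sample(1000, tune=1000)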
Example #5
def test_minibatch():
    draws = 3000
    mu0 = 1
    sd0 = 1
    
    def f(x, a, b, c):
        return a*x**2 + b*x + c
    
    a, b, c = 1, 2, 3

    batch_size = 50
    total_size = batch_size*500
    x_train = np.random.uniform(-10, 10, size=(total_size,)).astype('float32')
    x_obs = pm.data.Minibatch(x_train, batch_size=batch_size)

    y_train = f(x_train, a, b, c) + np.random.normal(size=x_train.shape).astype('float32')
    y_obs = pm.data.Minibatch(y_train, batch_size=batch_size)

    with Model():
        abc = Normal('abc', mu=mu0, sd=sd0, shape=(3,))
        x = x_obs
        x2 = x**2
        o = tt.ones_like(x)
        X = tt.stack([x2, x, o]).T
        y = X.dot(abc)
        pm.Normal('y', mu=y, observed=y_obs)

        step_method = pm.SGFS(batch_size=batch_size, step_size=1., total_size=total_size)
        trace = pm.sample(draws=draws, step=step_method, init=None, cores=2)

    np.testing.assert_allclose(np.mean(trace['abc'], axis=0), np.asarray([a, b, c]), rtol=0.1)
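For reference, `pm.data.Minibatch` as used above wraps the full array in a shared variable and slices out a fresh random batch on every evaluation. A small standalone sketch with made-up data (not part of the original test):

import numpy as np
import pymc3 as pm

full = np.arange(20, dtype="float32")
mb = pm.data.Minibatch(full, batch_size=5)
# Each evaluation draws a different random subset of 5 rows from `full`.
print(mb.eval())
print(mb.eval())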
Example #6
    def logp(self, observed):
        """Calculated the log likelihood of the observed streamflow given
        simulated streamflow from GR4J"""

        simulated = simulate_streamflow(self.precipitation,
                                        self.evaporation,
                                        self.S0,
                                        self.Pr0,
                                        self.R0,
                                        self.x1,
                                        self.x2,
                                        self.x3,
                                        self.x4,
                                        self.x4_limit,
                                        truncate_gradient=self.truncate,
                                        tv_x1=self.tv_x1)

        # Restrict the likelihood calculation to fewer than len(observed)
        # points; this can speed up the computation.
        if self.subsample_index is not None:
            observed = observed[self.subsample_index]
            simulated = simulated[self.subsample_index]

        density = Normal.dist(mu=simulated, sd=self.sd)
        return tt.sum(density.logp(observed))
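The `logp` above is a method on a custom distribution class; the same delegate-to-`Normal.dist` pattern can also be used directly in a model via `pm.Potential`. A self-contained sketch with placeholder names and data (not from the original project):

import numpy as np
import pymc3 as pm
import theano.tensor as tt

observed_flow = np.array([1.2, 0.8, 1.5, 1.1])  # placeholder observations

with pm.Model():
    sd = pm.HalfNormal("sd", 1.0)
    simulated = pm.Normal("simulated", mu=1.0, sd=1.0, shape=observed_flow.shape)
    # Evaluate a Normal density at the observations and add the summed
    # log-likelihood to the model, mirroring the logp method above.
    pm.Potential("loglike", tt.sum(pm.Normal.dist(mu=simulated, sd=sd).logp(observed_flow)))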
Example #7
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=(3, 2), testval=0.1 * np.ones((3, 2)))

    return model.test_point, model, (mu, tau ** -1)
Example #8
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, shape=(3, 2), testval=0.1 * tt.ones((3, 2)))

    return model.test_point, model, (mu, tau**-0.5)
Example #9
    def test_normal_mixture(self):
        with Model() as model:
            w = Dirichlet('w', np.ones_like(self.norm_w))

            mu = Normal('mu', 0., 10., shape=self.norm_w.size)
            tau = Gamma('tau', 1., 1., shape=self.norm_w.size)

            x_obs = NormalMixture('x_obs',
                                  w,
                                  mu,
                                  tau=tau,
                                  observed=self.norm_x)

            step = Metropolis()
            trace = sample(5000,
                           step,
                           random_seed=self.random_seed,
                           progressbar=False)

        assert_allclose(np.sort(trace['w'].mean(axis=0)),
                        np.sort(self.norm_w),
                        rtol=0.1,
                        atol=0.1)
        assert_allclose(np.sort(trace['mu'].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1,
                        atol=0.1)
Example #10
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=2, testval=[0.1] * 2)

    return model.test_point, model, (mu, tau ** -1)
Example #11
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))

    return model.initial_point, model, (mu, tau**-0.5)
Example #12
    def test_linear_component(self):
        vars_to_create = {
            "sigma", "sigma_interval__", "y_obs", "lm_x0", "lm_Intercept"
        }
        with Model() as model:
            lm = LinearComponent(self.data_linear["x"],
                                 self.data_linear["y"],
                                 name="lm")  # yields lm_x0, lm_Intercept
            sigma = Uniform("sigma", 0, 20)  # yields sigma_interval__
            Normal("y_obs", mu=lm.y_est, sigma=sigma,
                   observed=self.y_linear)  # yields y_obs
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(500,
                           tune=0,
                           step=step,
                           start=start,
                           progressbar=False,
                           random_seed=self.random_seed)

            assert round(abs(np.mean(trace["lm_Intercept"]) - self.intercept),
                         1) == 0
            assert round(abs(np.mean(trace["lm_x0"]) - self.slope), 1) == 0
            assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
        assert vars_to_create == set(model.named_vars.keys())
Example #13
def Update_After_Win(Obs_Sent, X, N, Freq, trace):

    # Win data = change in frequency distribution
    Obs_idx = np.argmin(abs(X - Obs_Sent))
    Freq[Obs_idx] = Freq[Obs_idx] + 1

    # Normalisation (rescale so the total count stays consistent)
    Freq = Freq * N / (N + 1)

    model = Model()
    with model:
        # Priors are posteriors from previous iteration
        alpha = from_posterior('alpha', trace['alpha'])
        beta = from_posterior('beta', trace['beta'])
        gamma = from_posterior('gamma', trace['gamma'])
        delta = from_posterior('delta', trace['delta'])
        #epsilon = from_posterior('epsilon', trace['epsilon'])

        # Use theano ops here so the expressions work with the random variables
        Arg_A = tt.sqrt(tt.sum((X - alpha) ** 2))
        Arg_B = tt.sqrt(tt.sum((X - beta) ** 2))

        # Expected value of Frequency
        mu = gamma * tt.exp(-2 * Arg_A ** 2) + delta * tt.exp(-2 * Arg_B ** 2)

        # Likelihood (sampling distribution) of observations
        Y_obs = Normal('Y_obs', mu, 1, observed=Freq[Obs_idx])

        # draw 100 posterior samples
        trace = sample(100, cores=1)
    return trace
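`from_posterior` is assumed above but not defined here; a common way to build such a helper, following the standard PyMC3 updating-priors recipe, is to turn the previous trace into an `Interpolated` prior. A sketch, not necessarily the original project's implementation:

import numpy as np
from scipy import stats
from pymc3 import Interpolated

def from_posterior(param, samples):
    # Kernel-density estimate of the posterior samples on a regular grid,
    # padded with zero density outside the observed support.
    smin, smax = np.min(samples), np.max(samples)
    width = smax - smin
    x = np.linspace(smin, smax, 100)
    y = stats.gaussian_kde(samples)(x)
    x = np.concatenate([[x[0] - 3 * width], x, [x[-1] + 3 * width]])
    y = np.concatenate([[0], y, [0]])
    return Interpolated(param, x, y)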
Example #14
    def test_linear_component(self):
        vars_to_create = {
            'sigma', 'sigma_interval__', 'y_obs', 'lm_x0', 'lm_Intercept'
        }
        with Model() as model:
            lm = LinearComponent(self.data_linear['x'],
                                 self.data_linear['y'],
                                 name='lm')  # yields lm_x0, lm_Intercept
            sigma = Uniform('sigma', 0, 20)  # yields sigma_interval__
            Normal('y_obs', mu=lm.y_est, sigma=sigma,
                   observed=self.y_linear)  # yields y_obs
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(500,
                           tune=0,
                           step=step,
                           start=start,
                           progressbar=False,
                           random_seed=self.random_seed)

            assert round(abs(np.mean(trace['lm_Intercept']) - self.intercept),
                         1) == 0
            assert round(abs(np.mean(trace['lm_x0']) - self.slope), 1) == 0
            assert round(abs(np.mean(trace['sigma']) - self.sd), 1) == 0
        assert vars_to_create == set(model.named_vars.keys())
Example #15
def test_expressions(expr):
    with Model() as model:
        var = expr((10, 10))
        Normal('obs', observed=var)
        assert var.tag.test_value.shape == (10, 10)
        assert len(model.free_RVs) == 3
        fit(1)
Example #16
def test_missing_with_predictors():
    predictors = array([0.5, 1, 0.5, 2, 0.3])
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal("x", 1, 1)
        with pytest.warns(ImputationWarning):
            Normal("y", x * predictors, 1, observed=data)

    (y_missing, ) = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)

    with model:
        prior_trace = sample_prior_predictive()
    assert {"x", "y"} <= set(prior_trace.keys())
Example #17
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))

    return model.initial_point, model, (mu, tau**-0.5)
Example #18
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, shape=2, testval=tt.ones(2) * 0.1)

    return model.test_point, model, (mu, tau**-0.5)
Example #19
def simple_model():
    mu = 2.
    tau = 10.
    with Model() as model:
        x = Normal('x', mu, tau=tau)

    return model
Example #20
    def test_gradient_with_scaling(self):
        with pm.Model() as model1:
            genvar = generator(gen1())
            m = Normal('m')
            Normal('n', observed=genvar, total_size=1000)
            grad1 = theano.function([m], tt.grad(model1.logpt, m))
        with pm.Model() as model2:
            m = Normal('m')
            shavar = theano.shared(np.ones((1000, 100)))
            Normal('n', observed=shavar)
            grad2 = theano.function([m], tt.grad(model2.logpt, m))

        for i in range(10):
            shavar.set_value(np.ones((100, 100)) * i)
            g1 = grad1(1)
            g2 = grad2(1)
            np.testing.assert_almost_equal(g1, g2)
Example #21
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet('w', np.ones_like(self.norm_w))
            mu = Normal('mu', 0., 10., shape=self.norm_w.size)
            tau = Gamma('tau', 1., 1., shape=self.norm_w.size)
            Mixture('x_obs', w,
                    [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                    observed=self.norm_x)
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False)

        assert_allclose(np.sort(trace['w'].mean(axis=0)),
                        np.sort(self.norm_w),
                        rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace['mu'].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1, atol=0.1)
Example #22
    def test_common_errors(self):
        with pm.Model():
            with pytest.raises(ValueError) as e:
                Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2, 2])
            assert "Length of" in str(e.value)
            with pytest.raises(ValueError) as e:
                Normal("n", observed=[[1]], total_size=[2, 2, 2])
            assert "Length of" in str(e.value)
            with pytest.raises(TypeError) as e:
                Normal("n", observed=[[1]], total_size="foo")
            assert "Unrecognized" in str(e.value)
            with pytest.raises(TypeError) as e:
                Normal("n", observed=[[1]], total_size=["foo"])
            assert "Unrecognized" in str(e.value)
            with pytest.raises(ValueError) as e:
                Normal("n", observed=[[1]], total_size=[Ellipsis, Ellipsis])
            assert "Double Ellipsis" in str(e.value)
Example #23
    def test_common_errors(self):
        with pm.Model():
            with pytest.raises(ValueError) as e:
                Normal('n', observed=[[1]], total_size=[2, Ellipsis, 2, 2])
            assert 'Length of' in str(e.value)
            with pytest.raises(ValueError) as e:
                Normal('n', observed=[[1]], total_size=[2, 2, 2])
            assert 'Length of' in str(e.value)
            with pytest.raises(TypeError) as e:
                Normal('n', observed=[[1]], total_size='foo')
            assert 'Unrecognized' in str(e.value)
            with pytest.raises(TypeError) as e:
                Normal('n', observed=[[1]], total_size=['foo'])
            assert 'Unrecognized' in str(e.value)
            with pytest.raises(ValueError) as e:
                Normal('n', observed=[[1]], total_size=[Ellipsis, Ellipsis])
            assert 'Double Ellipsis' in str(e.value)
Example #24
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet('w', floatX(np.ones_like(self.norm_w)))
            mu = Normal('mu', 0., 10., shape=self.norm_w.size)
            tau = Gamma('tau', 1., 1., shape=self.norm_w.size)
            Mixture('x_obs', w,
                    [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                    observed=self.norm_x)
            step = Metropolis()
            trace = sample(3500, step, random_seed=self.random_seed,
                           progressbar=False, chains=1)

        assert_allclose(np.sort(trace['w'].mean(axis=0)),
                        np.sort(self.norm_w),
                        rtol=0.1, atol=0.1)
        assert_allclose(np.sort(trace['mu'].mean(axis=0)),
                        np.sort(self.norm_mu),
                        rtol=0.1, atol=0.1)
Example #25
def test_allinmodel():
    model1 = Model()
    model2 = Model()
    with model1:
        x1 = Normal("x1", mu=0, sigma=1)
        y1 = Normal("y1", mu=0, sigma=1)
    with model2:
        x2 = Normal("x2", mu=0, sigma=1)
        y2 = Normal("y2", mu=0, sigma=1)

    starting.allinmodel([x1, y1], model1)
    starting.allinmodel([x1], model1)
    with raises(ValueError, match=r"Some variables not in the model: \['x2', 'y2'\]"):
        starting.allinmodel([x2, y2], model1)
    with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2, y1], model1)
    with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2], model1)
Example #26
    def test_mixture_list_of_normals(self):
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
            Mixture(
                "x_obs",
                w,
                [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
                observed=self.norm_x,
            )
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
        assert_allclose(
            np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
        )
Example #27
def test_advi_minibatch():
    n = 1000
    sd0 = 2.
    mu0 = 4.
    sd = 3.
    mu = -5.

    data = sd * np.random.RandomState(0).randn(n) + mu

    d = n / sd**2 + 1 / sd0**2
    mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

    data_t = tt.vector()
    data_t.tag.test_value = np.zeros(1, )

    with Model() as model:
        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
        x = Normal('x', mu=mu_, sd=sd, observed=data_t)

    minibatch_RVs = [x]
    minibatch_tensors = [data_t]

    def create_minibatch(data):
        while True:
            data = np.roll(data, 100, axis=0)
            yield (data[:100], )

    minibatches = create_minibatch(data)

    with model:
        advi_fit = advi_minibatch(n=1000,
                                  minibatch_tensors=minibatch_tensors,
                                  minibatch_RVs=minibatch_RVs,
                                  minibatches=minibatches,
                                  total_size=n,
                                  learning_rate=1e-1,
                                  random_seed=1)

        np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)

        trace = sample_vp(advi_fit, 10000)

    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
Example #28
def test_advi_minibatch():
    n = 1000
    sd0 = 2.
    mu0 = 4.
    sd = 3.
    mu = -5.

    data = sd * np.random.RandomState(0).randn(n) + mu

    d = n / sd**2 + 1 / sd0**2
    mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

    data_t = tt.vector()
    data_t.tag.test_value = np.zeros(1, )

    with Model() as model:
        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
        x = Normal('x', mu=mu_, sd=sd, observed=data_t)

        # mu = Normal('mu', mu=0, sd=1, testval=0)
        # sd = HalfNormal('sd', sd=1)
        # n = Normal('n', mu=mu, sd=sd, observed=data_t)

    minibatch_RVs = [x]
    minibatch_tensors = [data_t]

    def create_minibatch(data):
        while True:
            data = np.roll(data, 100, axis=0)
            yield data[:100]

    minibatches = [create_minibatch(data)]

    means, sds, elbos = advi_minibatch(model=model,
                                       n=1000,
                                       minibatch_tensors=minibatch_tensors,
                                       minibatch_RVs=minibatch_RVs,
                                       minibatches=minibatches,
                                       total_size=n,
                                       learning_rate=1e-1,
                                       seed=1)

    np.testing.assert_allclose(means['mu'], mu_post, rtol=0.1)
Example #29
    def test_gradient_with_scaling(self):
        with pm.Model() as model1:
            genvar = generator(gen1())
            m = Normal("m")
            Normal("n", observed=genvar, total_size=1000)
            grad1 = aesara.function([m.tag.value_var],
                                    at.grad(model1.logpt, m.tag.value_var))
        with pm.Model() as model2:
            m = Normal("m")
            shavar = aesara.shared(np.ones((1000, 100)))
            Normal("n", observed=shavar)
            grad2 = aesara.function([m.tag.value_var],
                                    at.grad(model2.logpt, m.tag.value_var))

        for i in range(10):
            shavar.set_value(np.ones((100, 100)) * i)
            g1 = grad1(1)
            g2 = grad2(1)
            np.testing.assert_almost_equal(g1, g2)
Example #30
def linear_regression(data: Callable, samples=None):

    x, y = data()
    basic_model = pm.Model()
    trace = None
    with basic_model:
        # Define priors
        sigma = HalfCauchy("sigma", beta=10, testval=1.0)
        intercept = Normal("Intercept", 0, sigma=20)
        x_coeff = Normal("x", 0, sigma=20)
        # Likelihood of the observations
        likelihood = Normal("y", mu=intercept + x_coeff * x, sigma=sigma, observed=y)
        map_est = map_estimation(basic_model, method="powell")
        if samples is not None:
            if not isinstance(samples, int):
                raise ValueError("samples arg must be int")
            elif samples < 50:
                raise ValueError("samples must be greater than 50")
            else:
                trace = pm.sample(samples)
                traceplot(trace)
    return basic_model, trace
Example #31
    def __init__(self, parent):
        self.player = parent
        X = np.loadtxt('AWS_collapsed.txt')
        X = (X + 1) / 2
        Freq = np.zeros(100)
        #
        for i in X:
            Freq[int(np.ceil(i * 100))] += 1
        #
        X = np.linspace(0, 1, 100)
        #
        with pm.Model():
            # Priors are posteriors from previous iteration
            alpha = Normal('alpha', 0.5, 0.5)
            beta = Normal('beta', 0.5, 0.5)
            gamma = Normal('gamma', 0.5, 0.5)
            delta = Normal('delta', 0.5, 0.5)

            # self.trace = initialise_prior(X,Freq)
            self.trace = pm.backends.text.load('AWS')
Example #32
    def test_normal_mixture_nd(self):
        nd, ncomp = 3, 5

        with Model() as model0:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            mixture0 = NormalMixture('m', w=ws, mu=mus, tau=taus, shape=nd)

        with Model() as model1:
            mus = Normal('mus', shape=(nd, ncomp))
            taus = Gamma('taus', alpha=1, beta=1, shape=(nd, ncomp))
            ws = Dirichlet('ws', np.ones(ncomp))
            comp_dist = [Normal.dist(mu=mus[:, i], tau=taus[:, i])
                         for i in range(ncomp)]
            mixture1 = Mixture('m', w=ws, comp_dists=comp_dist, shape=nd)

        testpoint = model0.test_point
        testpoint['mus'] = np.random.randn(nd, ncomp)
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
Example #33
def build_model():
    y = shared(np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32))
    with Model() as arma_model:
        sigma = HalfCauchy('sigma', 5)
        theta = Normal('theta', 0, sd=2)
        phi = Normal('phi', 0, sd=2)
        mu = Normal('mu', 0, sd=10)

        err0 = y[0] - (mu + phi * mu)

        def calc_next(last_y, this_y, err, mu, phi, theta):
            nu_t = mu + phi * last_y + theta * err
            return this_y - nu_t

        err, _ = scan(fn=calc_next,
                      sequences=dict(input=y, taps=[-1, 0]),
                      outputs_info=[err0],
                      non_sequences=[mu, phi, theta])

        Potential('like', Normal.dist(0, sd=sigma).logp(err))
        mu, sds, elbo = variational.advi(n=2000)
    return arma_model
Example #34
    theta = Normal('theta', 0, sd=2)
    phi = Normal('phi', 0, sd=2)
    mu = Normal('mu', 0, sd=10)

    err0 = y[0] - (mu + phi*mu)

    def calc_next(last_y, this_y, err, mu, phi, theta):
        nu_t = mu + phi*last_y + theta*err
        return this_y - nu_t

    err, _ = scan(fn=calc_next,
                  sequences=dict(input=y, taps=[-1,0]),
                  outputs_info=[err0],
                  non_sequences=[mu, phi, theta])

    like = Potential('like', Normal.dist(0, sd=sigma).logp(err))

with arma_model:
    mu, sds, elbo = variational.advi(n=2000)


def run(n=1000):
    if n == "short":
        n = 50
    with arma_model:

        trace = sample(1000)

    burn = n // 10  # integer division so the slice index is an int

    traceplot(trace[burn:])
Example #35
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(w=np.ones(ncomp)/ncomp,
                                                mu=test_mus,
                                                sd=1/np.sqrt(test_taus),
                                                size=10)

        with Model() as model0:
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            mixture0 = NormalMixture('m', w=ws, mu=mus, tau=taus, shape=nd,
                                     comp_shape=comp_shape)
            obs0 = NormalMixture('obs', w=ws, mu=mus, tau=taus, shape=nd,
                                 comp_shape=comp_shape,
                                 observed=observed)

        with Model() as model1:
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            comp_dist = [Normal.dist(mu=mus[..., i], tau=taus[..., i],
                                     shape=nd)
                         for i in range(ncomp)]
            mixture1 = Mixture('m', w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture('obs', w=ws, comp_dists=comp_dist, shape=nd,
                           observed=observed)

        with Model() as model2:
            # Expected to fail if comp_shape is not provided,
            # nd is multidim and it does not broadcast with ncomp. If by chance
            # it does broadcast, an error is raised if the mixture is given
            # observed data.
            # Furthermore, the Mixture will also raise errors when the observed
            # data is multidimensional but it does not broadcast well with
            # comp_dists.
            mus = Normal('mus', shape=comp_shape)
            taus = Gamma('taus', alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet('ws', np.ones(ncomp))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture('m', w=ws, mu=mus, tau=taus,
                                      shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture('m', w=ws, mu=mus, tau=taus,
                                             shape=nd)
            else:
                mixture2 = NormalMixture('m', w=ws, mu=mus, tau=taus,
                                         shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1,):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture('obs', w=ws, mu=mus, tau=taus,
                                  shape=nd,
                                  observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture('obs', w=ws, mu=mus, tau=taus,
                                     shape=nd,
                                     observed=observed)

        testpoint = model0.test_point
        testpoint['mus'] = test_mus
        testpoint['taus'] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
Example #36
    def test_mixture_of_mixture(self):
        if theano.config.floatX == 'float32':
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(
                mu=Exponential('mu_g', lam=1.0, shape=nbr, transform=None),
                sigma=1,
                shape=nbr)
            l_comp = Lognormal.dist(
                mu=Exponential('mu_l', lam=1.0, shape=nbr, transform=None),
                sigma=1,
                shape=nbr)
            # weight vector for the mixtures
            g_w = Dirichlet('g_w', a=floatX(np.ones(nbr)*0.0000001), transform=None)
            l_w = Dirichlet('l_w', a=floatX(np.ones(nbr)*0.0000001), transform=None)
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet('mix_w', a=floatX(np.ones(2)), transform=None)
            mix = Mixture('mix', w=mix_w,
                          comp_dists=[g_mix, l_mix],
                          observed=np.exp(self.norm_x))

        test_point = model.test_point

        def mixmixlogp(value, point):
            floatX = theano.config.floatX
            priorlogp = st.dirichlet.logpdf(x=point['g_w'],
                                            alpha=np.ones(nbr)*0.0000001,
                                            ).astype(floatX) + \
                        st.expon.logpdf(x=point['mu_g']).sum(dtype=floatX) + \
                        st.dirichlet.logpdf(x=point['l_w'],
                                            alpha=np.ones(nbr)*0.0000001,
                                            ).astype(floatX) + \
                        st.expon.logpdf(x=point['mu_l']).sum(dtype=floatX) + \
                        st.dirichlet.logpdf(x=point['mix_w'],
                                            alpha=np.ones(2),
                                            ).astype(floatX)
            complogp1 = st.norm.logpdf(x=value,
                                       loc=point['mu_g']).astype(floatX)
            mixlogp1 = logsumexp(np.log(point['g_w']).astype(floatX) +
                                 complogp1,
                                 axis=-1, keepdims=True)
            complogp2 = st.lognorm.logpdf(value,
                                          1.,
                                          0.,
                                          np.exp(point['mu_l'])).astype(floatX)
            mixlogp2 = logsumexp(np.log(point['l_w']).astype(floatX) +
                                 complogp2,
                                 axis=-1, keepdims=True)
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(np.log(point['mix_w']).astype(floatX) +
                                    complogp_mix,
                                    axis=-1, keepdims=True)
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point),
                        rtol=rtol)

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)

        # check input and check logp again
        test_point['g_w'] = np.asarray([.1, .1, .2, .6])
        test_point['mu_g'] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point),
                        rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(),
                        model.logp(test_point),
                        rtol=rtol)