Example #1
    def test_common_errors(self):
        with pytest.raises(ValueError) as e:
            with pm.Model() as m:
                Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2, 2])
                m.logpt
        assert "Length of" in str(e.value)
        with pytest.raises(ValueError) as e:
            with pm.Model() as m:
                Normal("n", observed=[[1]], total_size=[2, 2, 2])
                m.logpt
        assert "Length of" in str(e.value)
        with pytest.raises(TypeError) as e:
            with pm.Model() as m:
                Normal("n", observed=[[1]], total_size="foo")
                m.logpt
        assert "Unrecognized" in str(e.value)
        with pytest.raises(TypeError) as e:
            with pm.Model() as m:
                Normal("n", observed=[[1]], total_size=["foo"])
                m.logpt
        assert "Unrecognized" in str(e.value)
        with pytest.raises(ValueError) as e:
            with pm.Model() as m:
                Normal("n", observed=[[1]], total_size=[Ellipsis, Ellipsis])
                m.logpt
        assert "Double Ellipsis" in str(e.value)
Example #2
def test_elbo():
    mu0 = 1.5
    sigma = 1.0
    y_obs = np.array([1.6, 1.4])

    post_mu = np.array([1.88], dtype=theano.config.floatX)
    post_sd = np.array([1], dtype=theano.config.floatX)
    # Create a model for test
    with Model() as model:
        mu = Normal('mu', mu=mu0, sd=sigma)
        Normal('y', mu=mu, sd=1, observed=y_obs)

    # Create variational gradient tensor
    mean_field = MeanField(model=model)
    elbo = -KL(mean_field)()(mean_field.random())

    mean_field.shared_params['mu'].set_value(post_mu)
    mean_field.shared_params['rho'].set_value(np.log(np.exp(post_sd) - 1))

    f = theano.function([], elbo)
    elbo_mc = sum(f() for _ in range(10000)) / 10000

    # Exact value
    elbo_true = (-0.5 * (3 + 3 * post_mu**2 - 2 *
                         (y_obs[0] + y_obs[1] + mu0) * post_mu + y_obs[0]**2 +
                         y_obs[1]**2 + mu0**2 + 3 * np.log(2 * np.pi)) + 0.5 *
                 (np.log(2 * np.pi) + 1))
    np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1)
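The elbo_true line is the closed-form ELBO for this conjugate model. As a sketch of where it comes from, writing m for post_mu and using the unit posterior standard deviation set above, so that q(mu) = N(m, 1):

\mathrm{ELBO} = \mathbb{E}_q\!\left[\log p(y_1 \mid \mu) + \log p(y_2 \mid \mu) + \log p(\mu)\right] + \mathbb{H}[q]
             = -\tfrac{1}{2}\left(3 + 3m^2 - 2(y_1 + y_2 + \mu_0)m + y_1^2 + y_2^2 + \mu_0^2 + 3\log 2\pi\right) + \tfrac{1}{2}\left(\log 2\pi + 1\right)

using \mathbb{E}_q[(y_i - \mu)^2] = y_i^2 - 2 y_i m + m^2 + 1 for each Gaussian term and the entropy of a unit-variance Gaussian for \mathbb{H}[q].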
Example #3
def get_samples():

    with Model() as gene_model:
        # Bernoulli indicator variables for each gene; the gene states shift the mean blood pressures below.
        g1 = Bernoulli("g1", 0.5)

        g2 = Bernoulli("g2", pm.math.switch(tt.eq(g1, 0), 0.1, 0.9))
        g3 = Bernoulli("g3", pm.math.switch(tt.eq(g1, 0), 0.1, 0.9))

        mean_g1 = pm.math.switch(tt.eq(g1, 0), 50, 60)
        mean_g2 = pm.math.switch(tt.eq(g2, 0), 50, 60)
        mean_g3 = pm.math.switch(tt.eq(g3, 0), 50, 60)

        x1 = Normal("x1", mean_g1, np.sqrt(10))
        x2 = Normal("x2", mean_g2, np.sqrt(10), observed=50)
        x3 = Normal("x3", mean_g3, np.sqrt(10))

    with gene_model:
        # obtain starting values via MAP
        start = find_MAP(model=gene_model)
        # instantiate sampler
        step = pm.Metropolis()

        # draw 5000 posterior samples
        gene_trace = pm.sample(5000, step=step, start=start)

    from pymc3 import traceplot
    traceplot(gene_trace)
    print(pm.summary(gene_trace))
    plt.show()

    return gene_trace
Example #4
    def test_elbo(self):
        mu0 = 1.5
        sigma = 1.0
        y_obs = np.array([1.6, 1.4])

        post_mu = 1.88
        post_sd = 1
        # Create a model for test
        with Model() as model:
            mu = Normal('mu', mu=mu0, sd=sigma)
            Normal('y', mu=mu, sd=1, observed=y_obs)

        # Create variational gradient tensor
        elbo, _, updates, vp = sample_elbo(model, samples=10000)

        vp.shared.means['mu'].set_value(post_mu)
        vp.shared.rhos['mu'].set_value(sd2rho(post_sd))

        f = theano.function([], elbo, updates=updates)
        elbo_mc = f()

        # Exact value
        elbo_true = (-0.5 *
                     (3 + 3 * post_mu**2 - 2 *
                      (y_obs[0] + y_obs[1] + mu0) * post_mu + y_obs[0]**2 +
                      y_obs[1]**2 + mu0**2 + 3 * np.log(2 * np.pi)) + 0.5 *
                     (np.log(2 * np.pi) + 1))

        np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1)
Example #5
    def test_advi_minibatch_shared(self):
        n = 1000
        sd0 = 2.
        mu0 = 4.
        sd = 3.
        mu = -5.

        data = sd * np.random.randn(n) + mu

        d = n / sd**2 + 1 / sd0**2
        mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

        data_t = shared(np.zeros(1, ))

        def create_minibatches(data):
            while True:
                data = np.roll(data, 100, axis=0)
                yield (data[:100], )

        with Model():
            mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
            x = Normal('x', mu=mu_, sd=sd, observed=data_t)
            advi_fit = advi_minibatch(n=1000,
                                      minibatch_tensors=[data_t],
                                      minibatch_RVs=[x],
                                      minibatches=create_minibatches(data),
                                      total_size=n,
                                      learning_rate=1e-1)
            np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)
            trace = sample_vp(advi_fit, 10000)

        np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
        np.testing.assert_allclose(np.std(trace['mu']),
                                   np.sqrt(1. / d),
                                   rtol=0.4)
Example #6
def _common_interceptions(hparams, tau_cmmn, verbose=0):
    prior_var_mu = hparams.prior_var_mu
    fix_mu_zero = hparams.fix_mu_zero

    if fix_mu_zero:
        mu1 = np.float32(0.0)
        mu2 = np.float32(0.0)

        if 10 <= verbose:
            print('Fix bias parameters to 0.0')

    else:
        if prior_var_mu == 'auto':
            tau1 = np.float32(1. / tau_cmmn[0])
            tau2 = np.float32(1. / tau_cmmn[1])
        else:
            v = prior_var_mu
            tau1 = np.float32(1. / v)
            tau2 = np.float32(1. / v)
        mu1 = Normal('mu1',
                     mu=np.float32(0.),
                     tau=np.float32(tau1),
                     dtype=floatX)
        mu2 = Normal('mu2',
                     mu=np.float32(0.),
                     tau=np.float32(tau2),
                     dtype=floatX)

        if 10 <= verbose:
            print('mu1.dtype = {}'.format(mu1.dtype))
            print('mu2.dtype = {}'.format(mu2.dtype))

    return mu1, mu2
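The tau arguments use the Normal's precision parameterization; a prior variance v (or the automatic tau_cmmn default) maps to a precision via

\tau = \frac{1}{\sigma^2}

which is exactly the 1. / v conversion above.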
Example #7
        def test_optimizer_with_full_data(self):
            n = 1000
            sd0 = 2.
            mu0 = 4.
            sd = 3.
            mu = -5.

            data = sd * np.random.randn(n) + mu

            d = n / sd**2 + 1 / sd0**2
            mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data)
                inf = self.inference(start={})
                inf.fit(10)
                approx = inf.fit(
                    self.NITER,
                    obj_optimizer=self.optimizer,
                    callbacks=[pm.callbacks.CheckParametersConvergence()],
                )
                trace = approx.sample(10000)
            np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.1)
            np.testing.assert_allclose(np.std(trace['mu']),
                                       np.sqrt(1. / d),
                                       rtol=0.4)
Example #8
    def test_elbo(self):
        mu0 = 1.5
        sigma = 1.0
        y_obs = np.array([1.6, 1.4])

        # Create a model for test
        with Model() as model:
            mu = Normal('mu', mu=mu0, sd=sigma)
            Normal('y', mu=mu, sd=1, observed=y_obs)

        model_vars = inputvars(model.vars)

        # Create variational gradient tensor
        elbo, _ = _calc_elbo(model_vars,
                             model,
                             n_mcsamples=10000,
                             random_seed=self.random_seed)

        # Variational posterior parameters
        uw_ = np.array([1.88, np.log(1)])

        # Calculate elbo computed with MonteCarlo
        uw_shared = shared(uw_, 'uw_shared')
        elbo = CallableTensor(elbo)(uw_shared)
        f = function([], elbo)
        elbo_mc = f()

        # Exact value
        elbo_true = (-0.5 *
                     (3 + 3 * uw_[0]**2 - 2 *
                      (y_obs[0] + y_obs[1] + mu0) * uw_[0] + y_obs[0]**2 +
                      y_obs[1]**2 + mu0**2 + 3 * np.log(2 * np.pi)) + 0.5 *
                     (np.log(2 * np.pi) + 1))

        np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1)
Example #9
        def test_optimizer_minibatch_with_callback(self):
            n = 1000
            sd0 = 2.
            mu0 = 4.
            sd = 3.
            mu = -5.

            data = sd * np.random.randn(n) + mu

            d = n / sd**2 + 1 / sd0**2
            mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

            def create_minibatch(data):
                while True:
                    data = np.roll(data, 100, axis=0)
                    yield data[:100]

            minibatches = create_minibatch(data)
            with Model():
                data_t = theano.shared(next(minibatches))

                def cb(*_):
                    data_t.set_value(next(minibatches))

                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data_t, total_size=n)
                inf = self.inference()
                approx = inf.fit(self.NITER, callbacks=[cb], obj_n_mc=10)
                trace = approx.sample_vp(10000)
            np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
            np.testing.assert_allclose(np.std(trace['mu']),
                                       np.sqrt(1. / d),
                                       rtol=0.4)
Example #10
    def test_density_scaling_with_genarator(self):
        # We have different size generators

        def true_dens():
            g = gen1()
            for i, point in enumerate(g):
                yield stats.norm.logpdf(point).sum() * 10

        t = true_dens()
        # We have same size models
        with pm.Model() as model1:
            Normal("n", observed=gen1(), total_size=100)
            p1 = aesara.function([], model1.logpt)

        with pm.Model() as model2:
            gen_var = generator(gen2())
            Normal("n", observed=gen_var, total_size=100)
            p2 = aesara.function([], model2.logpt)

        for i in range(10):
            _1, _2, _t = p1(), p2(), next(t)
            decimals = select_by_precision(float64=7, float32=2)
            np.testing.assert_almost_equal(
                _1, _t, decimal=decimals)  # Value O(-50,000)
            np.testing.assert_almost_equal(_1, _2)
Example #11
def test_elbo():
    mu0 = 1.5
    sigma = 1.0
    y_obs = np.array([1.6, 1.4])

    # Create a model for test
    with Model() as model:
        mu = Normal('mu', mu=mu0, sd=sigma)
        y = Normal('y', mu=mu, sd=1, observed=y_obs)

    vars = inputvars(model.vars)

    # Create variational gradient tensor
    grad, elbo, shared, uw = variational_gradient_estimate(vars,
                                                           model,
                                                           n_mcsamples=10000,
                                                           random_seed=1)

    # Variational posterior parameters
    uw_ = np.array([1.88, np.log(1)])

    # Calculate elbo computed with MonteCarlo
    f = function([uw], elbo)
    elbo_mc = f(uw_)

    # Exact value
    elbo_true = (-0.5 * (3 + 3 * uw_[0]**2 - 2 *
                         (y_obs[0] + y_obs[1] + mu0) * uw_[0] + y_obs[0]**2 +
                         y_obs[1]**2 + mu0**2 + 3 * np.log(2 * np.pi)) + 0.5 *
                 (np.log(2 * np.pi) + 1))

    np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1)
Example #12
        def test_optimizer_with_full_data(self):
            n = 1000
            sd0 = 2.
            mu0 = 4.
            sd = 3.
            mu = -5.

            data = sd * np.random.randn(n) + mu

            d = n / sd**2 + 1 / sd0**2
            mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=data)
                pm.Deterministic('mu_sq', mu_**2)
                inf = self.inference()
                self.assertEqual(len(inf.hist), 0)
                inf.fit(10)
                self.assertEqual(len(inf.hist), 10)
                self.assertFalse(np.isnan(inf.hist).any())
                approx = inf.fit(self.NITER)
                self.assertEqual(len(inf.hist), self.NITER + 10)
                self.assertFalse(np.isnan(inf.hist).any())
                trace = approx.sample_vp(10000)
            np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.1)
            np.testing.assert_allclose(np.std(trace['mu']),
                                       np.sqrt(1. / d),
                                       rtol=0.2)
Example #13
def _common_interceptions(hparams, tau_cmmn, Normal, floatX, verbose):
    if hparams['fix_mu_zero']:
        mu1 = np.float32(0.0)
        mu2 = np.float32(0.0)

        if 10 <= verbose:
            print('Fix bias parameters to 0.0')

    else:
        if hparams['prior_var_mu'] == 'auto':
            tau1 = np.float32(1. / tau_cmmn[0])
            tau2 = np.float32(1. / tau_cmmn[1])
        else:
            v = hparams['prior_var_mu']
            tau1 = np.float32(1. / v)
            tau2 = np.float32(1. / v)
        mu1 = Normal('mu1', mu=np.float32(0.), tau=np.float32(tau1), 
                     dtype=floatX)
        mu2 = Normal('mu2', mu=np.float32(0.), tau=np.float32(tau2), 
                     dtype=floatX)

        if 10 <= verbose:
            print('mu1.dtype = {}'.format(mu1.dtype))
            print('mu2.dtype = {}'.format(mu2.dtype))

    return mu1, mu2
Example #14
def _indvdl_gauss(
    hparams, std_x, n_samples, L_cov, Normal, Deterministic, floatX, 
    cholesky, tt, verbose):
    scale1 = np.float32(std_x[0] * hparams['v_indvdl_1'])
    scale2 = np.float32(std_x[1] * hparams['v_indvdl_2'])

    u1s = Normal(
        'u1s', mu=np.float32(0.), tau=np.float32(1.), 
        shape=(n_samples,), dtype=floatX
    )
    u2s = Normal(
        'u2s', mu=np.float32(0.), tau=np.float32(1.), 
        shape=(n_samples,), dtype=floatX
    )
    # set_subtensor returns a new variable; keep the scaled Cholesky factor
    L_cov_ = cholesky(L_cov).astype(floatX)
    L_cov_ = tt.set_subtensor(L_cov_[0, :], L_cov_[0, :] * scale1)
    L_cov_ = tt.set_subtensor(L_cov_[1, :], L_cov_[1, :] * scale2)
    mu1s_ = Deterministic('mu1s_',
                          L_cov_[0, 0] * u1s + L_cov_[0, 1] * u2s)
    mu2s_ = Deterministic('mu2s_',
                          L_cov_[1, 0] * u1s + L_cov_[1, 1] * u2s)

    if 10 <= verbose:
        print('Normal for individual effect')
        print('u1s.dtype = {}'.format(u1s.dtype))
        print('u2s.dtype = {}'.format(u2s.dtype))

    return mu1s_, mu2s_
Example #15
def test_advi():
    n = 1000
    sd0 = 2.
    mu0 = 4.
    sd = 3.
    mu = -5.

    data = sd * np.random.RandomState(0).randn(n) + mu

    d = n / sd**2 + 1 / sd0**2
    mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

    with Model() as model:
        mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
        Normal('x', mu=mu_, sd=sd, observed=data)

    advi_fit = advi(model=model,
                    n=1000,
                    accurate_elbo=False,
                    learning_rate=1e-1,
                    random_seed=1)

    np.testing.assert_allclose(advi_fit.means['mu'], mu_post, rtol=0.1)

    trace = sample_vp(advi_fit, 10000, model)

    np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.4)
    np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.4)
Example #16
        def test_optimizer_minibatch_with_generator(self):
            n = 1000
            sd0 = 2.
            mu0 = 4.
            sd = 3.
            mu = -5.

            data = sd * np.random.randn(n) + mu

            d = n / sd**2 + 1 / sd0**2
            mu_post = (n * np.mean(data) / sd**2 + mu0 / sd0**2) / d

            def create_minibatch(data):
                while True:
                    data = np.roll(data, 100, axis=0)
                    yield data[:100]

            minibatches = create_minibatch(data)
            with Model():
                mu_ = Normal('mu', mu=mu0, sd=sd0, testval=0)
                Normal('x', mu=mu_, sd=sd, observed=minibatches, total_size=n)
                inf = self.inference()
                approx = inf.fit(
                    self.NITER * 3,
                    obj_optimizer=self.optimizer,
                    callbacks=[pm.callbacks.CheckParametersConvergence()])
                trace = approx.sample(10000)
            np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.1)
            np.testing.assert_allclose(np.std(trace['mu']),
                                       np.sqrt(1. / d),
                                       rtol=0.4)
Example #17
    def test_density_scaling(self):
        with pm.Model() as model1:
            Normal('n', observed=[[1]], total_size=1)
            p1 = theano.function([], model1.logpt)

        with pm.Model() as model2:
            Normal('n', observed=[[1]], total_size=2)
            p2 = theano.function([], model2.logpt)
        assert p1() * 2 == p2()
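The assertion holds because total_size rescales the batch log-likelihood by total_size / n_observed. A NumPy-only sketch of the same arithmetic (assuming, as above, a standard-normal observation model):

import numpy as np
from scipy import stats

logp = stats.norm.logpdf(1.0)   # log-density of the single observation [[1]]
p1_expected = (1 / 1) * logp    # model1: total_size=1, one observed value
p2_expected = (2 / 1) * logp    # model2: total_size=2, one observed value
assert np.isclose(p1_expected * 2, p2_expected)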
Example #18
def test_missing():
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal('x', 1, 1)
        Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)
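A NumPy-only sketch of what the masked array hands to the model; two entries are masked, which is why y_missing ends up with shape (2,):

import numpy.ma as ma

data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
print(data.mask)          # [False False  True False  True]
print(data.mask.sum())    # 2 -> two values to impute
print(data.compressed())  # [1 2 4], the observed entries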
Example #19
def test_missing_pandas():
    data = pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan])
    with Model() as model:
        x = Normal('x', 1, 1)
        Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)
Example #20
    def test_free_rv(self):
        with pm.Model() as model4:
            Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2])
            p4 = aesara.function([], model4.logpt)

        with pm.Model() as model5:
            n = Normal("n", total_size=[2, Ellipsis, 2], size=(2, 2))
            p5 = aesara.function([n.tag.value_var], model5.logpt)
        assert p4() == p5(pm.floatX([[1]]))
        assert p4() == p5(pm.floatX([[1, 1], [1, 1]]))
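Both asserts reduce to the same scaling arithmetic: each axis contributes a factor total_size / axis_length. A NumPy-only sketch with standard-normal log-densities:

import numpy as np
from scipy import stats

logp1 = stats.norm.logpdf(1.0)
p4_expected = 4 * logp1                  # four observed 1s, no rescaling
p5_small = (2 / 1) * (2 / 1) * logp1     # (1, 1) input: factor 2 on each axis
p5_full = (2 / 2) * (2 / 2) * 4 * logp1  # (2, 2) input: factors cancel
assert np.isclose(p4_expected, p5_small) and np.isclose(p4_expected, p5_full)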
Example #21
def simple_arbitrary_det():
    @as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])
    def arbitrary_det(value):
        return value

    with Model() as model:
        a = Normal('a')
        b = arbitrary_det(a)
        Normal('obs', mu=b.astype('float64'), observed=np.array([1, 3, 5]))

    return model.test_point, model
Example #22
def simple_model(simple_model_data):
    with Model() as model:
        mu_ = Normal('mu',
                     mu=simple_model_data['mu0'],
                     sd=simple_model_data['sd0'],
                     testval=0)
        Normal('x',
               mu=mu_,
               sd=simple_model_data['sd'],
               observed=simple_model_data['data'],
               total_size=simple_model_data['n'])
    return model
Example #23
def test_internal_missing_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        with pytest.warns(ImputationWarning):
            theta1 = Normal('theta1', mu=2, observed=obs1)
        with pytest.warns(ImputationWarning):
            theta2 = Normal('theta2', mu=theta1, observed=obs2)

        prior_trace = sample_prior_predictive()
        assert set(['theta1', 'theta2']) <= set(prior_trace.keys())
        sample()
Example #24
    def test_free_rv(self):
        with pm.Model() as model4:
            Normal('n', observed=[[1, 1],
                                  [1, 1]], total_size=[2, 2])
            p4 = theano.function([], model4.logpt)

        with pm.Model() as model5:
            Normal('n', total_size=[2, Ellipsis, 2], shape=(1, 1), broadcastable=(False, False))
            p5 = theano.function([model5.n], model5.logpt)
        assert p4() == p5(pm.floatX([[1]]))
        assert p4() == p5(pm.floatX([[1, 1],
                                     [1, 1]]))
Example #25
def simple_arbitrary_det():
    scalar_type = tt.dscalar if theano.config.floatX == "float64" else tt.fscalar

    @as_op(itypes=[scalar_type], otypes=[scalar_type])
    def arbitrary_det(value):
        return value

    with Model() as model:
        a = Normal("a")
        b = arbitrary_det(a)
        Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))

    return model.test_point, model
Example #26
def get_garch_model():
    r = np.array([28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float64)
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float64)
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18], dtype=np.float64)
    shape = r.shape

    with Model() as garch:
        alpha1 = Uniform("alpha1", 0.0, 1.0, shape=shape)
        beta1 = Uniform("beta1", 0.0, 1 - alpha1, shape=shape)
        mu = Normal("mu", mu=0.0, sigma=100.0, shape=shape)
        theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) + beta1 * tt.pow(sigma1, 2))
        Normal("obs", mu, sigma=theta, observed=r)
    return garch
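The theta line is one step of the GARCH(1,1) conditional-volatility recursion, evaluated elementwise over the supplied returns r and previous volatilities sigma1; as a sketch:

\sigma_t^2 = \alpha_0 + \alpha_1 (r_{t-1} - \mu)^2 + \beta_1 \sigma_{t-1}^2

so theta plays the role of sigma_t and feeds the observation likelihood N(mu, theta).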
Example #27
def test_missing_pandas():
    data = pd.DataFrame([1, 2, numpy.nan, 4, numpy.nan])
    with Model() as model:
        x = Normal('x', 1, 1)
        with pytest.warns(ImputationWarning):
            Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)

    with model:
        prior_trace = sample_prior_predictive()
    assert set(['x', 'y']) <= set(prior_trace.keys())
Example #28
def test_missing():
    data = ma.masked_values([1, 2, -1, 4, -1], value=-1)
    with Model() as model:
        x = Normal('x', 1, 1)
        with pytest.warns(ImputationWarning):
            Normal('y', x, 1, observed=data)

    y_missing, = model.missing_values
    assert y_missing.tag.test_value.shape == (2, )

    model.logp(model.test_point)

    with model:
        prior_trace = sample_prior_predictive()
    assert set(['x', 'y']) <= set(prior_trace.keys())
Example #29
def test_missing_dual_observations():
    with Model() as model:
        obs1 = ma.masked_values([1, 2, -1, 4, -1], value=-1)
        obs2 = ma.masked_values([-1, -1, 6, -1, 8], value=-1)
        beta1 = Normal('beta1', 1, 1)
        beta2 = Normal('beta2', 2, 1)
        latent = Normal('theta', shape=5)
        with pytest.warns(ImputationWarning):
            ovar1 = Normal('o1', mu=beta1 * latent, observed=obs1)
        with pytest.warns(ImputationWarning):
            ovar2 = Normal('o2', mu=beta2 * latent, observed=obs2)

        prior_trace = sample_prior_predictive()
        assert set(['beta1', 'beta2', 'theta', 'o1', 'o2']) <= set(prior_trace.keys())
        sample()
Example #30
def get_garch_model():
    r = np.array([28, 8, -3, 7, -1, 1, 18, 12])
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18])
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18])
    shape = r.shape

    with Model() as garch:
        alpha1 = Normal('alpha1', mu=0., sd=1., shape=shape)
        BoundedNormal = Bound(Normal, upper=(1 - alpha1))
        beta1 = BoundedNormal('beta1', mu=0., sd=1e6, shape=shape)
        mu = Normal('mu', mu=0., sd=1e6, shape=shape)
        theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) +
                        beta1 * tt.pow(sigma1, 2))
        Normal('obs', mu, sd=theta, observed=r)
    return garch