Example #1
    def test(self):

        # Changepoint model
        M1 = Model(model_1)

        # Constant rate model
        M2 = Model(model_2)

        # Exponentially varying rate model
        M3 = Model(model_3)

        # print 'Docstring of model 1:'
        # print model_1.__doc__
        # print 'Docstring of model 2:'
        # print model_2.__doc__
        # print 'Docstring of model 3:'
        # print model_3.__doc__

        posterior = weight([M1, M2, M3], 10000)[0]

        # print 'Log posterior probability of changepoint model: ',log(posterior[M1])
        # print 'Log posterior probability of constant rate model: ',log(posterior[M2])
        # print 'Log posterior probability of linearly varying rate model: ',log(posterior[M3])

        assert (abs(log(posterior[M1])) < 1e-10)
        assert (abs(log(posterior[M2]) + 30) < 4)
        assert (abs(log(posterior[M3]) + 60) < 30)
Example #2
def test_allinmodel():
    model1 = Model()
    model2 = Model()
    with model1:
        x1 = Normal("x1", mu=0, sigma=1)
        y1 = Normal("y1", mu=0, sigma=1)
    with model2:
        x2 = Normal("x2", mu=0, sigma=1)
        y2 = Normal("y2", mu=0, sigma=1)

    x1 = model1.rvs_to_values[x1]
    y1 = model1.rvs_to_values[y1]
    x2 = model2.rvs_to_values[x2]
    y2 = model2.rvs_to_values[y2]

    starting.allinmodel([x1, y1], model1)
    starting.allinmodel([x1], model1)
    with pytest.raises(
            ValueError,
            match=r"Some variables not in the model: \['x2', 'y2'\]"):
        starting.allinmodel([x2, y2], model1)
    with pytest.raises(ValueError,
                       match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2, y1], model1)
    with pytest.raises(ValueError,
                       match=r"Some variables not in the model: \['x2'\]"):
        starting.allinmodel([x2], model1)
Example #3
    def test_mixture_of_mvn(self):
        mu1 = np.asarray([0.0, 1.0])
        cov1 = np.diag([1.5, 2.5])
        mu2 = np.asarray([1.0, 0.0])
        cov2 = np.diag([2.5, 3.5])
        obs = np.asarray([[0.5, 0.5], mu1, mu2])
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
            mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
            mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
            y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)

        # check logp of each component
        complogp_st = np.vstack(
            (
                st.multivariate_normal.logpdf(obs, mu1, cov1),
                st.multivariate_normal.logpdf(obs, mu2, cov2),
            )
        ).T
        complogp = y.distribution._comp_logp(aesara.shared(obs)).eval()
        assert_allclose(complogp, complogp_st)

        # check logp of mixture
        testpoint = model.recompute_initial_point()
        mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
        assert_allclose(y.logp_elemwise(testpoint), mixlogp_st)

        # check logp of model
        priorlogp = st.dirichlet.logpdf(
            x=testpoint["w"],
            alpha=np.ones(2),
        )
        assert_allclose(model.logp(testpoint), mixlogp_st.sum() + priorlogp)
Example #4
    def test_mixture_list_of_poissons(self):
        with Model() as model:
            w = Dirichlet("w",
                          floatX(np.ones_like(self.pois_w)),
                          shape=self.pois_w.shape)
            mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
            Mixture(
                "x_obs",
                w,
                [Poisson.dist(mu[0]), Poisson.dist(mu[1])],
                observed=self.pois_x)
            step = Metropolis()
            trace = sample(5000,
                           step,
                           random_seed=self.random_seed,
                           progressbar=False,
                           chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)),
                        np.sort(self.pois_w),
                        rtol=0.1,
                        atol=0.1)
        assert_allclose(np.sort(trace["mu"].mean(axis=0)),
                        np.sort(self.pois_mu),
                        rtol=0.1,
                        atol=0.1)
Example #5
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))

    return model.compute_initial_point(), model, (mu, tau**-0.5)
Example #6
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))

    return model.compute_initial_point(), model, (mu, tau**-0.5)
Example #7
def multidimensional_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=(3, 2), testval=.1 * np.ones((3, 2)))

    return model.test_point, model, (mu, tau**-1)
Example #8
def simple_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=2, testval=[.1] * 2)

    return model.test_point, model, (mu, tau**-1)
Example #9
def simple_2model_continuous():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = pm.Normal("x", mu, tau=tau, initval=0.1)
        pm.Deterministic("logx", at.log(x))
        pm.Beta("y", alpha=1, beta=1, size=2)
    return model.compute_initial_point(), model
Example #10
    def make_model(self, data):
        assert len(data) == 2, 'There must be exactly two data arrays'
        name1, name2 = sorted(data.keys())
        y1 = np.array(data[name1])
        y2 = np.array(data[name2])
        assert y1.ndim == 1
        assert y2.ndim == 1
        y = np.concatenate((y1, y2))

        mu_m = np.mean(y)
        mu_p = 0.000001 * 1 / np.std(y)**2

        sigma_low = np.std(y) / 1000
        sigma_high = np.std(y) * 1000

        # the five prior distributions for the parameters in our model
        group1_mean = Normal('group1_mean', mu_m, mu_p)
        group2_mean = Normal('group2_mean', mu_m, mu_p)
        group1_std = Uniform('group1_std', sigma_low, sigma_high)
        group2_std = Uniform('group2_std', sigma_low, sigma_high)
        nu_minus_one = Exponential('nu_minus_one', 1 / 29)

        @deterministic(plot=False)
        def nu(n=nu_minus_one):
            out = n + 1
            return out

        @deterministic(plot=False)
        def lam1(s=group1_std):
            out = 1 / s**2
            return out

        @deterministic(plot=False)
        def lam2(s=group2_std):
            out = 1 / s**2
            return out

        group1 = NoncentralT(name1,
                             group1_mean,
                             lam1,
                             nu,
                             value=y1,
                             observed=True)
        group2 = NoncentralT(name2,
                             group2_mean,
                             lam2,
                             nu,
                             value=y2,
                             observed=True)
        return Model({
            'group1': group1,
            'group2': group2,
            'group1_mean': group1_mean,
            'group2_mean': group2_mean,
            'group1_std': group1_std,
            'group2_std': group2_std,
        })
Example #11
def simple_2model():
    mu = -2.1
    tau = 1.3
    p = .4
    with Model() as model:
        x = pm.Normal('x', mu, tau, testval=.1)
        y = pm.Bernoulli('y', p)

    return model.test_point, model
Example #12
def simple_2model():
    mu = -2.1
    tau = 1.3
    p = 0.4
    with Model() as model:
        x = pm.Normal("x", mu, tau=tau, initval=0.1)
        pm.Deterministic("logx", at.log(x))
        pm.Bernoulli("y", p)
    return model.compute_initial_point(), model
Example #13
def _get_log_likelihood(model: Model, samples, backend=None) -> Dict:
    """Compute log-likelihood for all observations"""
    data = {}
    for v in model.observed_RVs:
        v_elemwise_logpt = model.logpt(v, sum=False)
        jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logpt)
        result = jax.jit(jax.vmap(jax.vmap(jax_fn)), backend=backend)(*samples)[0]
        data[v.name] = result
    return data
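The nested jax.vmap above applies the per-observation logp graph across the two leading sample dimensions (typically chain and draw). A minimal JAX-only sketch of that pattern, using a made-up scalar log-density rather than a PyMC graph:

import jax
import jax.numpy as jnp

# Toy stand-in for the compiled per-observation logp function (hypothetical).
def scalar_logp(x):
    return -0.5 * x**2

# Inner vmap maps over draws, outer vmap over chains, mirroring the
# double-vmap in _get_log_likelihood; jit compiles the batched function once.
batched_logp = jax.jit(jax.vmap(jax.vmap(scalar_logp)))

samples = jnp.zeros((4, 100))       # (chains, draws)
print(batched_logp(samples).shape)  # -> (4, 100)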
Example #14
def simple_categorical():
    p = floatX_array([0.1, 0.2, 0.3, 0.4])
    v = floatX_array([0.0, 1.0, 2.0, 3.0])
    with Model() as model:
        Categorical("x", p, size=3, initval=[1, 2, 3])

    mu = np.dot(p, v)
    var = np.dot(p, (v - mu)**2)
    return model.compute_initial_point(), model, (mu, var)
Example #15
def simple_2model():
    mu = -2.1
    tau = 1.3
    p = .4
    with Model() as model:
        x = pm.Normal('x', mu, tau, testval=.1)
        logx = pm.Deterministic('logx', log(x))
        y = pm.Bernoulli('y', p)

    return model.test_point, model
Example #16
def get_jaxified_logp(model: Model, negative_logp=True) -> Callable:
    model_logp = model.logp()
    if not negative_logp:
        model_logp = -model_logp
    logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logp])

    def logp_fn_wrap(x):
        return logp_fn(*x)[0]

    return logp_fn_wrap
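A hedged usage sketch for the wrapper above, assuming get_jaxified_logp is importable alongside this snippet and using a throwaway one-variable model; the returned function takes the value variables as a single sequence, in model.value_vars order:

with Model() as m:
    Normal("x", 0, 1)

logp = get_jaxified_logp(m)
point = m.compute_initial_point()
# Pass all value-variable values as one sequence; logp_fn_wrap unpacks them.
print(logp([point[v.name] for v in m.value_vars]))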
Example #17
    def test_thread_safety(self):
        """Regression test for issue #1552: Thread safety of model context manager

        This test creates two threads that attempt to construct two
        unrelated models at the same time.
        For repeatable testing, the two threads are synchronised such
        that thread A enters the context manager first, then B,
        then A attempts to declare a variable while B is still in the context manager.
        """
        aInCtxt, bInCtxt, aDone = (threading.Event() for _ in range(3))
        modelA = Model()
        modelB = Model()

        def make_model_a():
            with modelA:
                aInCtxt.set()
                bInCtxt.wait()
                Normal("a", 0, 1)
            aDone.set()

        def make_model_b():
            aInCtxt.wait()
            with modelB:
                bInCtxt.set()
                aDone.wait()
                Normal("b", 0, 1)

        threadA = threading.Thread(target=make_model_a)
        threadB = threading.Thread(target=make_model_b)
        threadA.start()
        threadB.start()
        threadA.join()
        threadB.join()
        # now let's see which model got which variable
        # prior to #1555, the variables would be swapped:
        # - B enters its model context after A, but before a is declared -> a goes into B
        # - A leaves its model context before B attempts to declare b. A's context manager
        #   takes B from the stack, such that b ends up in model A
        assert (
            list(modelA.named_vars),
            list(modelB.named_vars),
        ) == (["a"], ["b"])
Example #18
def simple_arbitrary_det():
    scalar_type = at.dscalar if aesara.config.floatX == "float64" else at.fscalar

    @as_op(itypes=[scalar_type], otypes=[scalar_type])
    def arbitrary_det(value):
        return value

    with Model() as model:
        a = Normal("a")
        b = arbitrary_det(a)
        Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))

    return model.compute_initial_point(), model
Example #19
    def test_normal_mixture(self):
        with Model() as model:
            w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
            mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
            tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
            NormalMixture("x_obs", w, mu, tau=tau, observed=self.norm_x)
            step = Metropolis()
            trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

        assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
        assert_allclose(
            np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
        )
Example #20
def test_find_MAP_issue_4488():
    # Test for https://github.com/pymc-devs/pymc/issues/4488
    with Model() as m:
        x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan]))
        y = Deterministic("y", x + 1)
        map_estimate = find_MAP()

    assert not set.difference({"x_missing", "x_missing_log__", "y"},
                              set(map_estimate.keys()))
    np.testing.assert_allclose(map_estimate["x_missing"],
                               0.2,
                               rtol=1e-4,
                               atol=1e-4)
    np.testing.assert_allclose(map_estimate["y"],
                               [2.0, map_estimate["x_missing"][0] + 1])
Example #21
def test_mixed_contexts():
    modelA = Model()
    modelB = Model()
    with raises((ValueError, TypeError)):
        modelcontext(None)
    with modelA:
        with modelB:
            assert Model.get_context() == modelB
            assert modelcontext(None) == modelB
        assert Model.get_context() == modelA
        assert modelcontext(None) == modelA
    assert Model.get_context(error_if_none=False) is None
    with raises(TypeError):
        Model.get_context(error_if_none=True)
    with raises((ValueError, TypeError)):
        modelcontext(None)
Example #22
def test_find_MAP_discrete():
    tol = 2.0**-11
    alpha = 4
    beta = 4
    n = 20
    yes = 15

    with Model() as model:
        p = Beta("p", alpha, beta)
        Binomial("ss", n=n, p=p)
        Binomial("s", n=n, p=p, observed=yes)

        map_est1 = starting.find_MAP()
        map_est2 = starting.find_MAP(vars=model.value_vars)

    close_to(map_est1["p"], 0.6086956533498806, tol)

    close_to(map_est2["p"], 0.695642178810167, tol)
    assert map_est2["ss"] == 14
Example #23
def test_find_MAP_discrete():
    tol = 2.0**-11
    alpha = 4
    beta = 4
    n = 20
    yes = 15

    with Model() as model:
        p = Beta('p', alpha, beta)
        ss = Binomial('ss', n=n, p=p)
        s = Binomial('s', n=n, p=p, observed=yes)

        map_est1 = starting.find_MAP()
        map_est2 = starting.find_MAP(vars=model.vars)

    close_to(map_est1['p'], 0.6086956533498806, tol)

    close_to(map_est2['p'], 0.695642178810167, tol)
    assert map_est2['ss'] == 14
Example #24
    def __init__(self, simulatedSurvey, lowParallax, upParallax,
                 minMeanAbsoluteMagnitude, maxMeanAbsoluteMagnitude, minTau,
                 maxTau):
        """
        simulatedSurvey          - a simulated survey object generated with one of the UniverseModels classes.
        lowParallax              - assumed lower limit on parallaxes (mas)
        upParallax               - assumed upper limit on parallaxes (mas)
        minMeanAbsoluteMagnitude - lower limit on prior distribution of mean absolute magnitude
        maxMeanAbsoluteMagnitude - upper limit on prior distribution of mean absolute magnitude
        minTau                   - lower limit on prior distribution of inverse variance
        maxTau                   - upper limit on prior distribution of inverse variance
        """
        self.simulatedSurvey = simulatedSurvey
        self.numberOfStarsInSurvey = self.simulatedSurvey.numberOfStarsInSurvey
        self.lowParallax = lowParallax
        self.upParallax = upParallax
        self.minMeanAbsoluteMagnitude = minMeanAbsoluteMagnitude
        self.maxMeanAbsoluteMagnitude = maxMeanAbsoluteMagnitude
        self.minTau = minTau
        self.maxTau = maxTau
        self.pyMCModel = Model(self._buildModel())
Example #25
def test_find_MAP():
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # data should be roughly mean 0, std 1, but let's
    # normalize anyway to get it really close
    data = (data - np.mean(data)) / np.std(data)

    with Model():
        mu = Uniform("mu", -1, 1)
        sigma = Uniform("sigma", 0.5, 1.5)
        Normal("y", mu=mu, tau=sigma**-2, observed=data)

        # Test gradient minimization
        map_est1 = starting.find_MAP(progressbar=False)
        # Test non-gradient minimization
        map_est2 = starting.find_MAP(progressbar=False, method="Powell")

    close_to(map_est1["mu"], 0, tol)
    close_to(map_est1["sigma"], 1, tol)

    close_to(map_est2["mu"], 0, tol)
    close_to(map_est2["sigma"], 1, tol)
Example #26
def test_find_MAP():
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # data should be roughly mean 0, std 1, but let's
    # normalize anyway to get it really close
    data = (data - np.mean(data)) / np.std(data)

    with Model() as model:
        mu = Uniform('mu', -1, 1)
        sigma = Uniform('sigma', .5, 1.5)
        y = Normal('y', mu=mu, tau=sigma**-2, observed=data)

        # Test gradient minimization
        map_est1 = starting.find_MAP()
        # Test non-gradient minimization
        map_est2 = starting.find_MAP(fmin=starting.optimize.fmin_powell)

    close_to(map_est1['mu'], 0, tol)
    close_to(map_est1['sigma'], 1, tol)

    close_to(map_est2['mu'], 0, tol)
    close_to(map_est2['sigma'], 1, tol)
Example #27
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)

        with Model() as model2:
            # Expected to fail if comp_shape is not provided, nd is
            # multidimensional, and it does not broadcast with ncomp. If by
            # chance it does broadcast, an error is raised when the mixture is
            # given observed data.
            # Furthermore, the Mixture will also raise errors when the observed
            # data is multidimensional but does not broadcast well with
            # comp_dists.
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            else:
                mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1,):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)

        testpoint = model0.recompute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus"] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
Example #28
import numpy as np
from pymc import MCMC, Model, Lognormal
from pylab import *

# The mu and tau are in log units; to get to log units,
# do the following
# (has mean around 1e2, with a variance of 9 logs in base 10)
mean_b10 = 2
var_b10 = 9

print "Setting mean (base 10) to %f, variance (base 10) to %f" % (mean_b10, var_b10)

# The lognormal variable
k = Lognormal('k', mu=np.log(10 ** mean_b10),
                   tau=1./(np.log(10) * np.log(10 ** var_b10)))

# Sample it
m = MCMC(Model([k]))
m.sample(iter=50000)

ion()

# Plot the distribution in base e
figure()
y = log(m.trace('k')[:])
y10 = log10(m.trace('k')[:])
hist(y, bins=100)
print()
print("Mean, base e: %f; Variance, base e: %f" % (mean(y), var(y)))

# Plot the distribution in base 10
figure()
hist(y10, bins=100)
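A quick numpy-only check of the base-10 conversion used above (a sketch, no PyMC required): if log10(k) has mean mean_b10 and variance var_b10, then ln(k) has mean mean_b10 * ln(10) and variance var_b10 * ln(10)**2, which is exactly what the mu and tau above encode.

import numpy as np

mean_b10, var_b10 = 2, 9
mu = np.log(10**mean_b10)                        # = mean_b10 * ln(10)
tau = 1.0 / (np.log(10) * np.log(10**var_b10))   # = 1 / (var_b10 * ln(10)**2)

rng = np.random.default_rng(0)
k = rng.lognormal(mean=mu, sigma=tau**-0.5, size=200_000)
print(np.mean(np.log10(k)), np.var(np.log10(k)))  # roughly 2.0 and 9.0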
Example #29
    def test_mixture_of_mixture(self):
        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7
        nbr = 4
        with Model() as model:
            # mixtures components
            g_comp = Normal.dist(
                mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            l_comp = LogNormal.dist(
                mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
            )
            # weight vector for the mixtures
            g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
            # mixture components
            g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
            l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
            # mixture of mixtures
            mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
            mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))

        test_point = model.recompute_initial_point()

        def mixmixlogp(value, point):
            floatX = aesara.config.floatX
            priorlogp = (
                st.dirichlet.logpdf(
                    x=point["g_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["l_w"],
                    alpha=np.ones(nbr) * 0.0000001,
                ).astype(floatX)
                + st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
                + st.dirichlet.logpdf(
                    x=point["mix_w"],
                    alpha=np.ones(2),
                ).astype(floatX)
            )
            complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
            mixlogp1 = logsumexp(
                np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
            )
            complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
            mixlogp2 = logsumexp(
                np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
            )
            complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
            mixmixlogpg = logsumexp(
                np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
            )
            return priorlogp, mixmixlogpg

        value = np.exp(self.norm_x)[:, None]
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)

        # check logp of mixture
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)

        # check model logp
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)

        # check input and check logp again
        test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
        test_point["mu_g"] = np.exp(np.random.randn(nbr))
        priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
        assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
        assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
Example #30
from pymc import MCMC, Model, Normal, Uniform, deterministic
from pylab import plot, show

# `data` is assumed to be provided elsewhere: rows with at least three numeric fields.
converted_data = [(float(x[0]), float(x[1]), float(x[2])) for x in data]
xs = [x[1] for x in converted_data]
ys = [x[2] for x in converted_data]

b0 = Normal("b0", 0, 0.0003)
b1 = Normal("b1", 0, 0.0003)

err = Uniform("err", 0, 500)

x_weight = Normal("weight", 0, 1, value=xs, observed=True)

@deterministic(plot=False)
def pred(b0=b0, b1=b1, x=x_weight):
    return b0 + b1*x

y = Normal("y", mu=pred, tau=err, value=ys, observed=True)

model = Model([pred, b0, b1, y, err, x_weight])

m = MCMC(model)
m.sample(burn=2000, iter=10000)

bb0 = sum(m.trace('b0')[:])/len(m.trace('b0')[:])
bb1 = sum(m.trace('b1')[:])/len(m.trace('b1')[:])
err = sum(m.trace('err')[:])/len(m.trace('err')[:])

plot(xs, ys)
plot([min(xs), max(xs)], [bb0 + bb1*min(xs), bb0 + bb1*max(xs)])
show()