Example #1
    def test_simulator_metropolis_mcmc(self):
        with self.SMABC_test as m:
            step = pm.Metropolis([m.rvs_to_values[m["a"]], m.rvs_to_values[m["b"]]])
            trace = pm.sample(step=step, return_inferencedata=False)

        assert abs(self.data.mean() - trace["a"].mean()) < 0.05
        assert abs(self.data.std() - trace["b"].mean()) < 0.05
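
For reference, a minimal self-contained sketch of the same pattern, assigning pm.Metropolis to a subset of a model's value variables (the toy model below is an illustration, not part of the original test suite):

import pymc as pm

with pm.Model() as m:
    a = pm.Normal("a", 0, 1)
    b = pm.HalfNormal("b", 1)
    # Restrict the Metropolis sampler to the value variables backing a and b
    step = pm.Metropolis([m.rvs_to_values[a], m.rvs_to_values[b]])
    idata = pm.sample(draws=200, tune=100, step=step, chains=1)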
Example #2
    def test_issue_5043_autoconvert_coord_values(self):
        coords = {"city": pd.Series(["Bonn", "Berlin"])}
        with pm.Model(coords=coords) as pmodel:
            # The model tracks coord values as (immutable) tuples
            assert isinstance(pmodel.coords["city"], tuple)
            pm.Normal("x", dims="city")
            mtrace = pm.sample(
                return_inferencedata=False,
                compute_convergence_checks=False,
                step=pm.Metropolis(),
                cores=1,
                tune=7,
                draws=15,
            )
            # The converter must convert coord values to numpy arrays
            # because tuples as coordinate values cause problems with xarray.
            converter = InferenceDataConverter(trace=mtrace)
            assert isinstance(converter.coords["city"], np.ndarray)
            converter.to_inference_data()

            # We're not automatically converting things other than tuples,
            # so advanced use cases remain supported at the InferenceData level.
            # They just can't be used during model construction.
            converter = InferenceDataConverter(
                trace=mtrace,
                coords={
                    "city": pd.MultiIndex.from_tuples(
                        [("Bonn", 53111), ("Berlin", 10178)],
                        names=["name", "zipcode"],
                    )
                },
            )
            assert isinstance(converter.coords["city"], pd.MultiIndex)
Example #3
 def test_save_warmup(self, save_warmup, chains, tune, draws):
     with pm.Model():
         pm.Uniform("u1")
         pm.Normal("n1")
         idata = pm.sample(
             tune=tune,
             draws=draws,
             chains=chains,
             cores=1,
             step=pm.Metropolis(),
             discard_tuned_samples=False,
             return_inferencedata=True,
             idata_kwargs={"save_warmup": save_warmup},
         )
     warmup_prefix = "" if save_warmup and (tune > 0) else "~"
     post_prefix = "" if draws > 0 else "~"
     test_dict = {
         f"{post_prefix}posterior": ["u1", "n1"],
         f"{post_prefix}sample_stats": ["~tune", "accept"],
         f"{warmup_prefix}warmup_posterior": ["u1", "n1"],
         f"{warmup_prefix}warmup_sample_stats": ["~tune"],
         "~warmup_log_likelihood": [],
         "~log_likelihood": [],
     }
     fails = check_multiple_attrs(test_dict, idata)
     assert not fails
     if hasattr(idata, "posterior"):
         assert idata.posterior.dims["chain"] == chains
         assert idata.posterior.dims["draw"] == draws
     if hasattr(idata, "warmup_posterior"):
         assert idata.warmup_posterior.dims["chain"] == chains
         assert idata.warmup_posterior.dims["draw"] == tune
Example #4
def MCMC(model):
    import time
    with model:
        n = 6000
        START = time.time()
        try:
            start = pm.find_MAP()
        except AssertionError:
            return model, {'error': 'AssertionError in pm.find_MAP()'}
        init_time = time.time() - START
        print('Time to initialize: %ds' % init_time)

        START = time.time()
        trace = pm.sample(n, pm.Metropolis(), start)
        duration = time.time() - START
        print('Time to sample (MH): %ds' % duration)

        # START = time.time()
        # trace = pm.sample(n, pm.Slice(), start)
        # print('Time to sample (Slice): %ds' % (time.time() - START))

        # START = time.time()
        # trace = pm.sample(n, pm.HamiltonianMC(), start)
        # print('Time to sample (HMC): %ds' % (time.time() - START))

        # error_b, error_x, output = error(trace,model.data.A,model.data.x_true,
        #                          model.data.b_obs,model.data.scaling)

        # fig = pm.traceplot(trace)
        # plot(error_b,error_x)
        # plt.show()
    return model, trace, init_time, duration
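A hypothetical call site for the helper above (build_model is a placeholder name, not part of the original):

model = build_model()
result = MCMC(model)
if len(result) == 2:
    # find_MAP raised, so the helper returned (model, error_dict)
    print(result[1]['error'])
else:
    model, trace, init_time, duration = result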
Example #5
    def test_return_inferencedata(self):
        with self.model:
            kwargs = dict(draws=100, tune=50, cores=1, chains=2, step=pm.Metropolis())

            # trace with tuning
            with pytest.warns(UserWarning, match="will be included"):
                result = pm.sample(
                    **kwargs, return_inferencedata=False, discard_tuned_samples=False
                )
            assert isinstance(result, pm.backends.base.MultiTrace)
            assert len(result) == 150

            # inferencedata with tuning
            result = pm.sample(**kwargs, return_inferencedata=True, discard_tuned_samples=False)
            assert isinstance(result, InferenceData)
            assert result.posterior.sizes["draw"] == 100
            assert result.posterior.sizes["chain"] == 2
            assert len(result._groups_warmup) > 0

            # inferencedata without tuning, with idata_kwargs
            prior = pm.sample_prior_predictive(return_inferencedata=False)
            result = pm.sample(
                **kwargs,
                return_inferencedata=True,
                discard_tuned_samples=True,
                idata_kwargs={"prior": prior},
                random_seed=-1,
            )
            assert "prior" in result
            assert isinstance(result, InferenceData)
            assert result.posterior.sizes["draw"] == 100
            assert result.posterior.sizes["chain"] == 2
            assert len(result._groups_warmup) == 0
Example #6
def test_empirical_from_trace(another_simple_model):
    with another_simple_model:
        step = pm.Metropolis()
        trace = pm.sample(100, step=step, chains=1, tune=0, return_inferencedata=False)
        emp = Empirical(trace)
        assert emp.histogram.shape[0].eval() == 100
        trace = pm.sample(100, step=step, chains=4, tune=0, return_inferencedata=False)
        emp = Empirical(trace)
        assert emp.histogram.shape[0].eval() == 400
Example #7
def test_spawn_densitydist_function():
    with pm.Model() as model:
        mu = pm.Normal("mu", 0, 1)

        def func(x):
            return -2 * (x ** 2).sum()

        obs = pm.DensityDist("density_dist", logp=func, observed=np.random.randn(100))
        pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
Example #8
    def test_metropolis_sampling(self):
        """Check if the Metropolis sampler can handle broadcasting."""
        with pm.Model() as test_model:
            test1 = pm.Normal("test1", mu=0.0, sigma=1.0, size=(1, 10))
            test2 = pm.Normal("test2", mu=test1, sigma=1.0, size=(10, 10))

            step = pm.Metropolis()
            # TODO FIXME: Assert whatever it is we're testing
            pm.sample(tune=5, draws=7, cores=1, step=step, compute_convergence_checks=False)
Example #9
def test_sample_find_MAP_does_not_modify_start():
    # see https://github.com/pymc-devs/pymc/pull/4458
    with pm.Model():
        pm.LogNormal("untransformed")

        # make sure find_MAP does not modify the start dict
        start = {"untransformed": 2}
        pm.find_MAP(start=start)
        assert start == {"untransformed": 2}

        # make sure sample does not modify the start dict
        start = {"untransformed": 0.2}
        pm.sample(draws=10, step=pm.Metropolis(), tune=5, start=start, chains=3)
        assert start == {"untransformed": 0.2}

        # make sure sample does not modify the start when passed as a list of dicts
        start = [{"untransformed": 2}, {"untransformed": 0.2}]
        pm.sample(draws=10, step=pm.Metropolis(), tune=5, start=start, chains=2)
        assert start == [{"untransformed": 2}, {"untransformed": 0.2}]
Example #10
 def test_conversion_from_variables_subset(self):
     """This is a regression test for issue #5337."""
     with pm.Model() as model:
         x = pm.Normal("x")
         pm.Normal("y", x, observed=5)
         idata = pm.sample(
             tune=10, draws=20, chains=1, step=pm.Metropolis(), compute_convergence_checks=False
         )
         pm.sample_posterior_predictive(idata, var_names=["x"])
         pm.sample_prior_predictive(var_names=["x"])
Example #11
def test_remote_pipe_closed():
    master_pid = os.getpid()
    with pm.Model():
        x = pm.Normal("x", shape=2, mu=0.1)
        at_pid = at.as_tensor_variable(np.array(master_pid, dtype="int32"))
        pm.Normal("y", mu=_crash_remote_process(x, at_pid), shape=2)

        step = pm.Metropolis()
        with pytest.raises(RuntimeError, match="Chain [0-9] failed"):
            pm.sample(step=step, mp_ctx="spawn", tune=2, draws=2, cores=2, chains=2)
Example #12
def test_empirical_does_not_support_inference_data(another_simple_model):
    with another_simple_model:
        step = pm.Metropolis()
        trace = pm.sample(100,
                          step=step,
                          chains=1,
                          tune=0,
                          return_inferencedata=True)
        with pytest.raises(NotImplementedError,
                           match="return_inferencedata=False"):
            Empirical(trace)
Example #13
 def test_sampler_stat_tune(self, cores):
     with self.model:
         tune_stat = pm.sample(
             tune=5,
             draws=7,
             cores=cores,
             discard_tuned_samples=False,
             return_inferencedata=False,
             step=pm.Metropolis(),
         ).get_sampler_stats("tune", chains=1)
         assert list(tune_stat).count(True) == 5
         assert list(tune_stat).count(False) == 7
Example #14
def test_spawn_densitydist_bound_method():
    N = 100
    with pm.Model() as model:
        mu = pm.Normal("mu", 0, 1)
        normal_dist = pm.Normal.dist(mu, 1, size=N)

        def logp(x):
            out = pm.logp(normal_dist, x)
            return out

        obs = pm.DensityDist("density_dist", logp=logp, observed=np.random.randn(N), size=N)
        pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
Example #15
    def test_autodetect_coords_from_model(self, use_context):
        pd = pytest.importorskip("pandas")
        df_data = pd.DataFrame(columns=["date"]).set_index("date")
        dates = pd.date_range(start="2020-05-01", end="2020-05-20")
        for city, mu in {"Berlin": 15, "San Marino": 18, "Paris": 16}.items():
            df_data[city] = np.random.normal(loc=mu, size=len(dates))
        df_data.index = dates
        df_data.index.name = "date"

        coords = {"date": df_data.index, "city": df_data.columns}
        with pm.Model(coords=coords) as model:
            europe_mean = pm.Normal("europe_mean_temp", mu=15.0, sigma=3.0)
            city_offset = pm.Normal("city_offset",
                                    mu=0.0,
                                    sigma=3.0,
                                    dims="city")
            city_temperature = pm.Deterministic("city_temperature",
                                                europe_mean + city_offset,
                                                dims="city")

            data_dims = ("date", "city")
            data = pm.ConstantData("data", df_data, dims=data_dims)
            _ = pm.Normal("likelihood",
                          mu=city_temperature,
                          sigma=0.5,
                          observed=data,
                          dims=data_dims)

            trace = pm.sample(
                return_inferencedata=False,
                compute_convergence_checks=False,
                cores=1,
                chains=1,
                tune=20,
                draws=30,
                step=pm.Metropolis(),
            )
            if use_context:
                idata = to_inference_data(trace=trace)
        if not use_context:
            idata = to_inference_data(trace=trace, model=model)

        assert "city" in list(idata.posterior.dims)
        assert "city" in list(idata.observed_data.dims)
        assert "date" in list(idata.observed_data.dims)

        np.testing.assert_array_equal(idata.posterior.coords["city"],
                                      coords["city"])
        np.testing.assert_array_equal(idata.observed_data.coords["date"],
                                      coords["date"])
        np.testing.assert_array_equal(idata.observed_data.coords["city"],
                                      coords["city"])
Example #16
def test_iterator():
    with pm.Model() as model:
        a = pm.Normal("a", shape=1)
        b = pm.HalfNormal("b")
        step1 = pm.NUTS([model.rvs_to_values[a]])
        step2 = pm.Metropolis([model.rvs_to_values[b]])

    step = pm.CompoundStep([step1, step2])

    start = {"a": floatX(np.array([1.0])), "b_log__": floatX(np.array(2.0))}
    sampler = ps.ParallelSampler(10, 10, 3, 2, [2, 3, 4], [start] * 3, step, 0, False)
    with sampler:
        for draw in sampler:
            pass
Example #17
    def test_save_warmup_issue_1208_after_3_9(self):
        with pm.Model():
            pm.Uniform("u1")
            pm.Normal("n1")
            trace = pm.sample(
                tune=100,
                draws=200,
                chains=2,
                cores=1,
                step=pm.Metropolis(),
                discard_tuned_samples=False,
                return_inferencedata=False,
            )
            assert isinstance(trace, pm.backends.base.MultiTrace)
            assert len(trace) == 300

            # from original trace, warmup draws should be separated out
            idata = to_inference_data(trace, save_warmup=True)
            test_dict = {
                "posterior": ["u1", "n1"],
                "sample_stats": ["~tune", "accept"],
                "warmup_posterior": ["u1", "n1"],
                "warmup_sample_stats": ["~tune", "accept"],
            }
            fails = check_multiple_attrs(test_dict, idata)
            assert not fails
            assert idata.posterior.dims["chain"] == 2
            assert idata.posterior.dims["draw"] == 200

            # manually sliced trace triggers the same warning as <=3.8
            with pytest.warns(UserWarning, match="Warmup samples"):
                idata = to_inference_data(trace[-30:], save_warmup=True)
            test_dict = {
                "posterior": ["u1", "n1"],
                "sample_stats": ["~tune", "accept"],
                "~warmup_posterior": [],
                "~warmup_sample_stats": [],
            }
            fails = check_multiple_attrs(test_dict, idata)
            assert not fails
            assert idata.posterior.dims["chain"] == 2
            assert idata.posterior.dims["draw"] == 30
Example #18
def test_abort(mp_start_method):
    with pm.Model() as model:
        a = pm.Normal("a", shape=1)
        b = pm.HalfNormal("b")
        step1 = pm.NUTS([model.rvs_to_values[a]])
        step2 = pm.Metropolis([model.rvs_to_values[b]])

    step = pm.CompoundStep([step1, step2])

    # on Windows we cannot fork
    if platform.system() == "Windows" and mp_start_method == "fork":
        return
    if mp_start_method == "spawn":
        step_method_pickled = cloudpickle.dumps(step, protocol=-1)
    else:
        step_method_pickled = None

    for abort in [False, True]:
        ctx = multiprocessing.get_context(mp_start_method)
        proc = ps.ProcessAdapter(
            10,
            10,
            step,
            chain=3,
            seed=1,
            mp_ctx=ctx,
            start={
                "a": floatX(np.array([1.0])),
                "b_log__": floatX(np.array(2.0))
            },
            step_method_pickled=step_method_pickled,
        )
        proc.start()
        while True:
            proc.write_next()
            out = ps.ProcessAdapter.recv_draw([proc])
            if out[1]:
                break
        if abort:
            proc.abort()
        proc.join()
Example #19
    muB = pm.Normal('muB', 0, .100)
    tauB = pm.Gamma('tauB', .01, .01)
    udfB = pm.Uniform('udfB', 0, 1)
    tdfB = 1 + tdfBgain * (-pm.log(1 - udfB))
    # define the priors
    tau = pm.Gamma('tau', 0.01, 0.01)
    beta0 = pm.Normal('beta0', mu=0, tau=1.0E-12)
    beta1 = pm.T('beta1', mu=muB, lam=tauB, nu=tdfB, shape=n_predictors)
    mu = beta0 + pm.dot(beta1, x.values.T)
    # define the likelihood
    #mu = beta0 + beta1[0] * x.values[:,0] + beta1[1] * x.values[:,1]
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=y)
    # Generate a MCMC chain
    start = pm.find_MAP()
    step1 = pm.NUTS([beta1])
    step2 = pm.Metropolis([beta0, tau, muB, tauB, udfB])
    trace = pm.sample(10000, [step1, step2], start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 2000
thin = 1

# Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

# Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[mu, tau])
#pm.autocorrplot(trace, vars =[beta0])

## Plot KDE and sampled values for each parameter.
Example #20
y_sd = np.std(y)
zx = (x - x_m) / x_sd
zy = (y - y_m) / y_sd

# THE MODEL
with pm.Model() as model:
    # define the priors
    beta0 = pm.Normal('beta0', mu=0, tau=1.0E-12)
    beta1 = pm.Normal('beta1', mu=0, tau=1.0E-12)
    tau = pm.Gamma('tau', 0.001, 0.001)
    # define the likelihood
    mu = beta0 + beta1 * zx
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=zy)
    # Generate a MCMC chain
    start = pm.find_MAP()
    step = [pm.Metropolis([rv]) for rv in model.unobserved_RVs]
    trace = pm.sample(10000, step, start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 5000
thin = 10

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[tau])
#pm.autocorrplot(trace, vars =[tau])

## Plot KDE and sampled values for each parameter.
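
Because this regression is fit on standardized data, interpreting the fit requires mapping the coefficients back to the original scale. A sketch of that back-transformation, assuming x_m, x_sd, y_m, and y_sd come from the standardization step (partly truncated in this listing):

# Back-transform standardized coefficients to the original scale
b1 = trace['beta1'][burnin::thin] * y_sd / x_sd
b0 = trace['beta0'][burnin::thin] * y_sd + y_m - b1 * x_m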
Example #21
# THE MODEL.
with pm.Model() as model:
    # Hyperprior on model index:
    model_index = pm.DiscreteUniform('model_index', lower=0, upper=1)
    # Prior
    nu = pm.Normal('nu', mu=0, tau=0.1)  # it is possible to use tau or sd
    eta = pm.Gamma('eta', .1, .1)
    theta0 = 1 / (1 + pm.exp(-nu))  # theta from model index 0
    theta1 = pm.exp(-eta)  # theta from model index 1
    theta = pm.switch(pm.eq(model_index, 0), theta0, theta1)
    # Likelihood
    y = pm.Bernoulli('y', p=theta, observed=y)
    # Sampling
    start = pm.find_MAP()
    steps = [pm.Metropolis([i]) for i in model.unobserved_RVs[1:]]
    steps.append(pm.ElemwiseCategoricalStep(var=model_index, values=[0, 1]))
    trace = pm.sample(10000, steps, start=start, progressbar=False)

# EXAMINE THE RESULTS.
burnin = 1000
thin = 5

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[nu, eta])
#pm.autocorrplot(trace, vars =[nu, eta])


Example #22
# THE MODEL
with pm.Model() as model:
    # define the priors
    beta0 = pm.Normal('beta0', mu=0, tau=1.0E-12)
    beta1 = pm.Normal('beta1', mu= 0, tau=1.0E-12, shape=n_predictors)
    tau = pm.Gamma('tau', 0.01, 0.01)
    mu = beta0 + pm.dot(beta1, x.values.T)
    # define the likelihood
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=y)
    # Generate a MCMC chain
    start = pm.find_MAP()
    step1 = pm.NUTS([beta1])
    step2 = pm.Metropolis([beta0, tau])
    trace = pm.sample(10000, [step1, step2], start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 5000
thin = 1

# Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

# Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[mu, tau])
#pm.autocorrplot(trace, vars =[beta0])

## Plot KDE and sampled values for each parameter.
Example #23
# THE MODEL.
with pm.Model() as model:
    # Hyperprior on model index:
    model_index = pm.DiscreteUniform('model_index', lower=0, upper=1)
    # Prior
    nu = pm.Normal('nu', mu=0, tau=0.1)  # it is possible to use tau or sd
    eta = pm.Gamma('eta', .1, .1)
    theta0 = 1 / (1 + pm.exp(-nu))  # theta from model index 0
    theta1 = pm.exp(-eta)  # theta from model index 1
    theta = pm.switch(pm.eq(model_index, 0), theta0, theta1)
    # Likelihood
    y = pm.Bernoulli('y', p=theta, observed=y)
    # Sampling
    start = pm.find_MAP()
    step1 = pm.Metropolis(model.vars[1:])
    step2 = pm.ElemwiseCategoricalStep(var=model_index, values=[0, 1])
    trace = pm.sample(10000, [step1, step2], start=start, progressbar=False)

# EXAMINE THE RESULTS.
burnin = 1000
thin = 5

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[nu, eta])
#pm.autocorrplot(trace, vars =[nu, eta])
Example #24
y = np.repeat([0, 1], [3, 6])  # 3 tails 6 heads

with pm.Model() as model:
    # Hyperhyperprior:
    model_index = pm.DiscreteUniform('model_index', lower=0, upper=1)
    # Hyperprior:
    kappa_theta = 12
    mu_theta = pm.switch(pm.eq(model_index, 1), 0.25, 0.75)
    # Prior distribution:
    a_theta = mu_theta * kappa_theta
    b_theta = (1 - mu_theta) * kappa_theta
    theta = pm.Beta('theta', a_theta, b_theta) # theta distributed as beta density
    #likelihood
    y = pm.Bernoulli('y', theta, observed=y)
    start = pm.find_MAP()
    step1 = pm.Metropolis([model_index])
    step2 = pm.Metropolis([theta])
    trace = pm.sample(10000, [step1, step2], start=start, progressbar=False)


## Check the results.
burnin = 2000  # posterior samples to discard
thin = 1  # thinning interval (keep every sample)

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace)
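
The quantity this model-comparison setup targets is the posterior probability of each model index; since the summaries above are commented out, here is a post-processing sketch (an assumption, not part of the original):

model_idx_sample = trace['model_index'][burnin::thin]
print('P(model_index = 0) = %.3f' % (model_idx_sample == 0).mean())
print('P(model_index = 1) = %.3f' % (model_idx_sample == 1).mean())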
Example #25
import numpy as np
import pymc as pm
import matplotlib.pyplot as plt
from plot_post import *

# Generate the data
y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
              0])  # 11 heads and 3 tails

with pm.Model() as model:
    # define the prior
    theta = pm.Beta('theta', 1, 1)  # prior
    # define the likelihood
    y = pm.Bernoulli('y', p=theta, observed=y)

    # Generate a MCMC chain
    trace = pm.sample(5000, pm.Metropolis(),
                      progressbar=False)  # Use Metropolis sampling
#    start = pm.find_MAP()  # Find starting value by optimization
#    step = pm.NUTS()  # Instantiate NUTS sampler
#    trace = pm.sample(5000, step, start=start, progressbar=False)

# create an array with the posterior sample
theta_sample = trace['theta']

print(theta_sample)

plt.subplot(1, 2, 1)
plt.plot(theta_sample[:500], np.arange(500), marker='o')
plt.xlim(0, 1)
plt.xlabel(r'$\theta$')
plt.ylabel('Position in Chain')
Example #26
    d = pm.Gamma('d', 1, 1)
    sG = m**2 / d**2
    rG = m / d**2
    # define the priors
    sigma = pm.Uniform('sigma', 0,
                       10)  # y values are assumed to be standardized
    tau = pm.Gamma('tau', sG, rG)
    a0 = pm.Normal('a0', mu=0,
                   tau=0.001)  # y values are assumed to be standardized
    a = pm.Normal('a', mu=0, tau=atau, shape=NxLvl)
    mu = a0 + a
    # define the likelihood
    yl = pm.Normal('yl', mu[x], tau=tau, observed=z)
    # Generate a MCMC chain
    start = pm.find_MAP()
    steps = pm.Metropolis()
    trace = pm.sample(20000, steps, start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 2000
thin = 50

# Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

# Check for mixing and autocorrelation
pm.autocorrplot(trace[burnin::thin], vars=model.unobserved_RVs[:-1])

## Plot KDE and sampled values for each parameter.
#pm.traceplot(trace[burnin::thin])
Example #27
    55, 40, 46, 56, 47, 54, 54, 42, 34, 35, 41, 48, 46, 39, 55, 30, 49, 27, 51,
    41, 36, 45, 41, 53, 32, 43, 33
])
condition = np.repeat([0, 1, 2, 3], nSubj)

# Specify the model in PyMC
with pm.Model() as model:
    kappa = pm.Gamma('kappa', 1, 0.1, shape=ncond)
    mu = pm.Beta('mu', 1, 1, shape=ncond)
    theta = pm.Beta('theta',
                    mu[condition] * kappa[condition],
                    (1 - mu[condition]) * kappa[condition],
                    shape=len(z))
    y = pm.Binomial('y', p=theta, n=N, observed=z)
    start = pm.find_MAP()
    step1 = pm.Metropolis([mu])
    step2 = pm.Metropolis([theta])
    step3 = pm.NUTS([kappa])
    #    samplers = [pm.Metropolis([rv]) for rv in model.unobserved_RVs]
    trace = pm.sample(10000, [step1, step2, step3],
                      start=start,
                      progressbar=False)

## Check the results.
burnin = 5000  # posterior samples to discard
thin = 10  # thinning interval between kept samples

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)
Example #28
y_sd = np.std(y)
zx = (x - x_m) / x_sd
zy = (y - y_m) / y_sd

# THE MODEL
with pm.Model() as model:
    # define the priors
    tau = pm.Gamma('tau', 0.001, 0.001)
    beta0 = pm.Normal('beta0', mu=0, tau=1.0E-12)
    beta1 = pm.Normal('beta1', mu=0, tau=1.0E-12)
    mu = beta0 + beta1 * zx
    # define the likelihood
    yl = pm.Normal('yl', mu=mu, tau=tau, observed=zy)
    # Generate a MCMC chain
    start = pm.find_MAP()
    step = pm.Metropolis()
    trace = pm.sample(10000, step, start, progressbar=False)

# EXAMINE THE RESULTS
burnin = 5000
thin = 10

## Print summary for each trace
#pm.summary(trace[burnin::thin])
#pm.summary(trace)

## Check for mixing and autocorrelation
#pm.autocorrplot(trace[burnin::thin], vars =[tau])
#pm.autocorrplot(trace, vars =[tau])

## Plot KDE and sampled values for each parameter.
Example #29
 def make_step(cls):
     args = {}
     if hasattr(cls, "step_args"):
         args.update(cls.step_args)
     return pm.Metropolis(**args)
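
Presumably make_step is a classmethod on a shared test base class; a sketch of how a subclass might customize the sampler through step_args (the class names and values below are made up):

import pymc as pm

class StepFactory:
    @classmethod
    def make_step(cls):
        args = {}
        if hasattr(cls, "step_args"):
            args.update(cls.step_args)
        return pm.Metropolis(**args)

class ShortTuneCase(StepFactory):
    # Forwarded verbatim to pm.Metropolis(**args)
    step_args = {"tune_interval": 50}

with pm.Model():
    pm.Normal("x")
    step = ShortTuneCase.make_step()  # Metropolis with tune_interval=50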