Example #1
0
    def test_potentials_warning(self):
        """Weighted PPC sampling must warn that Potential terms are ignored."""
        expected = "The effect of Potentials on other parameters is ignored during"
        with pm.Model() as model:
            a = pm.Normal("a", 0, 1)
            pm.Potential("p", a + 1)
            pm.Normal("obs", a, 1, observed=5)

        # A minimal fake posterior is enough to trigger the warning path.
        fake_trace = az.from_dict({"a": np.random.rand(10)})
        with pytest.warns(UserWarning, match=expected):
            pm.sample_posterior_predictive_w(
                samples=5, traces=[fake_trace, fake_trace], models=[model, model]
            )
Example #2
0
    def test_sample_posterior_predictive_w(self):
        """Exercise weighted PPC over traces, InferenceData, and its error paths."""
        observed = np.random.normal(0, 1, size=50)
        convergence_warning = "The number of samples is too small to check convergence reliably"

        # Pooled model: one scalar mean shared by all observations.
        with pm.Model() as model_0:
            mu = pm.Normal("mu", mu=0, sigma=1)
            pm.Normal("y", mu=mu, sigma=1, observed=observed)
            with pytest.warns(UserWarning, match=convergence_warning):
                trace_0 = pm.sample(10, tune=0, chains=2, return_inferencedata=False)
            idata_0 = az.from_pymc3(trace_0)

        # Unpooled model: one mean per observation.
        with pm.Model() as model_1:
            mu = pm.Normal("mu", mu=0, sigma=1, shape=len(observed))
            pm.Normal("y", mu=mu, sigma=1, observed=observed)
            with pytest.warns(UserWarning, match=convergence_warning):
                trace_1 = pm.sample(10, tune=0, chains=2, return_inferencedata=False)
            idata_1 = az.from_pymc3(trace_1)

        with pm.Model() as model_2:
            # Model with no observed RVs.
            pm.Normal("mu", mu=0, sigma=1)
            with pytest.warns(UserWarning, match=convergence_warning):
                trace_2 = pm.sample(10, tune=0, return_inferencedata=False)

        multitraces = [trace_0, trace_1]
        inference_datas = [idata_0, idata_1]
        models = [model_0, model_1]

        # MultiTrace inputs.
        ppc = pm.sample_posterior_predictive_w(multitraces, 100, models)
        assert ppc["y"].shape == (100, 50)

        # InferenceData inputs behave the same way.
        ppc = pm.sample_posterior_predictive_w(inference_datas, 100, models)
        assert ppc["y"].shape == (100, 50)

        # samples=None falls back to the posterior size (2 chains * 10 draws = 20).
        with model_0:
            ppc = pm.sample_posterior_predictive_w([idata_0.posterior], None)
            assert ppc["y"].shape == (20, 50)

        with pytest.raises(ValueError, match="The number of traces and weights should be the same"):
            pm.sample_posterior_predictive_w([idata_0.posterior], 100, models, weights=[0.5, 0.5])

        with pytest.raises(ValueError, match="The number of models and weights should be the same"):
            pm.sample_posterior_predictive_w([idata_0.posterior], 100, models)

        with pytest.raises(
            ValueError, match="The number of observed RVs should be the same for all models"
        ):
            pm.sample_posterior_predictive_w([trace_0, trace_2], 100, [model_0, model_2])
Example #3
0
    def test_sample_posterior_predictive_w(self):
        """Weighted posterior predictive sampling yields (n_samples, n_obs) arrays."""
        observations = np.random.normal(0, 1, size=500)

        # Pooled model: a single scalar mean for all observations.
        with pm.Model() as model_0:
            mu = pm.Normal("mu", mu=0, sigma=1)
            pm.Normal("y", mu=mu, sigma=1, observed=observations)
            trace_0 = pm.sample()

        # Unpooled model: one mean per observation.
        with pm.Model() as model_1:
            mu = pm.Normal("mu", mu=0, sigma=1, shape=len(observations))
            pm.Normal("y", mu=mu, sigma=1, observed=observations)
            trace_1 = pm.sample()

        # Degenerate but valid: weight a model against itself.
        ppc = pm.sample_posterior_predictive_w([trace_0, trace_0], 100, [model_0, model_0])
        assert ppc["y"].shape == (100, 500)

        # Mixing two structurally different models also works.
        ppc = pm.sample_posterior_predictive_w([trace_0, trace_1], 100, [model_0, model_1])
        assert ppc["y"].shape == (100, 500)
    def test_sample_posterior_predictive_w(self):
        """Weighted PPC over (trace, model) pairs preserves the output shape.

        NOTE(review): this re-definition shadows the identically named test
        defined immediately above it in this file.
        """
        sample_data = np.random.normal(0, 1, size=500)

        with pm.Model() as model_0:
            loc = pm.Normal("mu", mu=0, sigma=1)
            pm.Normal("y", mu=loc, sigma=1, observed=sample_data)
            trace_0 = pm.sample()

        with pm.Model() as model_1:
            loc = pm.Normal("mu", mu=0, sigma=1, shape=len(sample_data))
            pm.Normal("y", mu=loc, sigma=1, observed=sample_data)
            trace_1 = pm.sample()

        # Same model twice, then two different models: shape is identical.
        for trace_pair, model_pair in (
            ([trace_0, trace_0], [model_0, model_0]),
            ([trace_0, trace_1], [model_0, model_1]),
        ):
            ppc = pm.sample_posterior_predictive_w(trace_pair, 100, model_pair)
            assert ppc["y"].shape == (100, 500)
Example #5
0
},
                    method='BB-pseudo-BMA')
# NOTE(review): the pm.compare(...) call closed above begins before this
# excerpt; only the tail of its model dict and the `method` argument are visible.
cmp_df

# In[12]:

# Visualize the model-comparison table computed above and save the figure.
az.plot_compare(cmp_df)
plt.savefig('B11197_05_08.png', dpi=300)

# ## Model Averaging

# In[13]:

# Draw 1000 weighted posterior predictive samples, mixing the linear and
# order-2 models with equal weight (w and 1 - w, with w = 0.5).
w = 0.5
y_lp = pm.sample_posterior_predictive_w([trace_l, trace_p],
                                        samples=1000,
                                        models=[model_l, model_p],
                                        weights=[w, 1 - w])

# In[14]:

# Overlay the predictive density of each individual model and of the
# weighted (averaged) ensemble on a single axis.
_, ax = plt.subplots(figsize=(10, 6))
az.plot_kde(y_l, plot_kwargs={'color': 'C1'}, label='linear model', ax=ax)
az.plot_kde(y_p, plot_kwargs={'color': 'C2'}, label='order 2 model', ax=ax)
az.plot_kde(y_lp['y_pred'],
            plot_kwargs={'color': 'C3'},
            label='weighted model',
            ax=ax)

# NOTE(review): `y_1s` is not defined anywhere in this excerpt — possibly a
# typo for `y_ls`; confirm against the notebook's earlier cells.
plt.plot(y_1s, np.zeros_like(y_1s), '|', label='observed data')
plt.yticks([])
plt.legend()
Example #6
0
# %%
# Forest plot of the posterior traces of all candidate models.
az.plot_forest(traces, figsize=(10, 5))

# %%
# Counterfactual grid: sweep neocortex over [0.5, 0.8] while holding the
# other predictors fixed across the 30 grid points.
kcal_per_g = np.repeat(0, 30)
neocortex = np.linspace(0.5, 0.8, 30)
mass = np.repeat(4.5, 30)

# %%
# Push the counterfactual values into the shared variables, then draw
# posterior predictive samples from model m6_14 alone.
mass_shared.set_value(np.log(mass))
neocortex_shared.set_value(neocortex)
post_pred = pm.sample_posterior_predictive(trace_m6_14, samples=10000, model=m6_14)

# %%
# Ensemble prediction: weight each model's posterior predictive by its
# model-comparison weight (sorted so weights align with `traces`/`models`).
milk_ensemble = pm.sample_posterior_predictive_w(
    traces, 10000, models, weights=compare_df.weight.sort_index(ascending=True)
)

# %%
plt.figure(figsize=(8, 6))

# Single-model predictive mean and HPD band (dashed black).
plt.plot(neocortex, post_pred["kcal"].mean(0), ls="--", color="k")
az.plot_hpd(
    neocortex,
    post_pred["kcal"],
    fill_kwargs={"alpha": 0},
    plot_kwargs={"alpha": 1, "color": "k", "ls": "--"},
)

# Weighted-ensemble predictive mean and HPD band (color C1).
plt.plot(neocortex, milk_ensemble["kcal"].mean(0), color="C1")
az.plot_hpd(neocortex, milk_ensemble["kcal"])
# Model averaging
# Compute stacking weights for the three candidate models from their traces.
model_dict = dict(zip([model_0, model_1, model_2], traces))
comp = pm.compare(model_dict, method='stacking')
# comp = pm.compare(model_dict, method='BB-pseudo-BMA')
# comp = pm.compare(model_dict, method='pseudo-BMA')
print(comp)
# az.compare({'model_0':trace_0, 'model_1':trace_1, 'model_2':trace_2}, method='BB-pseudo-BMA')

# Now we are going to use the previously computed weights to generate predictions based not
# on a single model but on the weighted set of models. This is one way to perform
# model averaging. Using PyMC3 we can call the sample_posterior_predictive_w
# function as follows (weights are sorted to align with the order of `traces`):
ppc_w = pm.sample_posterior_predictive_w(
    traces,
    1000, [model_0, model_1, model_2],
    weights=comp.weight.sort_index(ascending=True),
    progressbar=False)

# We are also going to compute PPCs for the lowest-WAIC model
ppc_2 = pm.sample_posterior_predictive(trace_2,
                                       1000,
                                       model_2,
                                       progressbar=False)

# A simple way to compare both kinds of predictions is to plot their mean and hpd interval
mean_w = ppc_w['kcal'].mean()
hpd_w = pm.hpd(ppc_w['kcal']).mean(0)
mean = ppc_2['kcal'].mean()
hpd = pm.hpd(ppc_2['kcal']).mean(0)