Example #1
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
import arviz as az

# speed of light data
# Data from http://www.stat.columbia.edu/~gelman/book/data/light.asc
data = np.array([
    28, 26, 33, 24, 34, -44, 27, 16, 40, -2, 29, 22, 24, 21, 25, 30, 23, 29,
    31, 19, 24, 20, 36, 32, 36, 28, 25, 21, 28, 29, 37, 25, 28, 26, 30, 32, 36,
    26, 30, 22, 36, 23, 27, 27, 28, 27, 31, 27, 26, 33, 26, 32, 32, 24, 39, 28,
    24, 25, 32, 25, 29, 27, 28, 29, 16, 23
])

# NB: this synthetic Gaussian sample overwrites the speed-of-light data above
np.random.seed(42)
n = 100
mu = 2
sigma = 1
data = sigma * np.random.randn(n) + mu

with pm.Model() as model_g:
    μ = pm.Uniform('μ', lower=-10, upper=10)
    σ = pm.HalfNormal('σ', sd=10)
    y = pm.Normal('y', mu=μ, sd=σ, observed=data)
    trace_g = pm.sample(1000)

axes = az.plot_joint(trace_g, kind='kde', fill_last=False)
ax = axes[0]
mu = np.mean(data)
sigma = np.std(data)
ax.plot(mu, sigma, 'r*', markersize=12)
plt.savefig('kde-gauss-2d.pdf')
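
Note: az.plot_joint is deprecated in recent ArviZ releases in favour of az.plot_pair; a minimal equivalent sketch, assuming ArviZ >= 0.11:

# hedged sketch: joint KDE with marginal distributions via the newer API
az.plot_pair(trace_g, kind='kde', marginals=True)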
Example #2
"""
Joint Plot
==========

_thumb: .5, .8
"""
import matplotlib.pyplot as plt
import arviz as az

az.style.use("arviz-darkgrid")

data = az.load_arviz_data("non_centered_eight")

az.plot_joint(
    data,
    var_names=["theta"],
    coords={"school": ["Choate", "Phillips Andover"]},
    kind="hexbin",
    figsize=(10, 10),
)
plt.show()
Example #3
    #     y = stats.gaussian_kde(samples)(x)
    #     plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true, 'cs': cs_true}[param], c='k')
    #     plt.ylabel('Probability density')
    #     plt.title(param)
    #
    # plt.tight_layout();

    data_spp = az.from_pymc3(trace=trace)
    trace_K = az.plot_posterior(data_spp, var_names=['K'], kind='hist')
    trace_φ = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
    trace_H = az.plot_posterior(data_spp, var_names=['H'], kind='hist')
    trace_Q = az.plot_posterior(data_spp, var_names=['Q'], kind='hist')
    trace_ct = az.plot_posterior(data_spp, var_names=['ct'], kind='hist')
    trace_cs = az.plot_posterior(data_spp, var_names=['cs'], kind='hist')
    joint_plt = az.plot_joint(data_spp,
                              var_names=['K', 'φ'],
                              kind='kde',
                              fill_last=False)
    # trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], compact=True);

    plt.show()

    # a = np.random.uniform(0.1, 0.3)
    # b = np.random.uniform(0.5e-12, 1.5e-12)
    # _, ax = plt.subplots(1, 2, figsize=(10, 4))
    # az.plot_dist(a, color="C1", label="Prior", ax=ax[0])
    # az.plot_posterior(data_spp, color="C2", var_names=['φ'], ax=ax[1], kind='hist')
    # az.plot_dist(b, color="C1", label="Prior", ax=ax[1])
    # az.plot_posterior(data_spp, color="C2", var_names=['K'], label="Posterior",  ax=ax[0], kind='hist')

    plt.show()
Example #4
data.prior['depth_1'] = data.prior['depths'][0, :, 1]
data.prior['depth_2'] = data.prior['depths'][0, :, 2]
data.prior['depth_3'] = data.prior['depths'][0, :, 3]

# %%
data.posterior['depth_0'] = data.posterior['depths'][0, :, 0]
data.posterior['depth_1'] = data.posterior['depths'][0, :, 1]
data.posterior['depth_2'] = data.posterior['depths'][0, :, 2]
data.posterior['depth_3'] = data.posterior['depths'][0, :, 3]

# %%
az.plot_trace(
    data, var_names=['depth_0', 'depth_1', 'depth_2', 'depth_3', 'gravity'])

# %%
az.plot_joint(data, var_names=['depth_1', 'depth_2'])

# %%
# !git pull
from gempy.bayesian import plot_posterior as pp

import seaborn as sns
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets

# %%
# %matplotlib notebook
from importlib import reload
reload(pp)
p = pp.PlotPosterior(data)
p.create_figure(figsize=(9, 3), joyplot=True)
Example #5
    # specify a decision boundary for the data
    db = pm.Deterministic("db",
                          -alpha / beta[1] - beta[0] / beta[1] * x_c[:, 0])
    # specify the likelihood of the data
    y_obs = pm.Bernoulli("y_obs", p=theta, observed=y_0)
    # inference step
    trace = pm.sample(2000, tune=1500)

# ---------------------- analyse the posterior ---------------------------- #

with MultLog_model:
    # analyse the summary
    log.info("The summary of the trace is as follows: %s",
             az.summary(trace, var_names=["alpha", "beta"]))
    # plot the joint posterior
    az.plot_joint(trace, kind="kde", var_names=["beta"])

# ---------------------- plot the data with the decision boundary ------------- #

# initialize a figure
plt.figure(figsize=(12, 5))
# get the index to order the independent variable
idx = np.argsort(x_c[:, 0])
# get the mean of the decision boundary to plot
db = trace["db"].mean(0)[idx]
# scatter the true data
plt.scatter(x_c[:, 0], x_c[:, 1], c=[f'C{x}' for x in y_0])
# plot the decision boundary
plt.plot(x_c[:, 0][idx], db, c="k")
# get the hpd
az.plot_hpd(x_c[:, 0], trace["db"], color="k")
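
Note: az.plot_hpd was renamed to az.plot_hdi in later ArviZ releases; a hedged equivalent call, assuming ArviZ >= 0.11:

# hedged sketch: draw the same HDI band with the renamed function
az.plot_hdi(x_c[:, 0], trace["db"], color="k")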
Example #6
# $$y \sim N(\mu, \sigma)$$
#
# For our initial prior parameters, we'll set l=40 and h=70, which covers all of the observations. Set $\sigma_\sigma=10$, which is simply a large (and therefore vague) number.

# In[13]:

with pm.Model() as chem_shift_model:
    μ = pm.Uniform('μ', lower=40, upper=70)
    σ = pm.HalfNormal('σ', sd=10)
    y = pm.Normal('y', mu=μ, sd=σ, observed=data)
    trace_csm = pm.sample(1000)

# In[14]:

az.plot_trace(trace_csm)
az.plot_joint(trace_csm, kind='kde', fill_last=False)
plt.show()
az.summary(trace_csm)

# Since the posterior here has 2 parameters, the trace is two-dimensional and the plot outputs a graph for each parameter - these are called the __marginal distributions__. In addition to the trace plot we've also output the joint plot, which shows each marginal distribution as well as a contour map of both of them combined.
#
# Now let's do our PPCs (posterior predictive checks). Using `sample_posterior_predictive` we can specify the number of samples to take. Each sample will have the same number of data points as the original dataset we passed into the model.
#
# This function returns a dictionary whose keys are the observed variables (here y) and whose values are arrays of shape (samples, size) containing the prediction results. You can then pull out the values and plot the PPC results against the original data. In the plot below, the black line is the KDE of the data, and the other lines are the 100 posterior predictive samples.

# In[15]:

y_post_pred = pm.sample_posterior_predictive(trace_csm,
                                             samples=100,
                                             model=chem_shift_model)
data_ppc = az.from_pymc3(trace=trace_csm, posterior_predictive=y_post_pred)
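
The snippet ends before the plot described above is drawn; a minimal completion sketch, assuming matplotlib.pyplot is imported as plt:

# hedged sketch: overlay the posterior predictive KDEs on the data KDE
az.plot_ppc(data_ppc, num_pp_samples=100)
plt.show()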
    y = pm.Normal("obs", mu=mu, sigma=sigma, observed=data)
    # inference step
    trace = pm.sample(1000)

# ----------------------- analyse the posterior --------------------------------------- #

with gaussian_model:
    # show the trace
    log.info("The trace of mu is %s:",
             trace["mu"]), log.info("the shape is %s", trace["mu"].shape)
    log.info("The trace of sigma is %s:",
             trace["sigma"]), log.info("the shape is %s", trace["sigma"].shape)
    # show the trace summary
    az.summary(trace)
    # plot the trace KDE and MCMC draws
    az.plot_trace(trace)
    # plot the trace joint KDE
    az.plot_joint(trace, kind="kde", fill_last=False)

# ------------------------ get samples of the data from the posterior ----------------- #

with gaussian_model:
    # get the samples of the data
    y_new = pm.sample_posterior_predictive(trace)
    log.info("The samples from the data is: %s", y_new["obs"])
    log.info("The shape of the samples is: %s", y_new["obs"].shape)
    # visual check for whether the original sample makes sense given the posterior
    data_ppc = az.from_pymc3(trace=trace, posterior_predictive=y_new)
    ax = az.plot_ppc(data_ppc, figsize=(12, 6), mean=False)
    ax[0].legend(fontsize=15)
Example #8
"""
Joint Plot
==========

_thumb: .5, .8
"""
import arviz as az

az.style.use('arviz-darkgrid')

data = az.load_arviz_data('non_centered_eight')

az.plot_joint(data,
              var_names=['theta'],
              coords={'school': ['Choate', 'Phillips Andover']},
              kind='hexbin',
              figsize=(10, 10))
Example #9
    beta_eff[i] = get_beta_eff(i, beta_start, beta_end, k, 90)

plt.plot(np.arange(0, 180), beta_eff)
plt.xlabel('Time (days)')
plt.ylabel('Beta (effective)')
plt.savefig('../results/plots/beta_over_time.pdf')
plt.clf()
"""
seir.plot_incidence()
plt.plot(np.arange(0,180),daily_cases.newcountconfirmed[20:200],label='observed')
plt.legend()
plt.tight_layout()
plt.savefig('../results/plots/model_fit.pdf')
"""

inference_data = az.from_cmdstan('../results/outputs/*.csv')
az.plot_trace(inference_data)
plt.savefig('../results/plots/model_trace.pdf')
az.plot_joint(inference_data,
              var_names=['beta_start', 'beta_end'],
              kind='kde',
              figsize=(6, 6))
plt.tight_layout()
plt.savefig('../results/plots/model_joint_betas.pdf')
az.plot_posterior(inference_data)
plt.savefig('../results/plots/model_posterior.pdf')
az.plot_autocorr(inference_data, combined=True)
plt.savefig('../results/plots/model_autocorrelation.pdf')
az.plot_rank(inference_data)
plt.savefig('../results/plots/model_rank.pdf')