from numpy import array, linspace
import matplotlib.pyplot as plt

# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics()

# We can automatically set sensible burn and thin values for the sample
chain.autoselect_burn()
chain.autoselect_thin()
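
# The selected values are stored as the chain's burn and thin attributes
# (burn is set directly this way later in these examples; thin is assumed
# here to work the same way), so they can be inspected after auto-selection:
print(f"burn = {chain.burn}, thin = {chain.thin}")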

# we can get a quick overview of the posterior using the matrix_plot
# functionality of chain objects, which plots all possible 1D & 2D
# marginal distributions of the full parameter set (or a chosen sub-set).
chain.matrix_plot()
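
# For example, a sub-set can be plotted by passing parameter indices (a sketch
# assuming a 'params' keyword in matrix_plot), here selecting the two widths:
chain.matrix_plot(params=[1, 3])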

# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
w1_pdf = chain.get_marginal(1, unimodal=True)
w2_pdf = chain.get_marginal(3, unimodal=True)

# get_marginal returns a density estimator object, which can be called
# as a function to return the value of the pdf at any point.
# Make an axis on which to evaluate the PDFs:
pdf_axis = linspace(0.2, 4., 1000)
plt.plot(pdf_axis, w1_pdf(pdf_axis), label='width #1 marginal', lw=2)
plt.plot(pdf_axis, w2_pdf(pdf_axis), label='width #2 marginal', lw=2)
plt.xlabel('peak width')
plt.ylabel('probability density')
plt.legend()
plt.grid()
plt.show()

# what if instead we wanted a PDF for the ratio of the two widths?
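# One approach (a sketch, assuming the chain's get_parameter method returns
# the sample for a given parameter index, and using scipy's gaussian_kde in
# place of the estimators above): build a density estimate of the ratio.
from scipy.stats import gaussian_kde

w1_samples = array(chain.get_parameter(1))
w2_samples = array(chain.get_parameter(3))
ratio_pdf = gaussian_kde(w1_samples / w2_samples)

ratio_axis = linspace(0.1, 3., 500)
plt.plot(ratio_axis, ratio_pdf(ratio_axis), lw=2, label='width ratio')
plt.xlabel('width #1 / width #2')
plt.ylabel('probability density')
plt.legend()
plt.grid()
plt.show()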

# ----- Example #2 -----
# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics()

# The burn-in (how many samples from the start of the chain are discarded)
# can be chosen by setting the burn attribute of the chain object:
chain.burn = 5000
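
# Thinning (keeping only every n'th sample) can be chosen the same way through
# the thin attribute (assumed here to mirror the burn attribute above):
chain.thin = 2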

# we can get a quick overview of the posterior using the matrix_plot method
# of chain objects, which plots all possible 1D & 2D marginal distributions
# of the full parameter set (or a chosen sub-set).
chain.matrix_plot(labels=['area', 'width', 'center', 'background'])

# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
area_pdf = chain.get_marginal(0)
area_pdf.plot_summary(label='Gaussian area')

# We can assess the level of uncertainty in the model predictions by passing each
# sample through the forward model and observing the resulting distribution of
# predicted curves:

# generate an axis on which to evaluate the model
x_fits = linspace(0, 12, 500)
# get the sample
sample = chain.get_sample()
# pass each through the forward model
curves = array([PeakModel.forward_model(x_fits, theta) for theta in sample])

# We could plot the predictions for each sample all on a single graph, but this is
# often cluttered and difficult to interpret.
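
# One clearer alternative (a sketch using numpy percentiles, rather than any
# particular plotting helper): shade credible intervals of the predictions.
from numpy import percentile

lower_95, lower_65, upper_65, upper_95 = percentile(curves, [2.5, 17.5, 82.5, 97.5], axis=0)
plt.fill_between(x_fits, lower_95, upper_95, color='C0', alpha=0.15, label='95% interval')
plt.fill_between(x_fits, lower_65, upper_65, color='C0', alpha=0.35, label='65% interval')
plt.plot(x_fits, curves.mean(axis=0), c='C0', lw=2, label='sample mean')
plt.xlabel('x')
plt.ylabel('model prediction')
plt.legend()
plt.grid()
plt.show()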

# ----- Example #3 -----
# As the PcaChain advances, its covariance estimate is derived from the sample
# itself, and the eigenvectors are re-calculated.
from inference.mcmc import GibbsChain, PcaChain

# create our posterior, which has two highly correlated parameters
posterior = CorrelatedLinePosterior()

# create a PcaChain, and also a GibbsChain for comparison
pca = PcaChain(posterior=posterior, start=[-1, 1, -1])
gibbs = GibbsChain(posterior=posterior, start=[-1, 1, -1])

# advance both chains by the same number of samples
pca.advance(50000)
gibbs.advance(50000)

# get an estimate of the marginal distribution of one of the correlated parameters
pca_pdf = pca.get_marginal(2, burn=5000)
gibbs_pdf = gibbs.get_marginal(2, burn=5000)

# over-plot the marginal estimates to compare the performance
marginal_axis = linspace(-4, 2, 500)
plt.plot(marginal_axis,
         pca_pdf(marginal_axis),
         lw=2,
         label='PcaChain estimate')
plt.plot(marginal_axis,
         gibbs_pdf(marginal_axis),
         lw=2,
         label='GibbsChain estimate')
plt.grid()
plt.legend()
plt.tight_layout()
plt.show()