# Example #1
# After discarding burn-in, what we have left should be a representative
# sample drawn from the posterior. Repeating the previous plot as a
# scatter-plot shows the sample:
p = chain.get_probabilities()  # color the points by their probability value
plt.scatter(chain.get_parameter(0),
            chain.get_parameter(1),
            c=exp(p - max(p)),
            marker='.')
plt.xlabel('parameter 1')
plt.ylabel('parameter 2')
plt.grid()
plt.show()
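
# As a quick numerical companion to the scatter-plot above (a minimal sketch,
# not part of the original example), the same retained samples can be reduced
# to point estimates using only the get_parameter calls already shown plus numpy:
from numpy import mean
param_1_mean = mean(chain.get_parameter(0))
param_2_mean = mean(chain.get_parameter(1))
print(f"posterior mean of parameter 1: {param_1_mean:.3f}")
print(f"posterior mean of parameter 2: {param_2_mean:.3f}")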

# We can easily estimate 1D marginal distributions for any parameter
# using the 'get_marginal' method:
pdf_1 = chain.get_marginal(0, unimodal=True)
pdf_2 = chain.get_marginal(1, unimodal=True)
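# (Note: the 'unimodal=True' keyword above is passed on the assumption that these
#  marginals are single-peaked, so that a density estimator specialised for
#  unimodal distributions is used instead of the default estimator.)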

# get_marginal returns a density estimator object, which can be called
# as a function to return the value of the pdf at any point.
# Make an axis on which to evaluate the PDFs:
ax = linspace(-3, 4, 500)

# plot the results
plt.plot(ax, pdf_1(ax), label='param #1 marginal', lw=2)
plt.plot(ax, pdf_2(ax), label='param #2 marginal', lw=2)

plt.xlabel('parameter value')
plt.ylabel('probability density')
plt.legend()
plt.grid()
plt.show()
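
# A small add-on (not part of the original example): since the estimators are
# plain callables, the approximate mode of each marginal can be read off the
# evaluation grid using numpy alone:
from numpy import argmax
print("parameter 1 marginal mode is approximately", ax[argmax(pdf_1(ax))])
print("parameter 2 marginal mode is approximately", ax[argmax(pdf_2(ax))])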
"""

# create our posterior with two highly-correlated parameters
posterior = CorrelatedLinePosterior()

# create a PcaChain, and also a GibbsChain for comparison
pca = PcaChain(posterior=posterior, start=[-1, 1, -1])
gibbs = GibbsChain(posterior=posterior, start=[-1, 1, -1])

# advance both chains for the same number of samples
pca.advance(50000)
gibbs.advance(50000)

# get an estimate of the marginal distribution of one of the correlated parameters
pca_pdf = pca.get_marginal(2, burn=5000)
gibbs_pdf = gibbs.get_marginal(2, burn=5000)

# over-plot the marginal estimates to compare the performance of the two samplers
marginal_axis = linspace(-4, 2, 500)
plt.plot(marginal_axis,
         pca_pdf(marginal_axis),
         lw=2,
         label='PcaChain estimate')
plt.plot(marginal_axis,
         gibbs_pdf(marginal_axis),
         lw=2,
         label='GibbsChain estimate')
plt.grid()
plt.legend()
plt.tight_layout()
plt.show()
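
# As a final, hedged check (not part of the original comparison): basic summary
# statistics of the two chains' retained samples can also be compared directly.
# This assumes get_parameter accepts a 'burn' keyword in the same way that
# get_marginal does above.
from numpy import mean, std
pca_samples = pca.get_parameter(2, burn=5000)
gibbs_samples = gibbs.get_parameter(2, burn=5000)
print(f"PcaChain   - mean: {mean(pca_samples):.3f}, std: {std(pca_samples):.3f}")
print(f"GibbsChain - mean: {mean(gibbs_samples):.3f}, std: {std(gibbs_samples):.3f}")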