# Finalise and save the spectroscopy-data figure created above.
plt.tight_layout()
plt.savefig('spectroscopy_data.png')
plt.close()
print(' # spectroscopy data plot finished')

# create the posterior object
# NOTE(review): SpectroPosterior and x_data / y_data / errors are defined
# earlier in this script (outside this view).
posterior = SpectroPosterior(x_data, y_data, errors)

# create the markov chain object
# start = [peak 1 area, peak 1 width, peak 2 area, peak 2 width, background]
chain = PcaChain(posterior=posterior, start=[1000, 1, 1000, 1, 20])

# generate a sample by advancing the chain
chain.advance(50000)

# we can check the status of the chain using the plot_diagnostics method
chain.plot_diagnostics(show=False, filename='plot_diagnostics_example.png')
print(' # diagnostics plot finished')

# We can automatically set sensible burn and thin values for the sample
chain.autoselect_burn_and_thin()

# we can get a quick overview of the posterior using the matrix_plot
# functionality of chain objects, which plots all possible 1D & 2D
# marginal distributions of the full parameter set (or a chosen sub-set).
chain.thin = 1
labels = [
    'peak 1 area', 'peak 1 width', 'peak 2 area', 'peak 2 width', 'background'
]
chain.matrix_plot(show=False, labels=labels, filename='matrix_plot_example.png')
print(' # matrix plot finished')
# Combine the likelihood and prior (defined earlier in this script) into a
# posterior distribution object.
from inference.posterior import Posterior

posterior = Posterior(likelihood=likelihood, prior=prior)

# Now we have constructed a posterior distribution, we can sample from it
# using Markov-chain Monte-Carlo (MCMC).

# The inference.mcmc module contains implementations of various MCMC sampling algorithms.
# Here we import the PcaChain class and use it to create a Markov-chain object:
from inference.mcmc import PcaChain

chain = PcaChain(posterior=posterior, start=initial_guess)

# We generate samples by advancing the chain by a chosen number of steps using the advance method:
chain.advance(25000)

# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics()

# The burn-in (how many samples from the start of the chain are discarded)
# can be chosen by setting the burn attribute of the chain object:
chain.burn = 5000

# we can get a quick overview of the posterior using the matrix_plot method
# of chain objects, which plots all possible 1D & 2D marginal distributions
# of the full parameter set (or a chosen sub-set).
chain.matrix_plot(labels=['area', 'width', 'center', 'background'])

# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
area_pdf = chain.get_marginal(0)
area_pdf.plot_summary(label='Gaussian area')
# Combine the likelihood and prior (defined earlier in this script) into a
# posterior distribution object.
from inference.posterior import Posterior

posterior = Posterior(likelihood=likelihood, prior=prior)

# Now we have constructed a posterior distribution, we can sample from it
# using Markov-chain Monte-Carlo (MCMC).

# The inference.mcmc module contains implementations of various MCMC sampling algorithms.
# Here we import the PcaChain class and use it to create a Markov-chain object:
from inference.mcmc import PcaChain

chain = PcaChain(posterior=posterior, start=initial_guess)

# We generate samples by advancing the chain by a chosen number of steps using the advance method:
chain.advance(25000)

# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics(filename='plot_diagnostics_example.png')

# The burn-in (how many samples from the start of the chain are discarded)
# can be chosen by setting the burn attribute of the chain object:
chain.burn = 5000

# we can get a quick overview of the posterior using the matrix_plot method
# of chain objects, which plots all possible 1D & 2D marginal distributions
# of the full parameter set (or a chosen sub-set).
chain.matrix_plot(labels=['area', 'width', 'center', 'background'],
                  filename='matrix_plot_example.png')

# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
area_pdf = chain.get_marginal(0)
# NOTE(review): the original source was truncated mid-call after the label
# argument; the filename keyword below is inferred from the filename=
# pattern of the other plotting calls in this script — TODO confirm.
area_pdf.plot_summary(label='Gaussian area',
                      filename='gaussian_area_pdf.png')