Exemple #1
0
    def gaussian_model(mu, sigma, seed=43, n_samples=1000):
        """Simulator: draw n_samples from a Gaussian with the given mu/sigma.

        Wraps the distribution in a pylfi.Prior so the draw can be seeded
        through its `rvs` interface.
        """
        distribution = pylfi.Prior('norm', loc=mu, scale=sigma, name='model')
        return distribution.rvs(size=n_samples, seed=seed)
Exemple #2
0
# be summary statistics of the data (these are actually sufficient summary
# statistics):


def stat_calc(y):
    """Return the summary statistics [mean, std] of the data `y`."""
    return [numpy.mean(y), numpy.std(y)]

###############################################################################
# We then place priors over the unknown model parameters using the `.Prior`
# class. In the present example, we define the priors:


# Gaussian prior over mu and uniform prior over sigma
# (scipy convention: uniform covers [loc, loc + scale]).
mu_prior = pylfi.Prior(
    'norm', loc=165, scale=2, name='mu', tex=r'$\mu$')
sigma_prior = pylfi.Prior(
    'uniform', loc=12, scale=7, name='sigma', tex=r'$\sigma$')
priors = [mu_prior, sigma_prior]

# Plot the mu prior pdf on a grid around its mean.
fig, axes = plt.subplots(nrows=2, figsize=(8, 4), tight_layout=True)
x = np.linspace(159, 171, 1000)
mu_prior.plot_prior(x, ax=axes[0])
Exemple #3
0

if __name__ == "__main__":
    import arviz as az
    import matplotlib.pyplot as plt
    import pylfi
    import scipy.stats as stats
    import seaborn as sns

    # Ground-truth parameters of the Gaussian data-generating process.
    N = 1000
    mu_true = 163
    sigma_true = 15
    true_parameter_values = [mu_true, sigma_true]
    # likelihood = stats.norm(loc=mu_true, scale=sigma_true)
    # True likelihood, used here only to generate synthetic "observed" data.
    likelihood = pylfi.Prior('norm',
                             loc=mu_true,
                             scale=sigma_true,
                             name='likelihood')

    # Seeded draw of N observations from the true likelihood.
    obs_data = likelihood.rvs(size=N, seed=30)

    # simulator model
    def gaussian_model(mu, sigma, seed=43, n_samples=1000):
        """Simulator: draw n_samples from a Gaussian with parameters mu/sigma."""
        prior = pylfi.Prior('norm', loc=mu, scale=sigma, name='model')
        return prior.rvs(size=n_samples, seed=seed)

    # summary stats
    def summary_calculator(data):
Exemple #4
0
    def gaussian_model(mu, sigma, n_samples=1000):
        """Simulator: draw n_samples from N(mu, sigma) via scipy."""
        return stats.norm(loc=mu, scale=sigma).rvs(size=n_samples)

    # summary stats
    def summary_calculator(data):
        """Summary statistics of the data: sample mean and standard deviation."""
        return np.array([np.mean(data), np.std(data)])

    # priors
    #mu = pylfi.Prior('norm', loc=165, scale=2, name='mu', tex='$\mu$')
    #sigma = pylfi.Prior('norm', loc=17, scale=4, name='sigma', tex='$\sigma$')
    # Uniform priors: mu on [160, 170], sigma on [10, 20]
    # (scipy convention: uniform covers [loc, loc + scale]).
    mu = pylfi.Prior('uniform', loc=160, scale=10, name='mu')
    sigma = pylfi.Prior('uniform', loc=10, scale=10, name='sigma')
    priors = [mu, sigma]

    # initialize sampler
    # Rejection-ABC sampler comparing simulated vs. observed summary
    # statistics under an L2 distance, seeded for reproducibility.
    sampler = RejABC(obs_data,
                     gaussian_model,
                     summary_calculator,
                     priors,
                     distance_metric='l2',
                     seed=42
                     )

    # inference config
    # Number of posterior samples to draw and the ABC acceptance tolerance.
    n_samples = 1000
    epsilon = 1.0
Exemple #5
0
        PRNG advanced delta steps.
    """
    return np.random.PCG64(seed).advance(delta)


if __name__ == "__main__":
    import pylfi

    # Demo configuration: unseeded run, a handful of samples, three workers.
    seed = None
    n_samples = 10
    n_jobs = 3

    #theta = pylfi.Prior('uniform', loc=0, scale=1, name='theta')
    alpha = 60  # prior hyperparameter (inverse gamma distribution)
    beta = 130  # prior hyperparameter (inverse gamma distribution)
    # Inverse-gamma prior over theta, scaled by beta.
    theta = pylfi.Prior('invgamma', alpha, loc=0, scale=beta, name='theta')

    def sample(n_samples, seed):
        """Draw n_samples standard-normal variates, one per advanced PRNG state.

        Each draw i uses an independently advanced generator state so the
        sequence is reproducible and parallelizable.
        """
        return [np.random.default_rng(advance_PRNG_state(seed, i)).normal()
                for i in range(n_samples)]

    def sample2(n_samples, seed):
        samples = []
        for i in range(n_samples):
            next_gen = advance_PRNG_state(seed, i)
            sample = theta.rvs(seed=next_gen)
Exemple #6
0
import matplotlib.pyplot as plt
import numpy as np
import pylfi

###############################################################################
# We initialize a Gaussian prior over the parameter :math:`\theta`. The first
# positional argument can be any `scipy.stats` distribution passed as `str`.
# Following positional and keyword arguments are distribution specific
# (see `scipy.stats` documentation). The `name` keyword argument is required
# and expects the name of the parameter passed as `str`. The optional `tex`
# keyword argument can be used to provide LaTeX typesetting for the parameter
# name, which is used as axis label in `pyLFI`'s plotting procedures if
# provided.
theta_prior = pylfi.Prior(
    'norm', loc=0, scale=1, name='theta', tex=r'$\theta$')

###############################################################################
# Draw from the prior with the `.rvs` method. The `size` keyword sets the
# number of samples and the `seed` keyword makes the draw reproducible.
theta_prior.rvs(size=10, seed=42)

###############################################################################
# The `~.plot_prior` method evaluates and plots the prior density (pdf for
# continuous distributions, pmf for discrete ones) at the points :math:`x`.
x = np.linspace(-4, 4, 1000)
theta_prior.plot_prior(x)
Exemple #7
0
    def _sample(self, n_samples, seed):
        """Sample n_samples from the posterior with ABC Metropolis-Hastings.

        Parameters
        ----------
        n_samples : int
            Number of MCMC iterations (chain length minus the initial sample).
        seed : int
            Base seed; the PRNG state is advanced once per iteration.

        Returns
        -------
        list
            The chain of parameter vectors, length ``n_samples + 1``
            (includes the initial posterior sample).
        """

        self._n_sims = 0
        self._n_iter = 0
        samples = []

        # initialize chain
        thetas_current = self._draw_first_posterior_sample(seed)
        samples.append(thetas_current)

        # Pre-loop computations for better efficiency:
        # create instance before loop to avoid some overhead
        unif_distr = pylfi.Prior('uniform', loc=0, scale=1, name='u')

        # Joint prior log-density of the current state; only needs to be
        # re-computed if a proposal is accepted
        log_prior_current = np.array([prior_logpdf(theta_current)
                                      for prior_logpdf, theta_current in
                                      zip(self._prior_logpdfs, thetas_current)]
                                     ).prod()

        # Metropolis-Hastings algorithm
        for _ in range(n_samples):
            # Advance PRNG state
            next_gen = advance_PRNG_state(seed, self._n_iter)

            # Gaussian proposal distribution (which is symmetric)
            proposal_distr = stats.norm(
                loc=thetas_current,
                scale=self._sigma,
            )

            # Draw proposal parameters (suggest new positions)
            thetas_proposal = [proposal_distr.rvs(
                random_state=self._rng(seed=next_gen))]

            # Compute Metropolis-Hastings ratio.
            # Since the proposal density is symmetric, the proposal density
            # ratio in MH acceptance probability cancel. Thus, we need only
            # to evaluate the prior ratio. In case of multiple parameters,
            # the joint prior logpdf is computed.
            # NOTE: the loop variable is `theta_proposal` (singular) so it
            # does not shadow the `thetas_proposal` list it iterates over.
            log_prior_proposal = np.array([prior_logpdf(theta_proposal)
                                           for prior_logpdf, theta_proposal in
                                           zip(self._prior_logpdfs, thetas_proposal)]
                                          ).prod()

            r = np.exp(log_prior_proposal - log_prior_current)

            # Compute acceptance probability
            alpha = np.minimum(1., r)

            # Draw a uniform random number
            u = unif_distr.rvs(seed=next_gen)

            # Reject/accept step: only run the (expensive) simulator when the
            # MH prior ratio already accepts the proposal.
            if u < alpha:
                sim = self._simulator(*thetas_proposal)
                sim_sumstat = self._stat_calc(sim)
                distance = self.distance(
                    self._obs_sumstat, sim_sumstat)
                if distance <= self._epsilon:
                    thetas_current = thetas_proposal

                    # Re-compute current log-density for next iteration
                    log_prior_current = np.array([prior_logpdf(theta_current)
                                                  for prior_logpdf, theta_current in
                                                  zip(self._prior_logpdfs, thetas_current)]
                                                 ).prod()

            self._n_iter += 1
            # Update chain (current state repeats on rejection)
            samples.append(thetas_current)

        return samples
Exemple #8
0
import numpy as np
import pathos as pa
import pylfi
import scipy.stats as stats

# prior distribution
# Standard-normal prior over theta; keep direct handles to its densities.
prior = pylfi.Prior('norm', loc=0, scale=1, name='theta')
prior_pdf = prior.pdf
prior_logpdf = prior.logpdf

# draw from prior distribution
#thetas_current = [prior.rvs()]
# Two independent draws form the current (2-parameter) state.
thetas_current = [prior.rvs(), prior.rvs()]

# proposal distribution
# Symmetric Gaussian proposal centered on the current state.
sigma = 0.5
proposal_distr = stats.norm(loc=thetas_current, scale=sigma)
uniform_distr = stats.uniform(loc=0, scale=1)

# draw from proposal
# `rvs` with a vector `loc` yields one array of proposals (one per parameter).
thetas_proposal = [proposal_distr.rvs()]
print(thetas_proposal)
for theta in thetas_proposal:
    print(theta)

print()
# Compute prior probability of current and proposed
# The joint log-prior is the sum of marginals; `.prod()` over logpdf values
# here mirrors the sampler's implementation.
prior_current = prior_pdf(thetas_current)
prior_proposal = prior_pdf(thetas_proposal)
log_prior_current = prior_logpdf(thetas_current).prod()
log_prior_proposal = prior_logpdf(thetas_proposal).prod()