Code example #1
    def test_errors_and_warnings(self):
        with pm.Model():
            A = pm.Normal("A")
            B = pm.Uniform("B")
            strace = pm.sampling.NDArray(vars=[A, B])
            strace.setup(10, 0)

            with pytest.raises(ValueError, match="from existing MultiTrace"):
                pm.sampling._choose_backend(trace=MultiTrace([strace]))

            strace.record({"A": 2, "B_interval__": 0.1})
            assert len(strace) == 1
            with pytest.raises(ValueError, match="Continuation of traces"):
                pm.sampling._choose_backend(trace=strace)
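A minimal sketch of the backend round trip this test exercises: record one point by hand, then read it back through a MultiTrace. It assumes the same pymc3-era API the test uses (pm.sampling.NDArray, MultiTrace) and is illustrative, not part of the test suite.

import pymc3 as pm
from pymc3.backends.base import MultiTrace

with pm.Model():
    A = pm.Normal("A")
    strace = pm.sampling.NDArray(vars=[A])
    strace.setup(draws=1, chain=0)
    strace.record({"A": 2.0})  # one hand-written draw

trace = MultiTrace([strace])
print(trace.get_values("A"))  # -> array([2.])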
Code example #2
# Imports below assume the pymc3-era module layout this function comes from.
from typing import Dict, List, Optional

import numpy as np

from pymc3.backends.base import MultiTrace
from pymc3.backends.ndarray import NDArray
from pymc3.model import Model, modelcontext


def point_list_to_multitrace(point_list: List[Dict[str, np.ndarray]],
                             model: Optional[Model] = None) -> MultiTrace:
    """transform point list into MultiTrace"""
    _model = modelcontext(model)
    varnames = list(point_list[0].keys())
    with _model:
        chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])
        chain.setup(draws=len(point_list), chain=0)

        # Since we are simply loading a trace by hand, chain.record() needs only a
        # vacuous point function; assigning it below overrides the default.
        def point_fun(point):
            return [point[vn] for vn in varnames]

        chain.fn = point_fun
        for point in point_list:
            chain.record(point)
    return MultiTrace([chain])
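A hypothetical usage sketch for point_list_to_multitrace: each dict in the list is one draw keyed by variable name, and the result is a single-chain MultiTrace. The import path assumes the function lives next to NDArray in pymc3.backends.ndarray.

import numpy as np
import pymc3 as pm
from pymc3.backends.ndarray import point_list_to_multitrace

with pm.Model() as model:
    mu = pm.Normal("mu")

# two hand-made draws for the single variable "mu"
points = [{"mu": np.array(0.1)}, {"mu": np.array(-0.3)}]
mtrace = point_list_to_multitrace(points, model)
print(mtrace.get_values("mu"))  # -> array([ 0.1, -0.3])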
Code example #3
def _sample(draws,
            step,
            start=None,
            trace=None,
            chain=0,
            tune=None,
            progressbar=True,
            model=None,
            random_seed=None):
    sampling = _iter_sample(draws, step, start, trace, chain, tune, model,
                            random_seed)
    progress = progress_bar((), total=draws, display=progressbar)
    try:
        for i, trace in enumerate(sampling):
            if progressbar:
                progress.update(i)
    except KeyboardInterrupt:
        trace.close()
    return MultiTrace([trace])
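A hedged sketch of driving this single-chain helper directly, assuming the simplified signature shown above. _sample is internal (the supported entry point is pm.sample), so treat this as an illustration of the call, not an API recommendation.

import pymc3 as pm

with pm.Model():
    x = pm.Normal("x")
    step = pm.Metropolis()
    # one chain of 50 draws; the model is picked up from the context
    mtrace = pm.sampling._sample(draws=50, step=step, chain=0,
                                 progressbar=False, random_seed=1)

print(mtrace["x"].shape)  # -> (50,)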
Code example #4
def sample_smc(
    draws=2000,
    kernel=IMH,
    *,
    start=None,
    model=None,
    random_seed=None,
    chains=None,
    cores=None,
    compute_convergence_checks=True,
    return_inferencedata=True,
    idata_kwargs=None,
    progressbar=True,
    **kernel_kwargs,
):
    r"""
    Sequential Monte Carlo based sampling.

    Parameters
    ----------
    draws : int
        The number of samples to draw from the posterior (i.e. the last stage). This is
        also the number of independent chains. Defaults to 2000.
    kernel : SMC kernel
        SMC kernel used. Defaults to ``pm.smc.IMH`` (Independent Metropolis-Hastings).
    start : dict or list of dict
        Starting point in parameter space. It should be a list of dicts with length ``chains``.
        When ``None`` (default) the starting point is sampled from the prior distribution.
    model : Model (optional if in ``with`` context).
    random_seed : int
        Random seed.
    chains : int
        The number of chains to sample. Running independent chains is important for some
        convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
        is larger.
    cores : int
        The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
        system.
    compute_convergence_checks : bool
        Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
        Defaults to ``True``.
    return_inferencedata : bool, default=True
        Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object
        or a ``MultiTrace`` (False). Defaults to ``True``.
    idata_kwargs : dict, optional
        Keyword arguments for :func:`pymc.to_inference_data`
    progressbar : bool, optional, default=True
        Whether or not to display a progress bar in the command line.
    **kernel_kwargs : keyword arguments passed to the SMC kernel.
        The default IMH kernel takes the following keywords:
            threshold : float
                Determines the change of beta from stage to stage, i.e. indirectly the
                number of stages; the higher the value of ``threshold``, the higher the
                number of stages. Defaults to 0.5. It should be between 0 and 1.
            n_steps : int
                The number of steps of each Markov chain. If ``tune_steps == True``,
                ``n_steps`` will be used for the first stage; for the other stages the
                number of steps will be determined automatically based on the acceptance
                rate and ``p_acc_rate``, with ``n_steps`` as the maximum.
            tune_steps : bool
                Whether to compute the number of steps automatically or not. Defaults to True.
            p_acc_rate : float
                Used to compute ``n_steps`` when ``tune_steps == True``. The higher the
                value of ``p_acc_rate``, the higher the number of steps computed
                automatically. Defaults to 0.85. It should be between 0 and 1.
        Keyword arguments for other kernels should be checked in the respective docstrings.

    Notes
    -----
    SMC works by moving through successive stages. At each stage the inverse temperature
    :math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta = 0`
    we have the prior distribution and when :math:`\beta = 1` we have the posterior
    distribution. So in more general terms we are always computing samples from a tempered
    posterior that we can write as:

    .. math::

        p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)

    A summary of the algorithm is:

     1. Initialize :math:`\beta` at zero and stage at zero.
     2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0`
        the tempered posterior is the prior).
     3. Increase :math:`\beta` in order to make the effective sample size equal some
        predefined value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
     4. Compute a set of N importance weights W. The weights are computed as the ratio of the
        likelihoods of a sample at stage i+1 and stage i.
     5. Obtain :math:`S_{w}` by re-sampling according to W.
     6. Use W to compute the mean and covariance for the proposal distribution, an MVNormal.
     7. For stages other than 0 use the acceptance rate from the previous stage to estimate
        `n_steps`.
     8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`),
        starting each one from a different sample in :math:`S_{w}`. Samples are IMH as the
        proposal mean is taken from the previous posterior stage and not from the current
        point in parameter space.
     9. Repeat from step 3 until :math:`\beta \ge 1`.
     10. The final result is a collection of N samples from the posterior.


    References
    ----------
    .. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
        Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
        Geophysical Journal International, 2013, 194(3), pp.1701-1726,
        `link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__

    .. [Ching2007] Ching, J. and Chen, Y. (2007).
        Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
        Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
        816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
        %282007%29133:7%28816%29>`__
    """

    if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
        warnings.warn(
            f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
            f"It is no longer needed to distinguish between `abc` and `metropolis`",
            FutureWarning,
            stacklevel=2,
        )
        kernel = IMH

    if kernel_kwargs.pop("save_sim_data", None) is not None:
        warnings.warn(
            "save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
            "to obtain the same type of samples.",
            FutureWarning,
            stacklevel=2,
        )

    if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
        warnings.warn(
            "save_log_pseudolikelihood has been deprecated. This information is "
            "now saved as log_likelihood in models with Simulator distributions.",
            FutureWarning,
            stacklevel=2,
        )

    parallel = kernel_kwargs.pop("parallel", None)
    if parallel is not None:
        warnings.warn(
            "The argument parallel is deprecated, use the argument cores instead.",
            FutureWarning,
            stacklevel=2,
        )
        if parallel is False:
            cores = 1

    if cores is None:
        cores = _cpu_count()

    if chains is None:
        chains = max(2, cores)
    else:
        cores = min(chains, cores)

    if random_seed == -1:
        warnings.warn(
            f"random_seed should be a non-negative integer or None, got: {random_seed}. "
            "This will raise a ValueError in the future.",
            FutureWarning,
            stacklevel=2,
        )
        random_seed = None
    if isinstance(random_seed, int) or random_seed is None:
        rng = np.random.default_rng(seed=random_seed)
        random_seed = list(rng.integers(2**30, size=chains))
    elif isinstance(random_seed, Iterable):
        if len(random_seed) != chains:
            raise ValueError(
                f"Length of seeds ({len(random_seed)}) must match number of chains {chains}"
            )
    else:
        raise TypeError(
            "Invalid value for `random_seed`. Must be tuple, list, int or None"
        )

    model = modelcontext(model)

    _log = logging.getLogger("pymc")
    _log.info("Initializing SMC sampler...")
    _log.info(f"Sampling {chains} chain{'s' if chains > 1 else ''} "
              f"in {cores} job{'s' if cores > 1 else ''}")

    params = (
        draws,
        kernel,
        start,
        model,
    )

    t1 = time.time()
    if cores > 1:
        pbar = progress_bar((), total=100, display=progressbar)
        pbar.update(0)
        pbars = [pbar] + [None] * (chains - 1)

        pool = mp.Pool(cores)

        # "manually" (de)serialize params before/after multiprocessing
        params = tuple(cloudpickle.dumps(p) for p in params)
        kernel_kwargs = {
            key: cloudpickle.dumps(value)
            for key, value in kernel_kwargs.items()
        }
        results = _starmap_with_kwargs(
            pool,
            _sample_smc_int,
            [(*params, random_seed[chain], chain, pbars[chain])
             for chain in range(chains)],
            repeat(kernel_kwargs),
        )
        results = tuple(cloudpickle.loads(r) for r in results)
        pool.close()
        pool.join()

    else:
        results = []
        pbar = progress_bar((), total=100 * chains, display=progressbar)
        pbar.update(0)
        for chain in range(chains):
            pbar.offset = 100 * chain
            pbar.base_comment = f"Chain: {chain+1}/{chains}"
            results.append(
                _sample_smc_int(*params, random_seed[chain], chain, pbar,
                                **kernel_kwargs))

    (
        traces,
        sample_stats,
        sample_settings,
    ) = zip(*results)

    trace = MultiTrace(traces)
    idata = None

    # Save sample_stats
    _t_sampling = time.time() - t1
    sample_settings_dict = sample_settings[0]
    sample_settings_dict["_t_sampling"] = _t_sampling

    sample_stats_dict = sample_stats[0]
    if chains > 1:
        # Collect the stat values from each chain in a single list
        for stat in sample_stats[0].keys():
            value_list = []
            for chain_sample_stats in sample_stats:
                value_list.append(chain_sample_stats[stat])
            sample_stats_dict[stat] = value_list

    if not return_inferencedata:
        for stat, value in sample_stats_dict.items():
            setattr(trace.report, stat, value)
        for stat, value in sample_settings_dict.items():
            setattr(trace.report, stat, value)
    else:
        for stat, value in sample_stats_dict.items():
            if chains > 1:
                # Different chains might have more iteration steps, leading to a
                # non-square `sample_stats` dataset, we cast as `object` to avoid
                # numpy ragged array deprecation warning
                sample_stats_dict[stat] = np.array(value, dtype=object)
            else:
                sample_stats_dict[stat] = np.array(value)

        sample_stats = dict_to_dataset(
            sample_stats_dict,
            attrs=sample_settings_dict,
            library=pymc,
        )

        ikwargs = dict(model=model)
        if idata_kwargs is not None:
            ikwargs.update(idata_kwargs)
        idata = to_inference_data(trace, **ikwargs)
        idata = InferenceData(**idata, sample_stats=sample_stats)

    if compute_convergence_checks:
        if draws < 100:
            warnings.warn(
                "The number of samples is too small to check convergence reliably.",
                stacklevel=2,
            )
        else:
            if idata is None:
                idata = to_inference_data(trace, log_likelihood=False)
            trace.report._run_convergence_checks(idata, model)
    trace.report._log_summary()

    return idata if return_inferencedata else trace
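A minimal usage sketch on a toy model, assuming the v4-era pymc package this function ships with:

import pymc as pm

with pm.Model():
    mu = pm.Normal("mu", 0, 1)
    pm.Normal("y", mu, 1, observed=[0.2, -0.1, 0.4])
    idata = pm.sample_smc(draws=500, chains=2, progressbar=False)

print(idata.posterior["mu"].shape)  # -> (2, 500): chains x draws

Step 3 of the algorithm in the docstring (grow beta until the effective sample size of the importance weights falls to threshold * N) can also be sketched standalone. The bisection below illustrates that description; the name update_beta and its details are hypothetical, not PyMC's internal implementation.

import numpy as np

def update_beta(log_likelihoods, old_beta, threshold=0.5):
    """Bisect on the new beta so that ESS(weights) ~= threshold * N (illustrative)."""
    n = len(log_likelihoods)
    low, high = old_beta, 2.0  # high > 1 lets the search settle at beta = 1
    new_beta = old_beta
    while high - low > 1e-6:
        new_beta = 0.5 * (low + high)
        # importance weights are likelihood ratios between the two stages
        log_w = (new_beta - old_beta) * log_likelihoods
        w = np.exp(log_w - log_w.max())
        w /= w.sum()
        ess = 1.0 / np.sum(w**2)
        if ess / n > threshold:
            low = new_beta   # weights still healthy: jump further
        else:
            high = new_beta  # jump too aggressive: back off
    new_beta = min(1.0, new_beta)
    log_w = (new_beta - old_beta) * log_likelihoods
    w = np.exp(log_w - log_w.max())
    return new_beta, w / w.sum()

rng = np.random.default_rng(1)
loglik = rng.normal(size=1000)  # stand-in log-likelihood values
beta, weights = update_beta(loglik, old_beta=0.0)
print(beta, 1.0 / np.sum(weights**2))  # new beta and resulting ESS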