Example #1
    def evidence_error(self,
                       quantiles=[0.025, 0.5, 0.975],
                       n_simulate=200,
                       simulate_weights=True,
                       flip_skew=True,
                       **kwargs):
        """ Estimate evidence error for nestcheck-compatible runs.

        E.g., if you want the interval about the median that contains 90% of
        evidence realisations, declare ``quantiles=[0.05,0.5,0.95]``.

        """
        if kwargs:
            self.set_subset(**kwargs)

        nestcheck_bcknds, runs = self._filter_nestcheck_compatible()

        _quantiles = {}
        for bcknd, run in zip(nestcheck_bcknds, runs):
            _quantiles[run.prepend_ID] = [
                run_ci_bootstrap(bcknd,
                                 estimator_list=[logz],
                                 cred_int=q,
                                 n_simulate=n_simulate,
                                 simulate_weights=simulate_weights,
                                 flip_skew=flip_skew)[0] for q in quantiles
            ]
        return _quantiles
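A minimal usage sketch for the method above, assuming a hypothetical post-processing object ``runs`` that exposes it (``run_ci_bootstrap`` and ``logz`` are presumably nestcheck's ``error_analysis.run_ci_bootstrap`` and ``estimators.logz``):

# Hypothetical call: median log-evidence and a 95% interval from 200 bootstrap realisations.
logz_quantiles = runs.evidence_error(quantiles=[0.025, 0.5, 0.975], n_simulate=200)
for run_id, (lower, median, upper) in logz_quantiles.items():
    print('%s: ln(Z) = %.2f (-%.2f/+%.2f)'
          % (run_id, median, median - lower, upper - median))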
Example #2
def calculate_intervals(quantiles):
    # ``run``, ``get_estimator``, ``ind``, ``n_simulate``, ``_np`` and
    # ``run_ci_bootstrap`` are free variables taken from the enclosing scope.
    cred = _np.zeros((len(quantiles), len(quantiles)), dtype=_np.double)
    for j, p in enumerate(quantiles):
        for k, q in enumerate(quantiles):
            # cred[j,k] is the bootstrap credible level ``q`` of the estimator
            # returned by ``get_estimator(p, ind)``
            cred[j,k] = run_ci_bootstrap(run.nestcheck_backend,
                                         estimator_list=[get_estimator(p, ind)],
                                         cred_int=q,
                                         n_simulate=n_simulate,
                                         simulate_weights=True,
                                         flip_skew=True)[0]
    return cred
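The same bootstrap call can be exercised directly against nestcheck; a minimal standalone sketch, assuming a PolyChord run with the hypothetical file root ``'example_run'`` and a nestcheck version that accepts the ``simulate_weights`` flag used above:

import nestcheck.data_processing
import nestcheck.estimators as e
from nestcheck.error_analysis import run_ci_bootstrap

# Load the nested-sampling output into nestcheck's run dictionary format.
run = nestcheck.data_processing.process_polychord_run('example_run', base_dir='chains')

# Bootstrap the 2.5%, 50% and 97.5% credible levels of the log-evidence estimator.
cred = [run_ci_bootstrap(run, estimator_list=[e.logz], cred_int=q,
                         n_simulate=200, simulate_weights=True, flip_skew=True)[0]
        for q in (0.025, 0.5, 0.975)]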
Example #3
    def KL_divergence(self,
                      base='bits',
                      bootstrap=False,
                      quantiles=[0.025, 0.5, 0.975],
                      n_simulate=200,
                      **kwargs):
        """ Kullback-Leibler divergence integral jointly for all parameters.

        E.g., if you want the interval about the median that contains 90% of
        divergence realisations, declare ``quantiles=[0.05,0.5,0.95]``.

        """
        if kwargs:
            self.set_subset(**kwargs)

        nestcheck_bcknds, runs = self._filter_nestcheck_compatible()

        def estimator(ns_run, logw):
            w_rel = _np.exp(logw - logw.max())
            KL = _np.sum(w_rel * ns_run['logl']) / _np.sum(w_rel)
            KL -= logsumexp(logw)

            if base == 'bits':
                return KL / _np.log(2.0)
            elif base == 'nats':
                return KL
            else:
                raise ValueError('Invalid base for KL-divergence.')

        if bootstrap:
            _quantiles = {}
            for bcknd, run in zip(nestcheck_bcknds, runs):
                _quantiles[run.prepend_ID] = [
                    run_ci_bootstrap(bcknd,
                                     estimator_list=[estimator],
                                     cred_int=q,
                                     n_simulate=n_simulate,
                                     simulate_weights=True,
                                     flip_skew=True)[0] for q in quantiles
                ]
            return _quantiles
        else:
            divergence = {}
            for bcknd, run in zip(nestcheck_bcknds, runs):
                divergence[run.prepend_ID] = estimator(bcknd, get_logw(bcknd))

            return divergence
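The nested ``estimator`` uses the identity D_KL(posterior || prior) = E_posterior[ln L] - ln Z, i.e. the posterior-weighted mean log-likelihood minus the log-evidence term ``logsumexp(logw)``, optionally converted from nats to bits. A minimal usage sketch, assuming the same hypothetical ``runs`` object as in Example #1 (``get_logw`` is presumably ``nestcheck.ns_run_utils.get_logw``):

# Hypothetical call: bootstrap the prior-to-posterior information gain in bits,
# returning per-run [2.5%, 50%, 97.5%] credible values over 200 realisations.
kl_quantiles = runs.KL_divergence(base='bits',
                                  bootstrap=True,
                                  quantiles=[0.025, 0.5, 0.975],
                                  n_simulate=200)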
Example #4
    def _add_prior_density(self, plotter, posterior,
                           ndraws, normalize,
                           KL_divergence, KL_base,
                           bootstrap, n_simulate):
        """ Crudely estimate the prior density.

        The Kullback-Leibler divergence is estimated in bits for a combined run
        or for the same run for which the credible intervals are calculated.

        """
        run = posterior.subset_to_plot[0]

        yield 'Plotting prior for posterior %s...' % posterior.ID

        l = posterior.likelihood

        if l is None:
            return # quietly do nothing
        elif not hasattr(l, 'prior'):
            return
        elif not hasattr(l.prior, 'draw'):
            return
        elif not callable(l.prior.draw):
            return

        samples, _ = l.prior.draw(ndraws, transform=True)

        color, lw = (run.contours[key] for key in ('color', 'lw'))

        quantiles = [None] * 3

        with verbose(KL_divergence,
                     'Estimating 1D marginal KL-divergences in %s' % KL_base,
                     'Estimated 1D marginal KL-divergences') as condition:
            for i, ax in enumerate([plotter.subplots[i,i] \
                                for i in range(plotter.subplots.shape[0])]):

                name = self.params.names[i]
                bounds = {name: posterior.bounds[name]}
                settings = {'fine_bins': 1024,
                            'smooth_scale_1D': 0.3,
                            'boundary_correction_order': 1,
                            'mult_bias_correction_order': 1} # adopt from posterior settings or take custom input?

                idx = l.index(name)
                if idx is None: idx = l.prior.index(name)

                bcknd = MCSamples(sampler='uncorrelated',
                                  samples=samples[:,idx],
                                  weights=None,
                                  names=[name],
                                  ranges=bounds,
                                  settings=settings)

                if normalize:
                    bcknd.get1DDensity(name).normalize(by='integral',
                                                       in_place=True)

                x = _np.linspace(ax.xaxis.get_view_interval()[0],
                                 ax.xaxis.get_view_interval()[1],
                                 1000)

                ax.plot(x, bcknd.get1DDensity(name).Prob(x),
                        ls='-.', color=color, lw=lw)

                if not condition: continue # go to next iteration if no KL

                # a prototype Kullback-Leibler divergence callback
                # information in bits
                def KL(ns_run, logw):
                    x = ns_run['theta'][:,posterior.get_index(name)]
                    w_rel = _np.exp(logw - logw.max())
                    where = w_rel > run.kde_settings.get('min_weight_ratio',
                                                         1.0e-30)
                    prior = bcknd.get1DDensity(name).Prob(x[where])
                    p = getdist_kde(x[where], x, w_rel,
                                        ranges=[posterior.bounds[name]],
                                        idx=0,
                                        normalize=normalize,
                                        settings=run.kde_settings)
                    # Due to spline interpolation, very small densities can be
                    # negative, so manually assign a small positive value that
                    # does not affect the KL integral approximation
                    p[p<=0.0] = p[p>0.0].min()

                    KL = _np.sum(w_rel[where] \
                                   * (_np.log(p) - _np.log(prior))) \
                                   /_np.sum(w_rel[where])

                    if KL_base == 'bits':
                        return KL / _np.log(2.0)
                    elif KL_base == 'nats':
                        return KL
                    else:
                        raise ValueError('Invalid base for KL-divergence.')

                if bootstrap:
                    for j, cred_int in enumerate([0.025, 0.5, 0.975]):
                        # take the first (and only) estimator's credible value
                        quantiles[j] = run_ci_bootstrap(run.nestcheck_backend,
                                                        estimator_list=[KL],
                                                        cred_int=cred_int,
                                                        n_simulate=n_simulate,
                                                        simulate_weights=True,
                                                        flip_skew=True)[0]
                    # KL in bits
                    interval = r'$D_{\mathrm{KL}}=%.2f_{-%.2f}^{+%.2f}$' \
                                                  % (quantiles[1],
                                                     quantiles[1] - quantiles[0],
                                                     quantiles[2] - quantiles[1])

                    yield ('%s KL-divergence = %.4f/-%.4f/+%.4f'
                            % (name,
                               quantiles[1],
                               quantiles[1] - quantiles[0],
                               quantiles[2] - quantiles[1]))

                    if not rcParams['text.usetex']:
                        fontsize = plotter.settings.lab_fontsize - 1
                    else:
                        fontsize = plotter.settings.lab_fontsize

                    ax.set_title(interval, color=color,
                                 fontsize=fontsize)
                else:
                    where = run.samples[:,0] > 0.0

                    ns_run = {'theta': run.samples[where,2:]}
                    divergence = KL(ns_run, _np.log(run.samples[where,0]))

                    yield ('%s KL-divergence = %.4f' % (name, divergence))

                    divergence = (r'$D_{\mathrm{KL}}=%.2f$' % divergence)

                    if not rcParams['text.usetex']:
                        fontsize = plotter.settings.lab_fontsize - 1
                    else:
                        fontsize = plotter.settings.lab_fontsize

                    ax.set_title(divergence, color=color,
                                 fontsize=fontsize)

        yield None
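The prior curve drawn on each diagonal panel is a getdist kernel density estimate built from prior draws; a minimal standalone sketch of that step, with a toy uniform prior standing in for ``l.prior.draw()`` and the KDE settings copied from the method above:

import numpy as _np
from getdist import MCSamples

# Toy stand-in for l.prior.draw(): 10000 draws from a uniform prior on [0, 1].
samples = _np.random.uniform(0.0, 1.0, size=10000)

bcknd = MCSamples(sampler='uncorrelated',
                  samples=samples,
                  weights=None,
                  names=['p'],
                  ranges={'p': [0.0, 1.0]},
                  settings={'fine_bins': 1024,
                            'smooth_scale_1D': 0.3,
                            'boundary_correction_order': 1,
                            'mult_bias_correction_order': 1})

# Normalise the 1D density by its integral, then evaluate it on a plotting grid.
bcknd.get1DDensity('p').normalize(by='integral', in_place=True)
x = _np.linspace(0.0, 1.0, 1000)
prior_density = bcknd.get1DDensity('p').Prob(x)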