Example 1
0
    def testDensitySymmetries(self):
        """Negating samples (with mirrored ranges) must mirror the density grid."""
        # 1D: negate x and mirror the range; the density array should reverse.
        samps = Gaussian1D(0, 1, xmin=-1, xmax=4).MCSamples(12000)
        base_1d = samps.get1DDensity('x')
        samps.samples[:, 0] *= -1
        mirrored = MCSamples(samples=samps.samples, names=['x'], ranges={'x': [-4, 1]})
        self.assertTrue(np.allclose(base_1d.P, mirrored.get1DDensity('x').P[::-1]))

        # 2D: flip x first, then undo the x flip and flip y instead.
        samps = Gaussian2D([0, 0], np.diagflat([1, 2]), xmin=-1, xmax=2, ymin=0, ymax=3).MCSamples(12000)
        base_2d = samps.get2DDensity('x', 'y')
        samps.samples[:, 0] *= -1
        mirrored = MCSamples(samples=samps.samples, names=['x', 'y'], ranges={'x': [-2, 1], 'y': [0, 3]})
        self.assertTrue(np.allclose(base_2d.P, mirrored.get2DDensity('x', 'y').P[:, ::-1]))
        mirrored.samples[:, 0] *= -1
        mirrored.samples[:, 1] *= -1
        mirrored = MCSamples(samples=mirrored.samples, names=['x', 'y'], ranges={'x': [-1, 2], 'y': [-3, 0]})
        self.assertTrue(np.allclose(base_2d.P, mirrored.get2DDensity('x', 'y').P[::-1, ::], atol=1e-5))
Example 2
0
    def testDensitySymmetries(self):
        # Mirroring the samples and their ranges should mirror the density grid.
        one_d = Gaussian1D(0, 1, xmin=-1, xmax=4).MCSamples(12000)
        density = one_d.get1DDensity('x')
        one_d.samples[:, 0] *= -1
        flipped = MCSamples(samples=one_d.samples, names=['x'], ranges={'x': [-4, 1]})
        flipped_density = flipped.get1DDensity('x')
        self.assertTrue(np.allclose(density.P, flipped_density.P[::-1]))

        two_d = Gaussian2D([0, 0], np.diagflat([1, 2]), xmin=-1, xmax=2, ymin=0, ymax=3).MCSamples(12000)
        density = two_d.get2DDensity('x', 'y')
        # Flip x only: density grid reverses along the second axis.
        two_d.samples[:, 0] *= -1
        flipped = MCSamples(samples=two_d.samples, names=['x', 'y'], ranges={'x': [-2, 1], 'y': [0, 3]})
        flipped_density = flipped.get2DDensity('x', 'y')
        self.assertTrue(np.allclose(density.P, flipped_density.P[:, ::-1]))
        # Restore x and flip y instead: grid reverses along the first axis.
        flipped.samples[:, 0] *= -1
        flipped.samples[:, 1] *= -1
        flipped = MCSamples(samples=flipped.samples, names=['x', 'y'], ranges={'x': [-1, 2], 'y': [-3, 0]})
        flipped_density = flipped.get2DDensity('x', 'y')
        self.assertTrue(np.allclose(density.P, flipped_density.P[::-1, ::], atol=1e-5))
Example 3
0
    def _add_prior_density(self, plotter, posterior,
                           ndraws, normalize,
                           KL_divergence, KL_base,
                           bootstrap, n_simulate):
        """ Crudely estimate the prior density.

        Draws samples from the prior, builds a 1D KDE per parameter via
        getdist ``MCSamples`` and overplots it (dash-dot line) on each
        diagonal panel of ``plotter``. Optionally estimates the 1D marginal
        Kullback-Leibler divergence between posterior and prior and writes
        it as the panel title.

        Kullback-Leibler divergence estimated in bits for a combined run or
        the same run for which the credible intervals are calculated.

        :param plotter: getdist-style plotter whose ``subplots`` grid is
            annotated in place.
        :param posterior: posterior wrapper providing ``subset_to_plot``,
            ``bounds``, ``likelihood``, ``get_index`` and ``ID``.
        :param int ndraws: Number of prior draws used for the prior KDE.
        :param bool normalize: Normalize the prior density by its integral.
        :param KL_divergence: Truthy to estimate KL-divergences.
        :param str KL_base: ``'bits'`` or ``'nats'``; anything else raises
            :class:`ValueError` from the KL callback.
        :param bool bootstrap: Bootstrap the KL estimate (via nestcheck) to
            obtain credible intervals instead of a single point estimate.
        :param int n_simulate: Number of bootstrap realisations.

        .. note::

            This is a generator: it yields progress strings for logging and
            finally yields ``None`` when done.

        """
        # Annotate using the first run of the subset selected for plotting.
        run = posterior.subset_to_plot[0]

        yield 'Plotting prior for posterior %s...' % posterior.ID

        l = posterior.likelihood

        # Silently bail out unless the likelihood exposes a callable
        # ``prior.draw``; plotting the prior is strictly optional.
        if l is None:
            return # quietly do nothing
        elif not hasattr(l, 'prior'):
            return
        elif not hasattr(l.prior, 'draw'):
            return
        elif not callable(l.prior.draw):
            return

        # Draw prior samples; presumably ``transform=True`` yields them in
        # the same (physical) space as the posterior samples -- TODO confirm.
        samples, _ = l.prior.draw(ndraws, transform=True)

        # Reuse the run's contour styling so prior and posterior match.
        color, lw = (run.contours[key] for key in ('color', 'lw'))

        # Placeholders for the [0.025, 0.5, 0.975] bootstrap quantiles.
        quantiles = [None] * 3

        # ``condition`` mirrors ``KL_divergence``: when falsy, only the prior
        # curve is plotted and the KL estimation below is skipped.
        with verbose(KL_divergence,
                     'Estimating 1D marginal KL-divergences in %s' % KL_base,
                     'Estimated 1D marginal KL-divergences') as condition:
            # One diagonal panel per parameter.
            for i, ax in enumerate([plotter.subplots[i,i] \
                                for i in range(plotter.subplots.shape[0])]):

                name = self.params.names[i]
                bounds = {name: posterior.bounds[name]}
                # Hard-coded KDE settings for the prior density.
                settings = {'fine_bins': 1024,
                            'smooth_scale_1D': 0.3,
                            'boundary_correction_order': 1,
                            'mult_bias_correction_order': 1} # adopt from posterior settings or take custom input?

                # Locate this parameter's column in the prior draws; fall
                # back to the prior's own index if the likelihood lacks it.
                idx = l.index(name)
                if idx is None: idx = l.prior.index(name)

                # Build a 1D KDE of the prior draws for this parameter.
                bcknd = MCSamples(sampler='uncorrelated',
                                  samples=samples[:,idx],
                                  weights=None,
                                  names=[name],
                                  ranges=bounds,
                                  settings=settings)

                if normalize:
                    bcknd.get1DDensity(name).normalize(by='integral',
                                                       in_place=True)

                # Evaluate the prior density across the panel's visible range.
                x = _np.linspace(ax.xaxis.get_view_interval()[0],
                                 ax.xaxis.get_view_interval()[1],
                                 1000)

                # Dash-dot curve distinguishes the prior from the posterior.
                ax.plot(x, bcknd.get1DDensity(name).Prob(x),
                        ls='-.', color=color, lw=lw)

                if not condition: continue # go to next iteration if no KL

                # a prototype Kullback-Leibler divergence callback
                # information in bits
                # NOTE: redefined each iteration and closes over ``name``,
                # ``bcknd`` etc.; it is only invoked within this iteration,
                # so the late-binding closure is safe here.
                def KL(ns_run, logw):
                    x = ns_run['theta'][:,posterior.get_index(name)]
                    # Relative weights, normalised to the largest weight.
                    w_rel = _np.exp(logw - logw.max())
                    where = w_rel > run.kde_settings.get('min_weight_ratio',
                                                         1.0e-30)
                    prior = bcknd.get1DDensity(name).Prob(x[where])
                    # Posterior KDE evaluated at the retained sample points.
                    p = getdist_kde(x[where], x, w_rel,
                                        ranges=[posterior.bounds[name]],
                                        idx=0,
                                        normalize=normalize,
                                        settings=run.kde_settings)
                    # Due to spline interpolation, very small densities can be
                    # negative, so manually give a small positive value which
                    # does not affect KL integral approximation
                    p[p<=0.0] = p[p>0.0].min()

                    # Weighted Monte Carlo estimate of E[log p - log prior].
                    # (Local ``KL`` shadows this function's name; harmless
                    # since the function is not re-entered afterwards.)
                    KL = _np.sum(w_rel[where] \
                                   * (_np.log(p) - _np.log(prior))) \
                                   /_np.sum(w_rel[where])

                    if KL_base == 'bits':
                        return KL / _np.log(2.0)
                    elif KL_base == 'nats':
                        return KL
                    else:
                        raise ValueError('Invalid base for KL-divergence.')

                if bootstrap:
                    # Bootstrap the KL estimator at the 2.5%, 50% and 97.5%
                    # credible levels via nestcheck.
                    for j, cred_int in enumerate([0.025, 0.5, 0.975]):
                        quantiles[j] = run_ci_bootstrap(run.nestcheck_backend,
                                                     estimator_list=[KL],
                                                     cred_int=cred_int,
                                                     n_simulate=n_simulate,
                                                     simulate_weights=True,
                                                     flip_skew=True)
                    # KL in bits
                    interval = r'$D_{\mathrm{KL}}=%.2f_{-%.2f}^{+%.2f}$' \
                                                  % (quantiles[1],
                                                     quantiles[1] - quantiles[0],
                                                     quantiles[2] - quantiles[1])

                    yield ('%s KL-divergence = %.4f/-%.4f/+%.4f'
                            % (name,
                               quantiles[1],
                               quantiles[1] - quantiles[0],
                               quantiles[2] - quantiles[1]))

                    # Shrink the title slightly when TeX is off -- presumably
                    # to compensate for mathtext rendering metrics.
                    if not rcParams['text.usetex']:
                        fontsize = plotter.settings.lab_fontsize - 1
                    else:
                        fontsize = plotter.settings.lab_fontsize

                    ax.set_title(interval, color=color,
                                 fontsize=fontsize)
                else:
                    # Single point estimate: keep strictly positive weights
                    # (column 0) -- assumes a [weight, ..., theta...] sample
                    # layout with parameters from column 2; TODO confirm.
                    where = run.samples[:,0] > 0.0

                    ns_run = {'theta': run.samples[where,2:]}
                    divergence = KL(ns_run, _np.log(run.samples[where,0]))

                    yield ('%s KL-divergence = %.4f' % (name, divergence))

                    divergence = (r'$D_{\mathrm{KL}}=%.2f$' % divergence)

                    if not rcParams['text.usetex']:
                        fontsize = plotter.settings.lab_fontsize - 1
                    else:
                        fontsize = plotter.settings.lab_fontsize

                    ax.set_title(divergence, color=color,
                                 fontsize=fontsize)

        # Generator contract: signal completion to the driver.
        yield None