Example #1
 def test_cached_gaussian_prior(self):
     """Check the cached_gaussian prior."""
     settings = get_minimal_settings()
     self.assertRaises(TypeError,
                       priors.GaussianCached,
                       prior_scale=10,
                       unexpected=0)
     with warnings.catch_warnings(record=True) as war:
         warnings.simplefilter("always")
         settings.prior = priors.GaussianCached(prior_scale=10,
                                                save_dict=True,
                                                n_dim=settings.n_dim,
                                                cache_dir=TEST_CACHE_DIR,
                                                interp_density=10,
                                                logx_min=-30)
         self.assertEqual(len(war), 1)
     # Test inside and outside the cached regime (logx < -10).
     # The cached interpolation is only accurate to a few decimal places,
     # so a fairly low 'places' tolerance is needed.
     for logx in [-1, -11]:
         self.assertAlmostEqual(settings.logx_given_logl(
             settings.logl_given_logx(logx)),
                                logx,
                                places=3)
     # Test array version of the function too
     logx = np.asarray([-2])
     self.assertAlmostEqual(settings.logx_given_logl(
         settings.logl_given_logx(logx)[0]),
                            logx[0],
                            places=12)
     settings.get_settings_dict()
     # Generate an NS run using get_run_data to check that it consults the
     # cache before submitting the process to parallel_apply.
     ns_run = ns.get_run_data(settings, 1, load=False, save=False)[0]
     values = nestcheck.ns_run_utils.run_estimators(ns_run, ESTIMATOR_LIST)
     self.assertFalse(np.any(np.isnan(values)))
     # Check the argument options and warning messages for
     # interp_r_logx_dict.
     with warnings.catch_warnings(record=True) as war:
         warnings.simplefilter("always")
         self.assertRaises(
             TypeError,
             perfectns.cached_gaussian_prior.interp_r_logx_dict,
             2000,
             10,
             logx_min=-100,
             interp_density=1,
             unexpected=0)
         self.assertEqual(len(war), 1)
     self.assertRaises(TypeError,
                       perfectns.cached_gaussian_prior.interp_r_logx_dict,
                       200,
                       10,
                       logx_min=-100,
                       interp_density=1,
                       unexpected=0)
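
A minimal usage sketch (not part of the test suite; assumes perfectns and its dependencies are installed) of the logx/logl round trip exercised above, using the analytic Gaussian prior so no cache directory is needed:

import perfectns.priors as priors
import perfectns.settings

settings = perfectns.settings.PerfectNSSettings()
settings.n_dim = 3
settings.prior = priors.Gaussian(prior_scale=10)  # analytic (non-cached) prior
logx = -5.0
logl = settings.logl_given_logx(logx)
# logx_given_logl inverts logl_given_logx, so the round trip should
# recover logx to high precision; the cached prior only matches to a few
# decimal places inside its interpolated regime.
print(settings.logx_given_logl(logl) - logx)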
Example #2
 def test_get_run_data_caching(self):
     """Check saving and loading of cached runs in get_run_data."""
     settings = get_minimal_settings()
     settings.dynamic_goal = None
     settings.n_samples_max = 100
     ns.get_run_data(settings,
                     1,
                     save=True,
                     load=True,
                     check_loaded_settings=True,
                     cache_dir=TEST_CACHE_DIR)
     # Test loading a cached run and checking the stored settings.
     ns.get_run_data(settings,
                     1,
                     save=True,
                     load=True,
                     check_loaded_settings=True,
                     cache_dir=TEST_CACHE_DIR)
     # Test loading and checking settings when the settings do not match.
     # This only works when changing a setting which doesn't affect the
     # save name.
     settings.dynamic_goal = 0
     ns.get_run_data(settings,
                     1,
                     save=True,
                     load=True,
                     check_loaded_settings=True,
                     cache_dir=TEST_CACHE_DIR)
     settings.n_samples_max += 1
     with warnings.catch_warnings(record=True) as war:
         warnings.simplefilter("always")
         ns.get_run_data(settings,
                         1,
                         save=True,
                         load=True,
                         check_loaded_settings=True,
                         cache_dir=TEST_CACHE_DIR)
         self.assertEqual(len(war), 1)
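
A hedged sketch of the caching behaviour tested above (assumes perfectns is installed; 'cache' is an arbitrary directory name): the first call generates and saves the runs, and an identical repeat call loads them from disk instead of recomputing.

import perfectns.nested_sampling as ns
import perfectns.settings

settings = perfectns.settings.PerfectNSSettings()
settings.dynamic_goal = None
# Generates 2 standard NS runs and writes them to ./cache; calling this
# again with the same settings loads the saved runs and checks that the
# stored settings match.
runs = ns.get_run_data(settings, 2, save=True, load=True,
                       check_loaded_settings=True, cache_dir='cache')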
Example #3
def plot_dynamic_nlive(dynamic_goals, settings_in, **kwargs):
    """
    Plot the allocations of live points as a function of logX for different
    dynamic_goal settings.
    Plots also include analytically calculated distributions of relative
    posterior mass and relative posterior mass remaining.

    Parameters
    ----------
    dynamic_goals: list of floats and/or None
        dynamic_goal setting values to plot; None corresponds to standard
        (non-dynamic) nested sampling.
    settings_in: PerfectNSSettings object
    tuned_dynamic_ps: list of bools, optional
        tuned_dynamic_p settings corresponding to each dynamic goal setting.
        Defaults to False for all dynamic goals.
    logx_min: float, optional
        Lower limit of logx axis. If not specified this is set to the lowest
        logx reached by any of the runs.
    load: bool, optional
        Should the nested sampling runs be loaded from cache if available?
    save: bool, optional
        Should the nested sampling runs be cached?
    ymax: float, optional
        Maximum value for the plot's nlive axis (y-axis).
    n_run: int, optional
        How many runs to plot for each dynamic goal.
    npoints: int, optional
        How many points to have in the logx array used to calculate and plot
        analytical weights.
    figsize: tuple, optional
        Size of figure in inches.

    Returns
    -------
    fig: matplotlib figure
    """
    tuned_dynamic_ps = kwargs.pop('tuned_dynamic_ps',
                                  [False] * len(dynamic_goals))
    save = kwargs.pop('save', True)
    load = kwargs.pop('load', True)
    npoints = kwargs.pop('npoints', 100)
    n_run = kwargs.pop('n_run', 10)
    # Confine settings edits to within this function
    settings = copy.deepcopy(settings_in)
    run_dict = {}
    # work out n_samples_max from first set of runs
    n_sample_stats = np.zeros((len(dynamic_goals), 2))
    method_names = []  # use list to store labels so order is preserved
    for i, dg in enumerate(dynamic_goals):
        print('dynamic_goal=' + str(dg))
        # Make label
        if dg is None:
            label = 'standard'
        else:
            label = 'dynamic $G=' + str(dg) + '$'
            if tuned_dynamic_ps[i] is True:
                label = 'tuned ' + label
        method_names.append(label)
        settings.dynamic_goal = dg
        settings.tuned_dynamic_p = tuned_dynamic_ps[i]
        temp_runs = ns.get_run_data(settings, n_run, parallel=True,
                                    load=load, save=save)
        n_samples = np.asarray([run['logl'].shape[0] for run in temp_runs])
        n_sample_stats[i, 0] = np.mean(n_samples)
        n_sample_stats[i, 1] = np.std(n_samples, ddof=1)
        if i == 0 and settings.n_samples_max is None:
            settings.n_samples_max = int(n_sample_stats[0, 0] *
                                         (settings.nlive_const - 1) /
                                         settings.nlive_const)
        run_dict[label] = temp_runs
        print('mean samples per run:', n_sample_stats[i, 0],
              'std:', n_sample_stats[i, 1])
    fig = nestcheck.plots.plot_run_nlive(
        method_names, run_dict, post_mass_norm='dynamic $G=1$',
        npoints=npoints, logx_given_logl=settings.logx_given_logl,
        logl_given_logx=settings.logl_given_logx,
        cum_post_mass_norm='dynamic $G=0$', **kwargs)
    # Plot the tuned posterior mass
    if 'tuned dynamic $G=1$' in method_names:
        ax = fig.axes[0]
        logx = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], npoints)
        # Get expected magnitude of parameter
        # This is not defined for logx=0 so exclude final value of logx
        param_exp = settings.r_given_logx(logx[:-1]) / np.sqrt(settings.n_dim)
        # Tuned weight is the relative posterior mass times the expected
        # magnitude of the parameter being considered
        w_an = nestcheck.plots.rel_posterior_mass(
            logx, settings.logl_given_logx(logx))
        w_tuned = w_an[:-1] * param_exp
        w_tuned /= np.trapz(w_tuned, x=logx[:-1])
        # Get the normalising constant
        integrals = np.zeros(len(run_dict['tuned dynamic $G=1$']))
        for nr, run in enumerate(run_dict['tuned dynamic $G=1$']):
            logx_run = settings.logx_given_logl(run['logl'])
            logx[0] = 0  # so the analytic weight lines extend all the
            # way to the end of the axis when they are normalised
            integrals[nr] = -np.trapz(run['nlive_array'], x=logx_run)
        w_tuned *= np.mean(integrals)
        # Plot the tuned posterior mass
        ax.plot(logx[:-1], w_tuned, linewidth=2, label='tuned importance',
                linestyle='-.', dashes=(6, 1.5, 1, 1.5), color='k')
        ax.legend(ncol=3)
    return fig
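
A usage sketch for plot_dynamic_nlive (assumes perfectns and matplotlib are installed; the output filename is illustrative). Including None alongside the dynamic goals adds a standard run, and goals 0 and 1 provide the 'dynamic $G=0$' and 'dynamic $G=1$' runs used to normalise the posterior mass lines.

import perfectns.plots
import perfectns.settings

settings = perfectns.settings.PerfectNSSettings()
settings.n_dim = 3
# 3 runs per goal keeps this quick; increase n_run for smoother plots.
fig = perfectns.plots.plot_dynamic_nlive([None, 0, 1], settings,
                                         n_run=3, load=False, save=False)
fig.savefig('dynamic_nlive.pdf')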
Example #4
def get_bootstrap_results(n_run, n_simulate, estimator_list, settings,
                          **kwargs):
    """
    Generate data frame showing the standard deviations of the results of
    repeated calculations and estimated sampling errors from bootstrap
    resampling.

    This function was used for Table 5 in 'Dynamic nested sampling: an improved
    algorithm for nested sampling parameter estimation and evidence
    calculation' (Higson et al., 2019). See the paper for more details.

    Parameters
    ----------
    n_run: int
        how many runs to use
    n_simulate: int
        how many times to resample the nested sampling run in each bootstrap
        standard deviation estimate.
    estimator_list: list of estimator objects
    settings: PerfectNSSettings object
    load: bool, optional
        should run data and results be loaded if available?
    save: bool, optional
        should run data and results be saved?
    parallel: bool, optional
    ninit_sep: bool, optional
        Included in the results cache file name.
    cache_dir: str, optional
        Directory to use for caching.
    add_sim_method: bool, optional
        should we also calculate standard deviations using the simulated
        weights method for comparison with bootstrap resampling? This method is
        inaccurate for parameter estimation.
    n_simulate_ci: int, optional
        how many times to resample the nested sampling run in each bootstrap
        credible interval estimate. These may require more simulations than the
        standard deviation estimate.
    run_random_seeds: list, optional
        list of random seeds to use for generating runs.
    n_run_ci: int, optional
        how many runs to use for each credible interval estimate. You may want
        to set this to lower than n_run if n_simulate_ci is large as otherwise
        the credible interval estimate may take a long time.
    cred_int: float, optional
        one-tailed credible interval to calculate
    max_workers: int or None, optional
        Number of processes.
        If max_workers is None then concurrent.futures.ProcessPoolExecutor
        defaults to using the number of processors of the machine.
        N.B. if max_workers=None and running on supercomputer clusters with
        multiple nodes, this may default to the number of processors on a
        single node, in which case there will be no speedup from multiple
        nodes (specify max_workers manually in this case).

    Returns
    -------
    results: pandas data frame
        results data frame.
        Contains two columns for each estimator - the second column (with
        '_unc' appended to the title) shows the numerical uncertainty in the
        first column.
        Contains rows:
            true values: analytical values of estimators for this likelihood
                and posterior if available
            repeats mean: mean calculation result
            repeats std: standard deviation of calculation results
            bs std / repeats std: mean bootstrap standard deviation estimate as
                a fraction of the standard deviation of repeated results.
            bs estimate % variation: standard deviation of bootstrap estimates
                as a percentage of the mean estimate.
            [only if add_sim_method is True]:
                sim std / repeats std: as for 'bs std / repeats std' but with
                    simulation method standard deviation estimates.
                sim estimate % variation: as for 'bs estimate % variation' but
                    with simulation method standard deviation estimates.
            bs [cred_int] CI: mean bootstrap credible interval estimate.
            bs +-1std % coverage: % of calculation results falling within +- 1
                mean bootstrap standard deviation estimate of the mean.
            bs [cred_int] CI % coverage: % of calculation results which are
                less than the mean bootstrap credible interval estimate.
    """
    load = kwargs.pop('load', False)
    save = kwargs.pop('save', False)
    max_workers = kwargs.pop('max_workers', None)
    ninit_sep = kwargs.pop('ninit_sep', True)
    parallel = kwargs.pop('parallel', True)
    cache_dir = kwargs.pop('cache_dir', 'cache')
    add_sim_method = kwargs.pop('add_sim_method', False)
    n_simulate_ci = kwargs.pop('n_simulate_ci', n_simulate)
    n_run_ci = kwargs.pop('n_run_ci', n_run)
    cred_int = kwargs.pop('cred_int', 0.95)
    run_random_seeds = kwargs.pop('run_random_seeds', list(range(n_run)))
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # make save_name
    save_root = ('bootstrap_results_' + str(n_simulate) + 'nsim_' +
                 str(ninit_sep) + 'sep')
    save_root += '_' + settings.save_name()
    save_root += '_' + str(n_run) + 'reps'
    save_file = cache_dir + '/' + save_root + '.pkl'
    # try loading results
    if load:
        try:
            return pd.read_pickle(save_file)
        except OSError:
            pass
    # start function
    est_names = [est.latex_name for est in estimator_list]
    # generate runs
    run_list = ns.get_run_data(settings, n_run, save=save, load=load,
                               random_seeds=run_random_seeds,
                               cache_dir=cache_dir,
                               max_workers=max_workers,
                               parallel=parallel)
    # Sort in order of random seeds. This makes the credible interval
    # results reproducible even when only the first section of run_list
    # is used.
    run_list = sorted(run_list, key=lambda r: r['random_seed'])
    rep_values = pu.parallel_apply(
        nestcheck.ns_run_utils.run_estimators, run_list,
        func_args=(estimator_list,), max_workers=max_workers,
        parallel=parallel)
    results = pf.summary_df_from_list(rep_values, est_names)
    new_index = ['repeats ' +
                 results.index.get_level_values('calculation type'),
                 results.index.get_level_values('result type')]
    results.set_index(new_index, inplace=True)
    results.index.rename('calculation type', level=0, inplace=True)
    # get bootstrap std estimate
    bs_values = pu.parallel_apply(
        nestcheck.error_analysis.run_std_bootstrap, run_list,
        func_args=(estimator_list,),
        func_kwargs={'n_simulate': n_simulate},
        max_workers=max_workers,
        parallel=parallel)
    bs_df = pf.summary_df_from_list(bs_values, est_names)
    # Get the mean bootstrap std estimate as a fraction of the std measured
    # from repeated calculations.
    results.loc[('bs std / repeats std', 'value'), :] = \
        (bs_df.loc[('mean', 'value')] / results.loc[('repeats std', 'value')])
    bs_std_ratio_unc = pf.array_ratio_std(
        bs_df.loc[('mean', 'value')],
        bs_df.loc[('mean', 'uncertainty')],
        results.loc[('repeats std', 'value')],
        results.loc[('repeats std', 'uncertainty')])
    results.loc[('bs std / repeats std', 'uncertainty'), :] = \
        bs_std_ratio_unc
    # Get the fractional variation of std estimates
    # multiply by 100 to express as a percentage
    results.loc[('bs estimate % variation', 'value'), :] = \
        100 * bs_df.loc[('std', 'value')] / bs_df.loc[('mean', 'value')]
    results.loc[('bs estimate % variation', 'uncertainty'), :] = \
        100 * bs_df.loc[('std', 'uncertainty')] / bs_df.loc[('mean', 'value')]
    if add_sim_method:
        # get std from simulation estimate
        sim_values = pu.parallel_apply(
            nestcheck.error_analysis.run_std_simulate, run_list,
            func_args=(estimator_list,),
            func_kwargs={'n_simulate': n_simulate},
            max_workers=max_workers,
            parallel=parallel)
        sim_df = pf.summary_df_from_list(sim_values, est_names)
        # Get the mean simulation std estimate as a fraction of the std
        # measured from repeated calculations.
        results.loc[('sim std / repeats std', 'value'), :] = \
            (sim_df.loc[('mean', 'value')] /
             results.loc[('repeats std', 'value')])
        sim_std_ratio_unc = pf.array_ratio_std(
            sim_df.loc[('mean', 'value')],
            sim_df.loc[('mean', 'uncertainty')],
            results.loc[('repeats std', 'value')],
            results.loc[('repeats std', 'uncertainty')])
        results.loc[('sim std / repeats std', 'uncertainty'), :] = \
            sim_std_ratio_unc
        # Get the fractional variation of std estimates
        # Multiply by 100 to express as a percentage
        results.loc[('sim estimate % variation', 'value'), :] = \
            100 * sim_df.loc[('std', 'value')] / sim_df.loc[('mean', 'value')]
        results.loc[('sim estimate % variation', 'uncertainty'), :] = \
            (100 * sim_df.loc[('std', 'uncertainty')] /
             sim_df.loc[('mean', 'value')])
    # get bootstrap CI estimates
    bs_cis = pu.parallel_apply(
        nestcheck.error_analysis.run_ci_bootstrap, run_list[:n_run_ci],
        func_args=(estimator_list,),
        func_kwargs={'n_simulate': n_simulate_ci,
                     'cred_int': cred_int,
                     'random_seeds': range(n_simulate_ci)},
        max_workers=max_workers, parallel=parallel)
    bs_ci_df = pf.summary_df_from_list(bs_cis, est_names)
    results.loc[('bs ' + str(cred_int) + ' CI', 'value'), :] = \
        bs_ci_df.loc[('mean', 'value')]
    results.loc[('bs ' + str(cred_int) + ' CI', 'uncertainty'), :] = \
        bs_ci_df.loc[('mean', 'uncertainty')]
    # add coverage for +- 1 bootstrap std estimate
    max_value = (results.loc[('repeats mean', 'value')].values
                 + bs_df.loc[('mean', 'value')].values)
    min_value = (results.loc[('repeats mean', 'value')].values
                 - bs_df.loc[('mean', 'value')].values)
    rep_values_array = np.stack(rep_values, axis=1)
    assert rep_values_array.shape == (len(estimator_list), n_run)
    coverage = np.zeros(rep_values_array.shape[0])
    for i, _ in enumerate(coverage):
        ind = np.where((rep_values_array[i, :] > min_value[i]) &
                       (rep_values_array[i, :] < max_value[i]))
        coverage[i] = ind[0].shape[0] / rep_values_array.shape[1]
    # multiply by 100 to express as a percentage
    results.loc[('bs +-1std % coverage', 'value'), :] = coverage * 100
    # add credible interval coverage
    max_value = results.loc[('bs ' + str(cred_int) + ' CI', 'value')].values
    ci_coverage = np.zeros(len(estimator_list))
    for i, _ in enumerate(ci_coverage):
        ind = np.where(rep_values_array[i, :] < max_value[i])
        ci_coverage[i] = ind[0].shape[0] / rep_values_array.shape[1]
    # multiply by 100 to express as a percentage
    results.loc[('bs ' + str(cred_int) + ' CI % coverage', 'value'), :] = \
        (ci_coverage * 100)
    if save:
        # save the results data frame
        print('get_bootstrap_results: results saved to\n' + save_file)
        results.to_pickle(save_file)
    return results
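
A hedged usage sketch for get_bootstrap_results, assuming it lives in perfectns.results_tables and that perfectns, nestcheck and pandas are installed; the estimators and run counts are illustrative only.

import perfectns.estimators as e
import perfectns.results_tables as rt
import perfectns.settings

settings = perfectns.settings.PerfectNSSettings()
settings.dynamic_goal = 1  # estimate errors on dynamic NS runs
estimator_list = [e.LogZ(), e.ParamMean()]
# Small n_run and n_simulate keep this quick; increase both for stable
# numbers.
results = rt.get_bootstrap_results(10, 20, estimator_list, settings,
                                   parallel=False)
print(results)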
Example #5
def get_dynamic_results(n_run, dynamic_goals_in, estimator_list_in,
                        settings_in, **kwargs):
    """
    Generate data frame showing the standard deviations of the results of
    repeated calculations and the efficiency gains (ratios of the variances
    of the results) from different dynamic goals. To make the comparison
    fair, settings.n_samples_max for dynamic nested sampling is set slightly
    below the mean number of samples used by standard nested sampling.

    This function was used for Tables 1, 2, 3 and 4, as well as to generate the
    results shown in figures 6 and 7 of 'Dynamic nested sampling: an improved
    algorithm for nested sampling parameter estimation and evidence
    calculation' (Higson et al., 2019). See the paper for a more detailed
    description.

    Parameters
    ----------
    n_run: int
        how many runs to use
    dynamic_goals_in: list of floats
        which dynamic goals to test
    estimator_list_in: list of estimator objects
    settings_in: PerfectNSSettings object
    load: bool, optional
        should run data and results be loaded if available?
    save: bool, optional
        should run data and results be saved?
    overwrite_existing: bool, optional
        if a file exists already but we generate new run data, should we
        overwrite the existing file when saved?
    run_random_seeds: list, optional
        list of random seeds to use for generating runs.
    parallel: bool, optional
    cache_dir: str, optional
        Directory to use for caching.
    tuned_dynamic_ps: list of bools, same length as dynamic_goals_in, optional
    max_workers: int or None, optional
        Number of processes.
        If max_workers is None then concurrent.futures.ProcessPoolExecutor
        defaults to using the number of processors of the machine.
        N.B. if max_workers=None and running on supercomputer clusters with
        multiple nodes, this may default to the number of processors on a
        single node, in which case there will be no speedup from multiple
        nodes (specify max_workers manually in this case).

    Returns
    -------
    results: pandas data frame
        results data frame.
        Contains rows:
            mean [dynamic goal]: mean calculation result for standard nested
                sampling and dynamic nested sampling with each input dynamic
                goal.
            std [dynamic goal]: standard deviation of results for standard
                nested sampling and dynamic nested sampling with each input
                dynamic goal.
            gain [dynamic goal]: the efficiency gain (computational speedup)
                from dynamic nested sampling compared to standard nested
                sampling. This equals (variance of standard results) /
                (variance of dynamic results); see the dynamic nested
                sampling paper for more details.
    """
    load = kwargs.pop('load', False)
    save = kwargs.pop('save', False)
    max_workers = kwargs.pop('max_workers', None)
    parallel = kwargs.pop('parallel', True)
    cache_dir = kwargs.pop('cache_dir', 'cache')
    overwrite_existing = kwargs.pop('overwrite_existing', True)
    run_random_seeds = kwargs.pop('run_random_seeds', list(range(n_run)))
    tuned_dynamic_ps = kwargs.pop('tuned_dynamic_ps',
                                  [False] * len(dynamic_goals_in))
    assert len(tuned_dynamic_ps) == len(dynamic_goals_in)
    for goal in dynamic_goals_in:
        assert goal is not None, \
            'Goals should be dynamic - standard NS already included'
    # Add a standard nested sampling run for comparison:
    dynamic_goals = [None] + dynamic_goals_in
    tuned_dynamic_ps = [False] + tuned_dynamic_ps
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # Make a copy of the input settings to stop us editing them
    settings = copy.deepcopy(settings_in)
    # make save_name
    save_root = 'dynamic_test'
    for dg in dynamic_goals_in:
        save_root += '_' + str(dg).replace('.', '_')
    save_root += '_' + settings.save_name(include_dg=False)
    save_root += '_' + str(n_run) + 'reps'
    save_file = cache_dir + '/' + save_root + '.pkl'
    # try loading results
    if load:
        try:
            return pd.read_pickle(save_file)
        except OSError:
            print('Could not load file: ' + save_file)
    # start function
    # --------------
    # get info on the number of samples taken in each run as well
    estimator_list = [e.CountSamples()] + estimator_list_in
    est_names = [est.latex_name for est in estimator_list]
    method_names = []
    method_values = []
    assert dynamic_goals[0] is None, (
        'Need to start with standard ns to calculate efficiency gains')
    for i, dynamic_goal in enumerate(dynamic_goals):
        # set up settings
        settings.dynamic_goal = dynamic_goal
        settings.tuned_dynamic_p = tuned_dynamic_ps[i]
        # if we have already done the standard calculation, set n_samples_max
        # for dynamic calculations so it is slightly smaller than the number
        # of samples the standard calculation used to ensure a fair comparison
        # of performance. Otherwise dynamic nested sampling will end up using
        # more samples than standard nested sampling as it does not terminate
        # until after the number of samples is greater than n_samples_max.
        if i != 0 and settings.dynamic_goal is not None:
            assert dynamic_goals[0] is None
            assert isinstance(estimator_list[0], e.CountSamples)
            n_samples_max = np.mean(np.asarray([val[0] for val in
                                                method_values[0]]))
            # This factor is a function of the dynamic goal as typically
            # evidence calculations have longer additional threads than
            # parameter estimation calculations.
            reduce_factor = 1 - ((1.5 - 0.5 * settings.dynamic_goal) *
                                 (settings.nbatch / settings.nlive_const))
            settings.n_samples_max = int(n_samples_max * reduce_factor)
        print('dynamic_goal=' + str(settings.dynamic_goal),
              'n_samples_max=' + str(settings.n_samples_max))
        # get a name for this calculation method
        if dynamic_goal is None:
            method_names.append('standard')
        else:
            method_names.append('dynamic $G=' +
                                str(settings.dynamic_goal) + '$')
            if settings.tuned_dynamic_p is True:
                method_names[-1] += ' tuned'
        # generate runs and get results
        run_list = ns.get_run_data(settings, n_run, parallel=parallel,
                                   random_seeds=run_random_seeds,
                                   load=load, save=save,
                                   max_workers=max_workers,
                                   cache_dir=cache_dir,
                                   overwrite_existing=overwrite_existing)
        method_values.append(pu.parallel_apply(
            nestcheck.ns_run_utils.run_estimators, run_list,
            func_args=(estimator_list,), max_workers=max_workers,
            parallel=parallel))
    results = pf.efficiency_gain_df(method_names, method_values, est_names)
    if save:
        # save the results data frame
        print('get_dynamic_results: saving results to\n' + save_file)
        results.to_pickle(save_file)
    return results
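
A hedged usage sketch for get_dynamic_results under the same assumptions as the sketch above. The 'gain' rows report (variance of standard results) / (variance of dynamic results), so values above 1 indicate a speedup from dynamic nested sampling.

import perfectns.estimators as e
import perfectns.results_tables as rt
import perfectns.settings

settings = perfectns.settings.PerfectNSSettings()
# Goals must not be None - a standard NS run is prepended automatically
# for computing the efficiency gains.
results = rt.get_dynamic_results(10, [0, 1], [e.LogZ(), e.ParamMean()],
                                 settings, parallel=False)
print(results)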