Ejemplo n.º 1
0
    def test_dynamic_results_table_values(self):
        """
        Check the table comparing dynamic and standard nested sampling.

        This exercises a large fraction of the perfectns package.
        The expected numbers rely on get_run_data's default seeding via
        numpy.random.seed; this should be stable across platforms, but
        is worth re-checking if the assertions ever fail.
        """
        settings = get_minimal_settings()
        runs = 5
        goals = [0, 0.25, 1, 1]
        tuned = [False, False, False, True]
        dynamic_table = rt.get_dynamic_results(
            runs, goals, ESTIMATOR_LIST, settings,
            tuned_dynamic_ps=tuned,
            load=True, save=True,
            cache_dir=TEST_CACHE_DIR,
            parallel=True)
        # The merged-results helper should reproduce exactly the same table.
        merged_df = rt.merged_dynamic_results(
            [(settings.n_dim, settings.prior.prior_scale)],
            [settings.likelihood], settings, ESTIMATOR_LIST,
            dynamic_goals=goals, n_run=runs,
            tuned_dynamic_ps=tuned,
            cache_dir=TEST_CACHE_DIR,
            load=True, save=False)
        self.assertTrue(np.array_equal(merged_df.values, dynamic_table.values))
        # No entry anywhere in the table should be NaN.
        self.assertFalse(np.any(np.isnan(dynamic_table.values)))
        # Spot-check all the numerical values in the RMean column.
        expected_rmean_vals = np.asarray([
            1.05159345, 0.05910616, 1.09315952, 0.08192338, 1.14996638,
            0.11357112, 1.24153945, 0.09478196, 1.24436994, 0.07220817,
            0.13216539, 0.04672752, 0.18318625, 0.06476612, 0.25395275,
            0.08978585, 0.21193890, 0.07493172, 0.16146238, 0.05708557,
            0.52053477, 0.52053477, 0.27085052, 0.27085052, 0.38887867,
            0.38887867, 0.67002776, 0.67002776
        ])
        numpy.testing.assert_allclose(
            dynamic_table[e.RMean(from_theta=True).latex_name].values,
            expected_rmean_vals,
            rtol=1e-7,
            err_msg=('this relies on numpy.random.seed being consistent - '
                     'this should be true but is perhaps worth checking for '
                     'your platform.'))
Ejemplo n.º 2
0
    def test_bootstrap_results_table_values(self):
        """
        Build the table of bootstrap sampling-error estimates and check it.

        The numerical values produced are stochastic, so beyond checking
        the function runs we only verify the absence of NaNs and one
        seeded column of reference values.
        """
        np.random.seed(0)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            bs_df = rt.get_bootstrap_results(
                5, 10, ESTIMATOR_LIST, get_minimal_settings(),
                n_run_ci=2,
                n_simulate_ci=10,
                add_sim_method=True,
                cred_int=0.95,
                load=True,
                save=True,
                cache_dir=TEST_CACHE_DIR,
                ninit_sep=True,
                parallel=False)
        # Row 0 holds analytic estimator values given the likelihood and
        # prior, already covered by test_dynamic_results_table; every
        # remaining entry should be a real number.
        self.assertFalse(np.any(np.isnan(bs_df.values[1:, :])))
        # Spot-check all the numerical values in the RMean column.
        expected_rmean_vals = np.asarray([
            1.05159345e+00, 5.91061598e-02, 1.32165391e-01, 4.67275222e-02,
            9.74212313e-01, 3.95418293e-01, 4.45773150e+01, 1.57604609e+01,
            1.26559404e+00, 4.81565225e-01, 3.14517691e+01, 1.11198796e+01,
            1.44503362e+00, 1.20619710e-01, 6.00000000e+01, 1.00000000e+02
        ])
        numpy.testing.assert_allclose(
            bs_df[e.RMean(from_theta=True).latex_name].values,
            expected_rmean_vals,
            rtol=1e-7,
            err_msg=('this relies on numpy.random.seed being consistent - '
                     'this should be true but is perhaps worth checking for '
                     'your platform.'))
Ejemplo n.º 3
0
 def test_r_not_from_theta(self):
     """Radial estimators can use stored 'r' values instead of theta."""
     root_two = np.sqrt(2)
     run = {
         'theta': np.full((2, 2), 1),
         'r': np.full((2,), root_two),
         'logl': np.full((2,), 0.),
         'nlive_array': np.full((2,), 5.),
         'settings': {'dims_to_sample': 2, 'n_dim': 2}
     }
     # Both estimators should recover the stored radius exactly.
     for estimator in (e.RMean(from_theta=False),
                       e.RCred(0.84, from_theta=False)):
         self.assertAlmostEqual(estimator(run, logw=None),
                                root_two,
                                places=10)
Ejemplo n.º 4
0
 def test_plot_parameter_logx_diagram(self):
     """Parameter-vs-logx diagrams are produced for several estimators."""
     settings = get_minimal_settings()
     for estimator in (e.ParamMean(), e.ParamSquaredMean(), e.RMean()):
         figure = perfectns.plots.plot_parameter_logx_diagram(
             settings, estimator, x_points=50, y_points=50)
         self.assertIsInstance(figure, matplotlib.figure.Figure)
     # Estimators without a CDF (e.g. LogZ) should trigger a warning.
     with warnings.catch_warnings(record=True) as war:
         warnings.simplefilter("always")
         perfectns.plots.cdf_given_logx(e.LogZ(), np.zeros(1), np.zeros(1),
                                        settings)
         self.assertEqual(len(war), 1)
     # Unexpected keyword arguments should be rejected with TypeError.
     self.assertRaises(TypeError,
                       perfectns.plots.plot_parameter_logx_diagram,
                       settings,
                       e.ParamMean(),
                       x_points=50,
                       y_points=50,
                       unexpected=0)
Ejemplo n.º 5
0
 def test_true_r_mean_value(self):
     """Analytic RMean for self.settings matches the known reference."""
     true_value = e.get_true_estimator_values(e.RMean(), self.settings)
     self.assertAlmostEqual(true_value, 1.2470645289408879, places=10)
Ejemplo n.º 6
0
import perfectns.cached_gaussian_prior
import perfectns.likelihoods as likelihoods
import perfectns.nested_sampling as ns
import perfectns.results_tables as rt
import perfectns.maths_functions
import perfectns.priors as priors
import perfectns.plots

# Estimators whose values the results tables are checked against.
ESTIMATOR_LIST = [e.LogZ(),
                  e.Z(),
                  e.ParamMean(),
                  e.ParamSquaredMean(),
                  e.ParamCred(0.5),
                  e.ParamCred(0.84),
                  e.RMean(from_theta=True),
                  e.RCred(0.84, from_theta=True)]
# Directory used (and deleted afterwards) by the caching tests.
TEST_CACHE_DIR = 'cache_tests'
TEST_DIR_EXISTS_MSG = ('Directory {} exists! Tests use this dir to check '
                       'caching then delete it afterwards, so the path '
                       'should be left empty.').format(TEST_CACHE_DIR)


class TestNestedSampling(unittest.TestCase):
    def setUp(self):
        """Check TEST_CACHE_DIR does not already exist.

        Abort before each test rather than risk clobbering a
        pre-existing directory when the cache is cleaned up afterwards.
        """
        assert not os.path.exists(TEST_CACHE_DIR), TEST_DIR_EXISTS_MSG

    def tearDown(self):
        """Remove any caches created by the tests."""
Ejemplo n.º 7
0
cluster.
"""
import perfectns.priors as priors
import perfectns.likelihoods as likelihoods
import perfectns.settings
import perfectns.estimators as e
import perfectns.plots
import perfectns.results_tables

# Quantities to estimate from each nested sampling run.
estimator_list = [e.LogZ(),
                  e.ParamMean(),
                  e.ParamSquaredMean(),
                  e.ParamCred(0.5),
                  e.ParamCred(0.84),
                  e.RMean(),
                  e.RCred(0.5),
                  e.RCred(0.84)]
# Perfect nested sampling configuration: 10d Gaussian likelihood
# (scale 1) with a Gaussian prior (scale 10).
settings = perfectns.settings.PerfectNSSettings()
settings.n_dim = 10
settings.ninit = 10
settings.nlive_const = 200
settings.prior = priors.Gaussian(prior_scale=10)
settings.likelihood = likelihoods.Gaussian(likelihood_scale=1)
likelihoods_list = [likelihoods.Gaussian(1),
                    likelihoods.ExpPower(1, 2),
                    likelihoods.ExpPower(1, 0.75)]
# 2d cases over a range of prior scales.
dim_scale_list = [(2, scale)
                  for scale in [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100]]