Example #1
import unittest

import numpy as np

# Import path assumed from the ABCpy releases these tests were written for.
from abcpy.distributions import MultiNormal


class MultiNormalTests(unittest.TestCase):
    def setUp(self):
        np.random.seed(1)
        self.mean = np.array([-13.0, .0, 7.0])
        self.cov = np.eye(3)
        self.distribution = MultiNormal(self.mean, self.cov, seed=1)

    def test_sample(self):
        samples = self.distribution.sample(100)
        computed_means = samples.mean(axis=0)
        computed_vars = samples.var(axis=0)
        expected_means = np.array([-12.9820723, 0.08671813, 7.11855369])
        expected_vars = np.array([0.99725084, 0.8610233, 0.8089557])
        self.assertTrue((np.abs(computed_means - expected_means) < 1e-5).all())
        self.assertTrue((np.abs(computed_vars - expected_vars) < 1e-5).all())

    def test_set_parameters(self):
        new_mean = np.array([130.0, 10.0, .0, .0])
        new_cov = np.eye(4) * 1e-2
        self.distribution.set_parameters([new_mean, new_cov])
        pdf_value = self.distribution.pdf(new_mean)
        self.assertLess(abs(pdf_value - 253.302959106), 1e-6)

        samples = self.distribution.sample(100)
        computed_means = samples.mean(axis=0)
        computed_vars = samples.var(axis=0)
        expected_means = np.array(
            [1.30004201e+02, 1.00043990e+01, 1.08618430e-02, 8.21679910e-04])
        expected_vars = np.array(
            [0.01023298, 0.00919317, 0.00876968, 0.00987364])
        self.assertTrue((np.abs(computed_means - expected_means) < 1e-5).all())
        self.assertTrue((np.abs(computed_vars - expected_vars) < 1e-5).all())
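
Example #1 exercises the whole MultiNormal interface used on this page: the constructor, sample(n), pdf(x) and set_parameters([mean, cov]). Below is a minimal stand-alone sketch of that interface; the import path is an assumption (as noted in the example above) and the numbers are purely illustrative.

import numpy as np
from abcpy.distributions import MultiNormal  # import path assumed, see Example #1

dist = MultiNormal(np.zeros(2), np.eye(2), seed=1)
draws = dist.sample(1000)                      # rows are draws, columns are dimensions
density = dist.pdf(np.zeros(2))                # density evaluated at a single point
dist.set_parameters([np.ones(3), np.eye(3)])   # switch to a new mean and covariance
print(draws.mean(axis=0), density)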
Example #2
    def setUp(self):
        # use the dummy backend (no Spark or MPI is needed for these tests)
        self.backend = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        self.model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define a distance function
        stat_calc = Identity(degree=2, cross=0)
        self.dist_calc = Euclidean(stat_calc)

        # create fake observed data
        self.observation = self.model.simulate(1)

        # define kernel
        mean = np.array([-13.0, .0, 7.0])
        cov = np.eye(3)
        self.kernel = MultiNormal(mean, cov, seed=1)
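
In the inference tests, the MultiNormal object serves as the perturbation kernel: the samplers re-centre it on an accepted parameter vector and draw a proposal from it. The sketch below illustrates that mechanism with the methods already shown in Example #1; it is a rough illustration, not the library's exact internal call sequence, and the variable names are made up.

import numpy as np
from abcpy.distributions import MultiNormal  # import path assumed, see Example #1

theta = np.array([2.1, 5.0])                 # current (mu, sigma) parameter vector
kernel = MultiNormal(theta, 0.1 * np.eye(2), seed=1)

# re-centre the kernel on the current parameter and draw one perturbed proposal
kernel.set_parameters([theta, 0.1 * np.eye(2)])
theta_proposed = kernel.sample(1)[0]
print(theta_proposed)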
Example #3
    def setUp(self):
        np.random.seed(1)
        self.mean = np.array([-13.0, .0, 7.0])
        self.cov = np.eye(3)
        self.distribution = MultiNormal(self.mean, self.cov, seed=1)
Example #4
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define summary statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        y_obs = model.simulate(1)

        # define the synthetic likelihood approximation
        likfun = SynLiklihood(stat_calc)

        # define a multivariate normal perturbation kernel
        mean = np.array([-13.0, .0, 7.0])
        cov = np.eye(3)
        kernel = MultiNormal(mean, cov, seed=1)

        # use the PMC scheme for T = 1
        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute the weighted posterior means
        mu_post_sample = np.array(samples[0][:, 0])
        sigma_post_sample = np.array(samples[0][:, 1])
        post_weights = np.array(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)

        # test shapes of the posterior sample and weights
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10,))
        self.assertEqual(sigma_sample_shape, (10,))
        self.assertEqual(weights_sample_shape, (10,))
        self.assertLess(abs(mu_post_mean - (-1.48953333102)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 6.50695612708), 1e-10)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute the weighted posterior means
        mu_post_sample = np.asarray(samples[0][:, 0])
        sigma_post_sample = np.asarray(samples[0][:, 1])
        post_weights = np.asarray(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)

        # test shapes of the posterior sample and weights
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10,))
        self.assertEqual(sigma_sample_shape, (10,))
        self.assertEqual(weights_sample_shape, (10,))
        self.assertLess(abs(mu_post_mean - (-1.4033145848)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 7.05175546876), 1e-10)
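
The two blocks above repeat the same post-processing boilerplate. A small helper that turns a journal into weighted posterior means keeps such tests shorter; the sketch below relies only on what the test itself assumes, namely that journal.get_parameters() yields an (n_sample, 2) array of (mu, sigma) pairs and journal.get_weights() an (n_sample, 1) array of weights.

import numpy as np

def weighted_posterior_means(journal):
    # weighted posterior means of (mu, sigma) from a PMC journal
    params = np.asarray(journal.get_parameters())      # shape (n_sample, 2)
    weights = np.asarray(journal.get_weights())[:, 0]  # shape (n_sample,)
    mu_mean = np.average(params[:, 0], weights=weights)
    sigma_mean = np.average(params[:, 1], weights=weights)
    return mu_mean, sigma_mean

# usage: mu_post_mean, sigma_post_mean = weighted_posterior_means(journal)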
Example #5
# Imports: module paths assumed from the ABCpy releases this example targets.
import numpy as np
from abcpy.backends import BackendDummy
from abcpy.distributions import Uniform, MultiNormal
from abcpy.inferences import SABC
# Deposition, DepositionStatistics and DepositionDistance are the user-defined
# model, summary-statistics and distance classes of this deposition example;
# import them from wherever they are defined in your project.

# Define the model with a uniform prior over its five parameters
lb, ub = [50, 5, 0.1, 0.5e-3, 0], [150, 20, 1.5, 3e-3, 10]
prior = Uniform(lb=lb, ub=ub)
pAd, pAg, pT, pF, aT = 110, 14.6, 0.6, 1.7e-3, 6
model = Deposition(prior, pAd, pAg, pT, pF, aT, seed=1)
# Observed data
a = np.array([[0, 0, 0, 172200, 4808], [20, 1689, 26.8, 155100, 1683],
              [60, 2004, 29.9, 149400, 0], [120, 1968, 31.3, 140700, 0],
              [300, 1946, 36.6, 125800, 0]])

np.save('depo_experimental.npy', a)
BE = np.zeros(shape=(10, 5))
index = 4  # the saved array has only 5 rows, so valid row indices are 0-4
y_obs = np.load('depo_experimental.npy')[index]

# Define summary stat and distance
stat_calc = DepositionStatistics(degree=1, cross=0)
dist_calc = DepositionDistance(stat_calc)

# Define a multivariate normal perturbation kernel
mean = np.array([-13.0, .0, 7.0])
cov = np.eye(3)
kernel = MultiNormal(mean, cov, seed=1)

# Run SABC (a dummy backend is assumed here so the snippet runs stand-alone)
backend = BackendDummy()
steps, epsilon, n_samples, n_samples_per_param = 2, 40, 1, 1
sampler = SABC(model, dist_calc, kernel, backend, seed=1)
journal_sabc = sampler.sample([y_obs], steps, epsilon, n_samples,
                              n_samples_per_param)
journal_sabc.save('experimental_%d.jrnl' % index)
samples = (journal_sabc.get_parameters(), journal_sabc.get_weights())
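
Appended to the script above, the following sketch reloads the journal that was just written and summarizes the posterior. It assumes ABCpy's Journal class and its fromFile loader in abcpy.output; if your version exposes them elsewhere, adjust the import accordingly.

import numpy as np
from abcpy.output import Journal  # import path assumed; adjust to your ABCpy version

# reload the journal written above and compute weighted posterior means
journal = Journal.fromFile('experimental_%d.jrnl' % index)
params = np.asarray(journal.get_parameters())
weights = np.asarray(journal.get_weights())[:, 0]
print(np.average(params, axis=0, weights=weights))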