Example #1
    def test_inference_gum_marsaglia_posterior_inference_compilation(self):
        observation = [8, 9]
        # Exact conjugate posterior of the Gaussian-with-unknown-mean model
        posterior_mean_correct = 7.25
        posterior_stddev_correct = math.sqrt(1 / 1.2)

        # Inference compilation: train a neural inference network on traces
        # from the model, then use it as the importance-sampling proposal.
        self._model.learn_inference_network(observation=[1, 1],
                                            early_stop_traces=training_traces)
        posterior = self._model.posterior_distribution(
            samples, use_inference_network=True, observation=observation)
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.mean_unweighted)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.stddev_unweighted)
        # This snippet's pyprob version passes four scalars to the KL helper;
        # examples #2 and #4 call it with two Normal distributions instead.
        kl_divergence = float(
            util.kl_divergence_normal(posterior_mean_correct,
                                      posterior_stddev_correct, posterior.mean,
                                      posterior_stddev))

        util.debug('training_traces', 'samples', 'posterior_mean_unweighted',
                   'posterior_mean', 'posterior_mean_correct',
                   'posterior_stddev_unweighted', 'posterior_stddev',
                   'posterior_stddev_correct', 'kl_divergence')
        add_perf_score_inference_compilation(kl_divergence)

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertLess(kl_divergence, 0.25)
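
These test snippets reference fixtures defined elsewhere in the test file (self._model, samples, training_traces, the add_perf_score_* helpers) that are not shown. As a minimal sketch, assuming the pyprob Model API of the same era as these tests, the model can be reconstructed from the expected constants: a Normal(1, sqrt(5)) prior on the mean and a Normal(mu, sqrt(2)) likelihood yield exactly posterior mean 7.25 and stddev sqrt(1/1.2) for observation [8, 9]. The "marsaglia" in the test names suggests the original draws the prior via Marsaglia's polar method, a rejection sampler that produces variable-length traces; the sketch below uses a plain Normal draw for brevity, and all names here are illustrative.

import math

import pyprob
from pyprob import Model
from pyprob.distributions import Normal


class GaussianWithUnknownMean(Model):
    def __init__(self):
        super().__init__('Gaussian with unknown mean')

    def forward(self, observation=[]):
        # Conjugate setup: Normal prior on the latent mean, Normal likelihood
        # with known variance. For observation [8, 9]:
        #   posterior precision = 1/5 + 2/2 = 1.2
        #   posterior mean = (1/5 + (8 + 9)/2) / 1.2 = 7.25
        mu = pyprob.sample(Normal(1, math.sqrt(5)))
        likelihood = Normal(mu, math.sqrt(2))
        for o in observation:
            pyprob.observe(likelihood, o)
        return mu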
Example #2
    def test_model_remote_gum_marsaglia_posterior_random_walk_metropolis_hastings(
            self):
        observation = [8, 9]
        posterior_mean_correct = 7.25
        posterior_stddev_correct = math.sqrt(1 / 1.2)

        # MCMC posterior: traces from the Metropolis-Hastings chain carry
        # equal weights, so weighted and unweighted moments should coincide.
        posterior = self._model.posterior_distribution(
            samples,
            inference_engine=pyprob.InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS,
            observation=observation)
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(
            util.kl_divergence_normal(
                Normal(posterior_mean_correct, posterior_stddev_correct),
                Normal(posterior.mean, posterior_stddev)))

        util.debug('samples', 'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'posterior_stddev_unweighted',
                   'posterior_stddev', 'posterior_stddev_correct',
                   'kl_divergence')

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertLess(kl_divergence, 0.25)
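
Every test gates on the closed-form KL divergence between the exact posterior and a normal summarizing the empirical one. Below is a self-contained sketch of the quantity util.kl_divergence_normal evaluates, written over four scalars as in examples #1 and #3 (the two-Normal signature in examples #2 and #4 computes the same thing); the standard formula is an assumption about the helper's internals.

import math

def kl_divergence_normal(mean_p, stddev_p, mean_q, stddev_q):
    # KL(N(mean_p, stddev_p) || N(mean_q, stddev_q)) for univariate normals:
    # log(s_q / s_p) + (s_p^2 + (m_p - m_q)^2) / (2 * s_q^2) - 1/2
    return (math.log(stddev_q / stddev_p)
            + (stddev_p ** 2 + (mean_p - mean_q) ** 2) / (2 * stddev_q ** 2)
            - 0.5)

# An estimate off by 0.3 in mean and 0.1 in stddev still clears the
# assertLess(kl_divergence, 0.25) threshold used by all four tests:
exact_mean, exact_stddev = 7.25, math.sqrt(1 / 1.2)
print(kl_divergence_normal(exact_mean, exact_stddev,
                           exact_mean + 0.3, exact_stddev + 0.1))  # ~0.054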
Example #3
    def test_inference_gum_marsaglia_posterior_importance_sampling(self):
        observation = [8, 9]
        posterior_mean_correct = 7.25
        posterior_stddev_correct = math.sqrt(1 / 1.2)

        # No inference_engine given: pyprob defaults to importance sampling
        # with the prior as the proposal distribution.
        posterior = self._model.posterior_distribution(samples,
                                                       observation=observation)
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.mean_unweighted)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.stddev_unweighted)
        kl_divergence = float(
            util.kl_divergence_normal(posterior_mean_correct,
                                      posterior_stddev_correct, posterior.mean,
                                      posterior_stddev))

        util.debug('samples', 'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'posterior_stddev_unweighted',
                   'posterior_stddev', 'posterior_stddev_correct',
                   'kl_divergence')
        add_perf_score_importance_sampling(kl_divergence)

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertLess(kl_divergence, 0.25)
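
Because importance sampling weights each trace, the weighted/unweighted pairs above compare the self-normalized estimate against a plain average of the same draws. A sketch of the two estimators, assuming the empirical posterior stores values with log-weights (names are illustrative, not pyprob internals):

import math

def weighted_moments(values, log_weights):
    # Self-normalized importance sampling: exponentiate the log-weights
    # (stabilized by their max), normalize, then take weighted moments.
    m = max(log_weights)
    w = [math.exp(lw - m) for lw in log_weights]
    total = sum(w)
    w = [wi / total for wi in w]
    mean = sum(wi * v for wi, v in zip(w, values))
    var = sum(wi * (v - mean) ** 2 for wi, v in zip(w, values))
    return mean, math.sqrt(var)

def unweighted_moments(values):
    # The *_unweighted quantities: same draws, all weights equal. The gap
    # between the two estimators shows how far the proposal (the prior)
    # sits from the posterior.
    n = len(values)
    mean = sum(values) / n
    var = sum((v - mean) ** 2 for v in values) / n
    return mean, math.sqrt(var)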
Example #4
    def test_dist_empirical_combine_non_uniform_weights_use_initial(self):
        samples1 = 10000
        samples2 = 1000
        observation = [8, 9]
        posterior_mean_correct = 7.25
        posterior_stddev_correct = math.sqrt(1/1.2)

        posterior1 = self._model_gum.posterior_distribution(
            samples1, observation=observation)
        posterior1_mean = float(posterior1.mean)
        posterior1_mean_unweighted = float(posterior1.unweighted().mean)
        posterior1_stddev = float(posterior1.stddev)
        posterior1_stddev_unweighted = float(posterior1.unweighted().stddev)
        kl_divergence1 = float(
            util.kl_divergence_normal(
                Normal(posterior_mean_correct, posterior_stddev_correct),
                Normal(posterior1.mean, posterior1_stddev)))

        posterior2 = self._model_gum.posterior_distribution(
            samples2, observation=observation)
        posterior2_mean = float(posterior2.mean)
        posterior2_mean_unweighted = float(posterior2.unweighted().mean)
        posterior2_stddev = float(posterior2.stddev)
        posterior2_stddev_unweighted = float(posterior2.unweighted().stddev)
        kl_divergence2 = float(
            util.kl_divergence_normal(
                Normal(posterior_mean_correct, posterior_stddev_correct),
                Normal(posterior2.mean, posterior2_stddev)))

        # Pool both runs into a single weighted empirical posterior; per the
        # flag name, each run's initial values and weights are reused.
        posterior = Empirical.combine(
            [posterior1, posterior2], use_initial_values_and_weights=True)
        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        kl_divergence = float(
            util.kl_divergence_normal(
                Normal(posterior_mean_correct, posterior_stddev_correct),
                Normal(posterior.mean, posterior_stddev)))

        util.debug('samples1', 'posterior1_mean_unweighted', 'posterior1_mean',
                   'posterior1_stddev_unweighted', 'posterior1_stddev',
                   'kl_divergence1', 'samples2', 'posterior2_mean_unweighted',
                   'posterior2_mean', 'posterior2_stddev_unweighted',
                   'posterior2_stddev', 'kl_divergence2',
                   'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'posterior_stddev_unweighted',
                   'posterior_stddev', 'posterior_stddev_correct',
                   'kl_divergence')

        self.assertAlmostEqual(posterior1_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior1_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence1, 0.25)
        self.assertAlmostEqual(posterior2_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior2_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence2, 0.25)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
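
A rough sketch of the pooling rule the combined assertions imply: concatenate the raw (value, weight) pairs from both runs and renormalize once over the union, so the 10000-trace run contributes proportionally more than the 1000-trace one. This assumes the two runs' weights are kept on a comparable scale; it is illustrative, not pyprob's implementation.

def combine_empiricals(values1, weights1, values2, weights2):
    # Concatenate both runs' samples and renormalize over the union. This
    # differs from averaging two already-normalized posteriors: a run with
    # more traces (or larger total weight) dominates proportionally.
    values = values1 + values2
    weights = weights1 + weights2
    total = sum(weights)
    return values, [w / total for w in weights]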