Code example #1

import math
import time
import unittest
import uuid

import docker
import torch.nn.functional as F

import pyprob
from pyprob import util, RemoteModel, InferenceEngine
from pyprob.distributions import Categorical, Normal

# The sample-count constants (importance_sampling_samples, lightweight_metropolis_hastings_samples, ...)
# and the add_*_duration / add_*_kl_divergence bookkeeping helpers used below are assumed to be
# defined at module level elsewhere in the test suite; they are not part of this excerpt.


class HiddenMarkovModelTestCase(unittest.TestCase):
    def __init__(self, *args, **kwargs):
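        # 16 scalar observations; the reference posterior mean below has one row per latent
        # state (17 rows for 16 observations, i.e. including the initial state), each a
        # distribution over the 3 hidden values.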
        self._observation = [0.9, 0.8, 0.7, 0.0, -0.025, -5.0, -2.0, -0.1, 0.0, 0.13, 0.45, 6, 0.2, 0.3, -1, -1]
        self._posterior_mean_correct = util.to_tensor([[0.3775, 0.3092, 0.3133],
                                                       [0.0416, 0.4045, 0.5539],
                                                       [0.0541, 0.2552, 0.6907],
                                                       [0.0455, 0.2301, 0.7244],
                                                       [0.1062, 0.1217, 0.7721],
                                                       [0.0714, 0.1732, 0.7554],
                                                       [0.9300, 0.0001, 0.0699],
                                                       [0.4577, 0.0452, 0.4971],
                                                       [0.0926, 0.2169, 0.6905],
                                                       [0.1014, 0.1359, 0.7626],
                                                       [0.0985, 0.1575, 0.7440],
                                                       [0.1781, 0.2198, 0.6022],
                                                       [0.0000, 0.9848, 0.0152],
                                                       [0.1130, 0.1674, 0.7195],
                                                       [0.0557, 0.1848, 0.7595],
                                                       [0.2017, 0.0472, 0.7511],
                                                       [0.2545, 0.0611, 0.6844]])
        super().__init__(*args, **kwargs)

    def setUp(self):
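        # Start the C++ HMM model server in a Docker container and connect to it over a ZeroMQ ipc socket.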
        server_address = 'ipc://@RemoteModelHiddenMarkovModel_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run('pyprob/pyprob_cpp', '/home/pyprob_cpp/build/pyprob_cpp/test_hmm {}'.format(server_address), network='host', detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_inference_remote_hmm_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.0015

        start = time.time()
        posterior = self._model.posterior_results(samples, observe=observation)
        add_importance_sampling_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        print(posterior[0])  # debug aid: inspect the first posterior sample
        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_importance_sampling_with_inference_network(self):
        samples = importance_sampling_with_inference_network_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.03

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 16} for i in range(len(observation))})

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = random_walk_metropolis_hastings_samples
        burn_in = random_walk_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)
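
The assertions above hinge on two quantities worth spelling out. The effective sample size of an importance-weighted posterior is (Σw)² / Σw², which equals the number of samples under uniform weights and collapses toward 1 when a few traces dominate; this is why the tests bound posterior.effective_sample_size below by a small fraction of samples. The KL check smooths each posterior-mean row with a small epsilon before treating it as a categorical distribution, so zero entries do not blow up the logarithm. A minimal sketch of both computations in plain PyTorch (independent of pyprob's own implementations; the epsilon default here is illustrative):

import torch

def effective_sample_size(log_weights):
    # ESS = (sum w)^2 / sum w^2, computed from log-weights for numerical stability.
    lw = torch.as_tensor(log_weights, dtype=torch.float64)
    w = (lw - lw.max()).exp()  # shift before exponentiating to avoid overflow
    return float(w.sum() ** 2 / (w ** 2).sum())

def categorical_kl(p, q, epsilon=1e-8):
    # KL(p || q) = sum_i p_i * log(p_i / q_i), with epsilon smoothing as in the tests.
    p = torch.as_tensor(p, dtype=torch.float64) + epsilon
    q = torch.as_tensor(q, dtype=torch.float64) + epsilon
    p, q = p / p.sum(), q / q.sum()  # renormalize after smoothing
    return float((p * (p / q).log()).sum())

print(effective_sample_size([0.0] * 100))                 # 100.0: uniform weights
print(effective_sample_size([0.0] + [-20.0] * 99))        # ~1.0: one trace dominates
print(categorical_kl([0.1, 0.2, 0.7], [0.3, 0.3, 0.4]))   # small positive value
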
Code example #2
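# Uses the same imports and module-level sample-count / bookkeeping helpers as code example #1.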
class GaussianWithUnknownMeanTestCase(unittest.TestCase):
    def setUp(self):
        # Start the C++ Gaussian-with-unknown-mean model server, as in the HMM test case above.
        server_address = 'ipc://@RemoteModelGaussianWithUnknownMean_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run('pyprob/pyprob_cpp', '/home/pyprob_cpp/build/pyprob_cpp/test_gum {}'.format(server_address), network='host', detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_inference_remote_gum_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        true_posterior = Normal(7.25, math.sqrt(1 / 1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior_effective_sample_size_min = samples * 0.005

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        add_importance_sampling_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        posterior_effective_sample_size = float(posterior.effective_sample_size)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'prior_mean_correct',
                        'posterior_mean_unweighted', 'posterior_mean',
                        'posterior_mean_correct', 'prior_stddev_correct',
                        'posterior_stddev_unweighted', 'posterior_stddev',
                        'posterior_stddev_correct',
                        'posterior_effective_sample_size',
                        'posterior_effective_sample_size_min', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean_unweighted,
                               prior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted,
                               prior_stddev_correct,
                               places=0)
        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertGreater(posterior_effective_sample_size,
                           posterior_effective_sample_size_min)
        self.assertLess(kl_divergence, 0.25)

    def test_inference_remote_gum_posterior_importance_sampling_with_inference_network(self):
        samples = importance_sampling_samples
        true_posterior = Normal(7.25, math.sqrt(1 / 1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        posterior_effective_sample_size_min = samples * 0.02

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs0': {'dim': 256, 'depth': 1}, 'obs1': {'dim': 256, 'depth': 1}})

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe={'obs0': 8, 'obs1': 9})
        add_importance_sampling_with_inference_network_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        posterior_effective_sample_size = float(posterior.effective_sample_size)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'posterior_mean_unweighted',
                        'posterior_mean', 'posterior_mean_correct',
                        'posterior_stddev_unweighted', 'posterior_stddev',
                        'posterior_stddev_correct',
                        'posterior_effective_sample_size',
                        'posterior_effective_sample_size_min', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertGreater(posterior_effective_sample_size,
                           posterior_effective_sample_size_min)
        self.assertLess(kl_divergence, 0.25)

    def test_inference_remote_gum_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        true_posterior = Normal(7.25, math.sqrt(1 / 1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'burn_in', 'posterior_mean',
                        'posterior_mean_correct', 'posterior_stddev',
                        'posterior_stddev_correct', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertLess(kl_divergence, 0.25)

    def test_inference_remote_gum_posterior_random_walk_metropolis_hastings(self):
        samples = random_walk_metropolis_hastings_samples
        burn_in = random_walk_metropolis_hastings_burn_in
        true_posterior = Normal(7.25, math.sqrt(1 / 1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'burn_in', 'posterior_mean',
                        'posterior_mean_correct', 'posterior_stddev',
                        'posterior_stddev_correct', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean,
                               posterior_mean_correct,
                               places=0)
        self.assertAlmostEqual(posterior_stddev,
                               posterior_stddev_correct,
                               places=0)
        self.assertLess(kl_divergence, 0.25)
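
For reference, the closed-form true_posterior = Normal(7.25, math.sqrt(1 / 1.2)) used throughout the Gaussian tests is the standard conjugate normal-normal update of the prior N(1, √5) (the prior_mean_correct and prior_stddev_correct above) given the two observations 8 and 9; the likelihood variance of 2 assumed below is the value implied by those posterior constants. A quick check of the arithmetic:

import math

# Conjugate normal-normal update reproducing the test's closed-form posterior.
prior_mean, prior_var = 1.0, 5.0   # prior N(1, sqrt(5)), as in the test
likelihood_var = 2.0               # implied by the posterior precision of 1.2
observations = [8.0, 9.0]

precision = 1.0 / prior_var + len(observations) / likelihood_var  # 0.2 + 1.0 = 1.2
posterior_var = 1.0 / precision
posterior_mean = posterior_var * (prior_mean / prior_var + sum(observations) / likelihood_var)

print(posterior_mean)            # 7.25
print(math.sqrt(posterior_var))  # ~0.9129 == sqrt(1 / 1.2)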