Example 1
def setUp(self):
    server_address = 'ipc://@RemoteModelDistributions_' + str(uuid.uuid4())
    docker_client = docker.from_env()
    self._docker_container = docker_client.containers.run(
        'pyprob/pyprob_cpp',
        '/home/pyprob_cpp/build/pyprob_cpp/test_distributions {}'.format(
            server_address),
        network='host',
        detach=True)
    self._model = RemoteModel(server_address)
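
Note: these fixture snippets are shown without their module headers. Judging from the calls they make, Examples 1-3 assume imports along the lines of:

import unittest
import uuid

import docker
from pyprob import util, RemoteModel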
Example 2
def setUp(self):
    server_address = 'ipc://@RemoteModelGaussianWithUnknownMean_' + str(
        uuid.uuid4())
    docker_client = docker.from_env()
    self._docker_container = docker_client.containers.run(
        'pyprob/pyprob_cpp',
        '/home/pyprob_cpp/build/pyprob_cpp/test_gum {}'.format(
            server_address),
        network='host',
        detach=True)
    self._model = RemoteModel(server_address)
Example 3
class RemoteModelSetDefaultsAndAddressesTestCase(unittest.TestCase):
    def setUp(self):
        server_address = 'ipc://@RemoteModelSetDefaultsAndAddresses_' + str(
            uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run(
            'pyprob/pyprob_cpp',
            '/home/pyprob_cpp/build/pyprob_cpp/test_set_defaults_and_addresses {}'
            .format(server_address),
            network='host',
            detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_model_remote_set_defaults_and_addresses_prior(self):
        samples = 1000
        prior_mean_correct = 1
        prior_stddev_correct = 3.882074  # Estimate from 100k samples

        prior = self._model.prior_results(samples)
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
        util.eval_print('samples', 'prior_mean', 'prior_mean_correct',
                        'prior_stddev', 'prior_stddev_correct')

        self.assertAlmostEqual(prior_mean, prior_mean_correct, places=0)
        self.assertAlmostEqual(prior_stddev, prior_stddev_correct, places=0)

    def test_model_remote_set_defaults_and_addresses_addresses(self):
        addresses_controlled_correct = [
            '[forward()+0x252]__Normal__1', '[forward()+0x252]__Normal__2',
            '[forward()+0xbf1]__Normal__2'
        ]
        addresses_all_correct = [
            '[forward()+0x252]__Normal__1', '[forward()+0x252]__Normal__2',
            '[forward()+0xbf1]__Normal__1', '[forward()+0xbf1]__Normal__2',
            '[forward()+0x1329]__Normal__1', '[forward()+0x1329]__Normal__2',
            '[forward()+0x1a2e]__Normal__1'
        ]

        trace = next(self._model._trace_generator())
        addresses_controlled = [s.address for s in trace.variables_controlled]
        addresses_all = [s.address for s in trace.variables]

        util.eval_print('addresses_controlled', 'addresses_controlled_correct',
                        'addresses_all', 'addresses_all_correct')

        self.assertEqual(addresses_controlled, addresses_controlled_correct)
        self.assertEqual(addresses_all, addresses_all_correct)
Example 4
import unittest

import docker
import pyprob
from pyprob import util, RemoteModel, InferenceEngine
from pyprob.distributions import Normal, Categorical

docker_client = docker.from_env()
print('Pulling latest Docker image: pyprob/pyprob_cpp')
docker_client.images.pull('pyprob/pyprob_cpp')
print('Docker image pulled.')

docker_container = docker_client.containers.run(
    'pyprob/pyprob_cpp',
    '/code/pyprob_cpp/build/pyprob_cpp/test_set_defaults_and_addresses ipc://@RemoteModelSetDefaultsAndAddresses',
    network='host',
    detach=True)
SetDefaultsAndAddressesCPP = RemoteModel(
    'ipc://@RemoteModelSetDefaultsAndAddresses')


class RemoteModelSetDefaultsAndAddressesTestCase(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._model = SetDefaultsAndAddressesCPP
        super().__init__(*args, **kwargs)

    def test_model_remote_set_defaults_and_addresses_prior(self):
        samples = 1000
        prior_mean_correct = 1
        prior_stddev_correct = 3.882074  # Estimate from 100k samples

        prior = self._model.prior_results(samples)
        prior_mean = float(prior.mean)
        prior_stddev = float(prior.stddev)
Example 5
import pyprob
import subprocess, os, signal
from pyprob import RemoteModel
import numpy as np
import torch

model_executable = './simulator'  # Path to the simulator's executable file.
# No fixed model address is needed; it is generated randomly in this example.

if __name__ == '__main__':

    def model_dispatcher(trace_idx, server_address):
        return subprocess.Popen('{} {} > /dev/null &'.format(
            model_executable, server_address),
                                shell=True,
                                preexec_fn=os.setsid)

    model = None
    try:
        # Instantiate the Python-side model. RemoteModel generates a random
        # server address and launches a new simulator process for each trace by
        # calling the model_dispatcher function above; with restart_per_trace=True
        # the process is terminated after each trace and a fresh one is started.
        model = RemoteModel(random_server_address=True,
                            model_dispatcher=model_dispatcher,
                            restart_per_trace=True)
        # Run the simulator 10 times.
        samples = model.prior_results(num_traces=10)
    finally:
        print('Done, killing model process')
        if model is not None:
            model.kill_process()
Example 6
def add_random_walk_metropolis_hastings_duration(val):
    global random_walk_metropolis_hastings_duration
    random_walk_metropolis_hastings_duration += val


docker_client = docker.from_env()
print('Pulling latest Docker image: probprog/pyprob_cpp')
docker_client.images.pull('probprog/pyprob_cpp')
print('Docker image pulled.')

docker_containers = []
docker_containers.append(
    docker_client.containers.run(
        'probprog/pyprob_cpp',
        '/code/pyprob_cpp/build/pyprob_cpp/test_gum ipc://@GaussianWithUnknownMeanCPP',
        network='host',
        detach=True))
GaussianWithUnknownMeanCPP = RemoteModel('ipc://@GaussianWithUnknownMeanCPP')

docker_containers.append(
    docker_client.containers.run(
        'probprog/pyprob_cpp',
        '/code/pyprob_cpp/build/pyprob_cpp/test_gum_marsaglia_replacement ipc://@GaussianWithUnknownMeanMarsagliaWithReplacementCPP',
        network='host',
        detach=True))
GaussianWithUnknownMeanMarsagliaWithReplacementCPP = RemoteModel(
    'ipc://@GaussianWithUnknownMeanMarsagliaWithReplacementCPP')

docker_containers.append(
    docker_client.containers.run(
        'probprog/pyprob_cpp',
        '/code/pyprob_cpp/build/pyprob_cpp/test_hmm ipc://@HiddenMarkovModelCPP',
        network='host',
        detach=True))
HiddenMarkovModelCPP = RemoteModel('ipc://@HiddenMarkovModelCPP')
Example 7
class GaussianWithUnknownMeanTestCase(unittest.TestCase):
    def setUp(self):
        server_address = 'ipc://@RemoteModelGaussianWithUnknownMean_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run('pyprob/pyprob_cpp', '/home/pyprob_cpp/build/pyprob_cpp/test_gum {}'.format(server_address), network='host', detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_inference_remote_gum_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)
        prior_mean_correct = 1.
        prior_stddev_correct = math.sqrt(5)
        posterior_effective_sample_size_min = samples * 0.005

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe={'obs0': 8, 'obs1': 9})
        add_importance_sampling_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_mean_unweighted = float(posterior.unweighted().mean)
        posterior_stddev = float(posterior.stddev)
        posterior_stddev_unweighted = float(posterior.unweighted().stddev)
        posterior_effective_sample_size = float(posterior.effective_sample_size)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'prior_mean_correct', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'prior_stddev_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean_unweighted, prior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev_unweighted, prior_stddev_correct, places=0)
        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(kl_divergence, 0.25)

    # def test_inference_remote_gum_posterior_importance_sampling_with_inference_network(self):
    #     samples = importance_sampling_samples
    #     true_posterior = Normal(7.25, math.sqrt(1/1.2))
    #     posterior_mean_correct = float(true_posterior.mean)
    #     posterior_stddev_correct = float(true_posterior.stddev)
    #     posterior_effective_sample_size_min = samples * 0.01
    #
    #     self._model.reset_inference_network()
    #     self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs0': {'dim': 256, 'depth': 1}, 'obs1': {'dim': 256, 'depth': 1}})
    #
    #     start = time.time()
    #     posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe={'obs0': 8, 'obs1': 9})
    #     add_importance_sampling_with_inference_network_duration(time.time() - start)
    #
    #     posterior_mean = float(posterior.mean)
    #     posterior_mean_unweighted = float(posterior.unweighted().mean)
    #     posterior_stddev = float(posterior.stddev)
    #     posterior_stddev_unweighted = float(posterior.unweighted().stddev)
    #     posterior_effective_sample_size = float(posterior.effective_sample_size)
    #     kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))
    #
    #     util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev_unweighted', 'posterior_stddev', 'posterior_stddev_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'kl_divergence')
    #     add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)
    #
    #     self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
    #     self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
    #     self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
    #     self.assertLess(kl_divergence, 0.25)

    def test_inference_remote_gum_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)

    def test_inference_remote_gum_posterior_random_walk_metropolis_hastings(self):
        samples = random_walk_metropolis_hastings_samples
        burn_in = random_walk_metropolis_hastings_burn_in
        true_posterior = Normal(7.25, math.sqrt(1/1.2))
        posterior_mean_correct = float(true_posterior.mean)
        posterior_stddev_correct = float(true_posterior.stddev)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs0': 8, 'obs1': 9})[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        posterior_mean = float(posterior.mean)
        posterior_stddev = float(posterior.stddev)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(true_posterior, Normal(posterior.mean, posterior.stddev)))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'posterior_stddev', 'posterior_stddev_correct', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertAlmostEqual(posterior_mean, posterior_mean_correct, places=0)
        self.assertAlmostEqual(posterior_stddev, posterior_stddev_correct, places=0)
        self.assertLess(kl_divergence, 0.25)
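
The hard-coded true posterior Normal(7.25, math.sqrt(1/1.2)) in these tests follows from the standard conjugate Normal-Normal update, assuming (as the prior_*_correct values above and Example 11 suggest) a Normal(1, sqrt(5)) prior, a Normal(mu, sqrt(2)) likelihood, and observations 8 and 9. A quick standalone check:

import math

prior_mean, prior_var = 1.0, 5.0   # prior N(1, sqrt(5)), per prior_*_correct above
lik_var = 2.0                      # likelihood N(mu, sqrt(2)), inferred from Example 11
obs = [8.0, 9.0]

post_precision = 1.0 / prior_var + len(obs) / lik_var                        # 1.2
post_mean = (prior_mean / prior_var + sum(obs) / lik_var) / post_precision   # 7.25
post_stddev = math.sqrt(1.0 / post_precision)                                # sqrt(1/1.2)
print(post_mean, post_stddev)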
Example 8
class BranchingTestCase(unittest.TestCase):
    def setUp(self):
        server_address = 'ipc://@RemoteModelBranching_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run('pyprob/pyprob_cpp', '/home/pyprob_cpp/build/pyprob_cpp/test_branching {}'.format(server_address), network='host', detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    @functools.lru_cache(maxsize=None)  # 128 by default
    def fibonacci(self, n):
        if n < 2:
            return 1

        a = 1
        fib = 1
        for i in range(n-2):
            a, fib = fib, a + fib
        return fib

    def true_posterior(self, observe=6):
        count_prior = Poisson(4)
        vals = []
        log_weights = []
        for r in range(40):
            for s in range(40):
                if 4 < float(r):
                    l = 6
                else:
                    f = self.fibonacci(3 * r)
                    l = 1 + f + count_prior.sample()
                vals.append(r)
                log_weights.append(Poisson(l).log_prob(observe) + count_prior.log_prob(r) + count_prior.log_prob(s))
        return Empirical(vals, log_weights)

    def test_inference_remote_branching_importance_sampling(self):
        samples = importance_sampling_samples
        posterior_correct = util.empirical_to_categorical(self.true_posterior(), max_val=40)

        start = time.time()
        posterior = util.empirical_to_categorical(self._model.posterior_results(samples, observe={'obs': 6}), max_val=40)
        add_importance_sampling_duration(time.time() - start)

        posterior_probs = util.to_numpy(posterior._probs)
        posterior_probs_correct = util.to_numpy(posterior_correct._probs)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(posterior, posterior_correct))

        util.eval_print('samples', 'posterior_probs', 'posterior_probs_correct', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertLess(kl_divergence, 0.75)
    #
    # def test_inference_remote_branching_importance_sampling_with_inference_network(self):
    #     samples = importance_sampling_samples
    #     posterior_correct = util.empirical_to_categorical(self._model.true_posterior(), max_val=40)
    #
    #     self._model.reset_inference_network()
    #     self._model.learn_inference_network(num_traces=2000, observe_embeddings={'obs': {'depth': 2, 'dim': 32}})
    #
    #     start = time.time()
    #     posterior = util.empirical_to_categorical(self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe={'obs': 6}), max_val=40)
    #     add_importance_sampling_with_inference_network_duration(time.time() - start)
    #
    #     posterior_probs = util.to_numpy(posterior._probs)
    #     posterior_probs_correct = util.to_numpy(posterior_correct._probs)
    #     kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(posterior, posterior_correct))
    #
    #     util.eval_print('samples', 'posterior_probs', 'posterior_probs_correct', 'kl_divergence')
    #     add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)
    #
    #     self.assertLess(kl_divergence, 0.75)

    def test_inference_remote_branching_lightweight_metropolis_hastings(self):
        samples = importance_sampling_samples
        posterior_correct = util.empirical_to_categorical(self.true_posterior(), max_val=40)

        start = time.time()
        posterior = util.empirical_to_categorical(self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe={'obs': 6}), max_val=40)
        add_lightweight_metropolis_hastings_duration(time.time() - start)

        posterior_probs = util.to_numpy(posterior._probs)
        posterior_probs_correct = util.to_numpy(posterior_correct._probs)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(posterior, posterior_correct))

        util.eval_print('samples', 'posterior_probs', 'posterior_probs_correct', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(kl_divergence, 0.75)

    def test_inference_remote_branching_random_walk_metropolis_hastings(self):
        samples = importance_sampling_samples
        posterior_correct = util.empirical_to_categorical(self.true_posterior(), max_val=40)

        start = time.time()
        posterior = util.empirical_to_categorical(self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe={'obs': 6}), max_val=40)
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        posterior_probs = util.to_numpy(posterior._probs)
        posterior_probs_correct = util.to_numpy(posterior_correct._probs)
        kl_divergence = float(pyprob.distributions.Distribution.kl_divergence(posterior, posterior_correct))

        util.eval_print('samples', 'posterior_probs', 'posterior_probs_correct', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(kl_divergence, 0.75)
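
A side note on the fibonacci helper above: functools.lru_cache applied to an instance method includes self in the cache key, so the cache grows per instance and keeps each TestCase object alive for the cache's lifetime. Since the result depends only on n, a module-level memoized function would behave identically; a minimal sketch:

import functools

@functools.lru_cache(maxsize=None)
def fibonacci(n):
    # Same iterative scheme as the method above: fibonacci(0) == fibonacci(1) == 1.
    if n < 2:
        return 1
    a, fib = 1, 1
    for _ in range(n - 2):
        a, fib = fib, a + fib
    return fib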
Example 9
class HiddenMarkovModelTestCase(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self._observation = [0.9, 0.8, 0.7, 0.0, -0.025, -5.0, -2.0, -0.1, 0.0, 0.13, 0.45, 6, 0.2, 0.3, -1, -1]
        self._posterior_mean_correct = util.to_tensor([[0.3775, 0.3092, 0.3133],
                                                       [0.0416, 0.4045, 0.5539],
                                                       [0.0541, 0.2552, 0.6907],
                                                       [0.0455, 0.2301, 0.7244],
                                                       [0.1062, 0.1217, 0.7721],
                                                       [0.0714, 0.1732, 0.7554],
                                                       [0.9300, 0.0001, 0.0699],
                                                       [0.4577, 0.0452, 0.4971],
                                                       [0.0926, 0.2169, 0.6905],
                                                       [0.1014, 0.1359, 0.7626],
                                                       [0.0985, 0.1575, 0.7440],
                                                       [0.1781, 0.2198, 0.6022],
                                                       [0.0000, 0.9848, 0.0152],
                                                       [0.1130, 0.1674, 0.7195],
                                                       [0.0557, 0.1848, 0.7595],
                                                       [0.2017, 0.0472, 0.7511],
                                                       [0.2545, 0.0611, 0.6844]])
        super().__init__(*args, **kwargs)

    def setUp(self):
        server_address = 'ipc://@RemoteModelHiddenMarkovModel_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run('pyprob/pyprob_cpp', '/home/pyprob_cpp/build/pyprob_cpp/test_hmm {}'.format(server_address), network='host', detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_inference_remote_hmm_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.0015

        start = time.time()
        posterior = self._model.posterior_results(samples, observe=observation)
        add_importance_sampling_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        print(posterior[0])
        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_importance_sampling_with_inference_network(self):
        samples = importance_sampling_with_inference_network_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.03

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 16} for i in range(len(observation))})

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)

    def test_inference_remote_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)
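
The HMM tests score accuracy by summing a smoothed Categorical KL divergence over the per-step state marginals. The quantity computed for each step is KL(p || q) = sum_i p_i * log(p_i / q_i); a small standalone illustration with an epsilon guard against log(0), in the same spirit as the util._epsilon smoothing above:

import math

def categorical_kl(p, q, eps=1e-8):
    # KL(p || q) for two discrete distributions over the same support.
    return sum(pi * math.log((pi + eps) / (qi + eps)) for pi, qi in zip(p, q))

print(categorical_kl([0.3775, 0.3092, 0.3133], [0.3775, 0.3092, 0.3133]))  # ~0.0
print(categorical_kl([0.9, 0.05, 0.05], [0.3775, 0.3092, 0.3133]))         # > 0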
Example 10
def run(args):
    def model_dispatcher(trace_idx, server_address):
        arguments = f'{args.params} {trace_idx} {args.out_dir}'
        if args.dump_simulator_log:
            # Send both stdout and stderr to a per-trace log file
            # (the file redirection must precede 2>&1 for stderr to be captured).
            return subprocess.Popen(
                f'{model_executable} {server_address} {arguments} > {args.out_dir}/LOG{trace_idx} 2>&1 &',
                shell=True,
                preexec_fn=os.setsid)
        else:
            return subprocess.Popen(
                f'{model_executable} {server_address} {arguments} 2>&1 &',
                shell=True,
                preexec_fn=os.setsid)

    model = None
    try:
        model = RemoteModel(
            random_server_address=True,
            model_dispatcher=model_dispatcher,
            restart_per_trace=True,
            kill_on_zero_likelihood=args.kill_on_zero_likelihood)
        traces = model.posterior(
            num_traces=args.num_traces,
            inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING,
            observe={
                f'obs_{i}': args.constraint_threshold
                for i in range(args.days)
            })
        trace_weights = {}
        for idx, trace in enumerate(traces):
            # Cast the latent variables that are treated as integers on the C++ side.
            trace.named_variables[
                'shelter_in_place_duration_mean'].value = trace.named_variables[
                    'shelter_in_place_duration_mean'].value.int()

            dump_parameter_file(sampled_parameters={
                name: variable.value.item()
                for name, variable in trace.named_variables.items()
                if not variable.observed
            },
                                path=os.path.join(args.out_dir,
                                                  f'params{idx}'),
                                args=args)
            weight = np.exp(trace.log_importance_weight)
            print(f'likelihood {idx}: {weight}')
            assert weight < 0.2 or weight > 0.8
            trace_weights[idx] = int(weight > 0.5)

    print(f'Average success rate: {np.mean(list(trace_weights.values()))}')

        # Save the trace weights
        with open(os.path.join(args.out_dir, 'weights.json'), 'w') as fp:
            json.dump(trace_weights, fp, indent=4, separators=(',', ': '))
        # Save the traces to file
        traces.copy(file_name=os.path.join(args.out_dir, 'traces'))
    except Exception as e:
        if args.compressed_file_path is not None:
            # Compress the outputs
            print('Failed... Compressing the output')
            zipdir(f'{args.compressed_file_path}_failed', args.out_dir)
        raise e
    finally:
        if model is not None and model._model_process is not None:
            print('Done, killing model process: {}'.format(
                model._model_process.pid))
            os.killpg(os.getpgid(model._model_process.pid), signal.SIGTERM)

    if args.compressed_file_path is not None:
        # Compress the outputs
        print('Compressing the output')
        zipdir(args.compressed_file_path, args.out_dir)
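
dump_parameter_file and zipdir are project-specific helpers that are not shown in this snippet. Purely as an illustration of what the zipdir calls above appear to expect (an archive name followed by the directory to compress), a hypothetical implementation could be as small as:

import shutil

def zipdir(archive_name, dir_path):
    # Hypothetical helper: compress dir_path into <archive_name>.zip.
    shutil.make_archive(archive_name, 'zip', dir_path)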
Example 11
import pyprob
import subprocess, os, signal
from pyprob import RemoteModel
import numpy as np
import torch

model_address = 'ipc://@gum'  # A shared address for inter-process communication between the simulator and pyprob.
model_executable = './simulator'  # Path to the simulator's executable file.

if __name__ == '__main__':
    obs = 2  # observed value from the model

    model_process = None
    try:
        model_process = subprocess.Popen('{} {} > /dev/null &'.format(
            model_executable, model_address),
                                         shell=True,
                                         preexec_fn=os.setsid)
        model = RemoteModel(model_address)
        samples = model.posterior_results(
            num_traces=5000,
            inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING,
            observe={'obs': obs})

        # Closed-form conjugate posterior for the GUM model:
        # prior N(1, sqrt(5)), likelihood N(mu, sqrt(2)), one observation.
        var_gt = 5 * 2 / (5 + 2)
        mu_posterior = var_gt * (1 / 5 + obs / 2)
        print(f'Mean = {samples.mean} (GT: {mu_posterior})')
        print(f'Standard deviation = {samples.stddev} (GT: {np.sqrt(var_gt)})')
    finally:
        if model_process is not None:
            print('Done, killing model process: {}'.format(model_process.pid))
            os.killpg(os.getpgid(model_process.pid), signal.SIGTERM)
Example 12
import pyprob
import subprocess, os, signal
from pyprob import RemoteModel
import numpy as np
import torch

model_address = 'ipc://@gum'     # A shared address for inter-process communication (IPC) between the simulator and pyprob.
model_executable = './simulator' # Path to the simulator's executable file.

if __name__ == '__main__':
    model_process = None
    try:
        # Start the simulator, passing in the inter-process communication address.
        model_process = subprocess.Popen('{} {} > /dev/null &'.format(model_executable, model_address), shell=True, preexec_fn=os.setsid)
        # Instantiate the Python-side model.
        model = RemoteModel(model_address)
        # Run the simulator 10 times.
        samples = model.prior_results(num_traces=10)
    finally:
        if model_process is not None:
            print('Done, killing model process: {}'.format(model_process.pid))
            os.killpg(os.getpgid(model_process.pid), signal.SIGTERM)
Example 13
class RemoteModelDistributionsTestCase(unittest.TestCase):
    def setUp(self):
        server_address = 'ipc://@RemoteModelDistributions_' + str(uuid.uuid4())
        docker_client = docker.from_env()
        self._docker_container = docker_client.containers.run(
            'pyprob/pyprob_cpp',
            '/home/pyprob_cpp/build/pyprob_cpp/test_distributions {}'.format(
                server_address),
            network='host',
            detach=True)
        self._model = RemoteModel(server_address)

    def tearDown(self):
        self._model.close()
        self._docker_container.kill()

    def test_distributions_remote(self):
        num_samples = 4000
        prior_normal_mean_correct = Normal(1.75, 0.5).mean
        prior_uniform_mean_correct = Uniform(1.2, 2.5).mean
        prior_categorical_mean_correct = 1.  # Categorical([0.1, 0.5, 0.4])
        prior_poisson_mean_correct = Poisson(4.0).mean
        prior_bernoulli_mean_correct = Bernoulli(0.2).mean
        prior_beta_mean_correct = Beta(1.2, 2.5).mean
        prior_exponential_mean_correct = Exponential(2.2).mean
        prior_gamma_mean_correct = Gamma(0.5, 1.2).mean
        prior_log_normal_mean_correct = LogNormal(0.5, 0.2).mean
        prior_binomial_mean_correct = Binomial(10, 0.72).mean
        prior_weibull_mean_correct = Weibull(1.1, 0.6).mean

        prior = self._model.prior(num_samples)
        prior_normal = prior.map(
            lambda trace: trace.named_variables['normal'].value)
        prior_uniform = prior.map(
            lambda trace: trace.named_variables['uniform'].value)
        prior_categorical = prior.map(
            lambda trace: trace.named_variables['categorical'].value)
        prior_poisson = prior.map(
            lambda trace: trace.named_variables['poisson'].value)
        prior_bernoulli = prior.map(
            lambda trace: trace.named_variables['bernoulli'].value)
        prior_beta = prior.map(
            lambda trace: trace.named_variables['beta'].value)
        prior_exponential = prior.map(
            lambda trace: trace.named_variables['exponential'].value)
        prior_gamma = prior.map(
            lambda trace: trace.named_variables['gamma'].value)
        prior_log_normal = prior.map(
            lambda trace: trace.named_variables['log_normal'].value)
        prior_binomial = prior.map(
            lambda trace: trace.named_variables['binomial'].value)
        prior_weibull = prior.map(
            lambda trace: trace.named_variables['weibull'].value)
        prior_normal_mean = util.to_numpy(prior_normal.mean)
        prior_uniform_mean = util.to_numpy(prior_uniform.mean)
        prior_categorical_mean = util.to_numpy(int(prior_categorical.mean))
        prior_poisson_mean = util.to_numpy(prior_poisson.mean)
        prior_bernoulli_mean = util.to_numpy(prior_bernoulli.mean)
        prior_beta_mean = util.to_numpy(prior_beta.mean)
        prior_exponential_mean = util.to_numpy(prior_exponential.mean)
        prior_gamma_mean = util.to_numpy(prior_gamma.mean)
        prior_log_normal_mean = util.to_numpy(prior_log_normal.mean)
        prior_binomial_mean = util.to_numpy(prior_binomial.mean)
        prior_weibull_mean = util.to_numpy(prior_weibull.mean)
        util.eval_print('num_samples', 'prior_normal_mean',
                        'prior_normal_mean_correct', 'prior_uniform_mean',
                        'prior_uniform_mean_correct', 'prior_categorical_mean',
                        'prior_categorical_mean_correct', 'prior_poisson_mean',
                        'prior_poisson_mean_correct', 'prior_bernoulli_mean',
                        'prior_bernoulli_mean_correct', 'prior_beta_mean',
                        'prior_beta_mean_correct', 'prior_exponential_mean',
                        'prior_exponential_mean_correct', 'prior_gamma_mean',
                        'prior_gamma_mean_correct', 'prior_log_normal_mean',
                        'prior_log_normal_mean_correct', 'prior_binomial_mean',
                        'prior_binomial_mean_correct', 'prior_weibull_mean',
                        'prior_weibull_mean_correct')

        self.assertTrue(
            np.allclose(prior_normal_mean, prior_normal_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_uniform_mean,
                        prior_uniform_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_categorical_mean,
                        prior_categorical_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_poisson_mean,
                        prior_poisson_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_bernoulli_mean,
                        prior_bernoulli_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_beta_mean, prior_beta_mean_correct, atol=0.1))
        self.assertTrue(
            np.allclose(prior_exponential_mean,
                        prior_exponential_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_gamma_mean, prior_gamma_mean_correct, atol=0.1))
        self.assertTrue(
            np.allclose(prior_log_normal_mean,
                        prior_log_normal_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_binomial_mean,
                        prior_binomial_mean_correct,
                        atol=0.1))
        self.assertTrue(
            np.allclose(prior_weibull_mean,
                        prior_weibull_mean_correct,
                        atol=0.1))
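
The *_mean_correct values in this test are the analytic means of the distributions being sampled. For reference, the less obvious closed forms can be checked independently of pyprob; this assumes the torch.distributions parameter conventions (concentration/rate for Gamma, scale/concentration for Weibull) that pyprob's wrappers appear to follow:

import math

beta_mean = 1.2 / (1.2 + 2.5)                   # Beta(a, b): a / (a + b)
exponential_mean = 1 / 2.2                      # Exponential(rate): 1 / rate
gamma_mean = 0.5 / 1.2                          # Gamma(shape, rate): shape / rate
log_normal_mean = math.exp(0.5 + 0.2 ** 2 / 2)  # LogNormal(mu, sigma): exp(mu + sigma^2 / 2)
binomial_mean = 10 * 0.72                       # Binomial(n, p): n * p
weibull_mean = 1.1 * math.gamma(1 + 1 / 0.6)    # Weibull(scale, shape): scale * Gamma(1 + 1/shape)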