def test_inference_branching_random_walk_metropolis_hastings(self):
        """Check RMH posterior on the branching model against the analytic posterior.

        NOTE: locals `samples`, `posterior_probs`, `posterior_probs_correct`
        and `kl_divergence` are referenced *by name* in util.eval_print
        (caller-frame lookup), so those names must not be renamed.
        """
        samples = importance_sampling_samples
        posterior_correct = util.empirical_to_categorical(
            self.true_posterior(), max_val=40)

        # Time the full inference + categorical conversion, as in the siblings.
        start = time.time()
        empirical = self._model.posterior_distribution(
            samples,
            inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS,
            observe={'obs': 6})
        posterior = util.empirical_to_categorical(empirical, max_val=40)
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        posterior_probs = util.to_numpy(posterior._probs)
        posterior_probs_correct = util.to_numpy(posterior_correct._probs)
        kl = pyprob.distributions.Distribution.kl_divergence(
            posterior, posterior_correct)
        kl_divergence = float(kl)

        util.eval_print('samples', 'posterior_probs',
                        'posterior_probs_correct', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(kl_divergence, 0.75)
# Example #2 (snippet separator from the original scrape; vote count: 0)
    def test_inference_remote_branching_importance_sampling(self):
        """Check importance-sampling posterior (remote model) against the analytic one.

        NOTE: locals `samples`, `posterior_probs`, `posterior_probs_correct`
        and `kl_divergence` are referenced *by name* in util.eval_print, so
        those names must not be renamed.
        """
        samples = importance_sampling_samples
        posterior_correct = util.empirical_to_categorical(
            self.true_posterior(), max_val=40)

        # Time inference plus the conversion to a categorical distribution.
        start = time.time()
        empirical = self._model.posterior_results(samples, observe={'obs': 6})
        posterior = util.empirical_to_categorical(empirical, max_val=40)
        add_importance_sampling_duration(time.time() - start)

        posterior_probs = util.to_numpy(posterior._probs)
        posterior_probs_correct = util.to_numpy(posterior_correct._probs)
        kl = pyprob.distributions.Distribution.kl_divergence(
            posterior, posterior_correct)
        kl_divergence = float(kl)

        util.eval_print('samples', 'posterior_probs',
                        'posterior_probs_correct', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertLess(kl_divergence, 0.75)
# Example #3 (snippet separator from the original scrape; vote count: 0)
    def test_model_remote_branching_importance_sampling(self):
        """Check importance-sampling posterior (remote model) against the analytic one.

        Locals `samples`, `posterior_probs`, `posterior_probs_correct` and
        `kl_divergence` are referenced by name in util.debug, so they must
        exist under exactly those names.
        """
        # BUG FIX: `samples` was used below (and listed in util.debug) without
        # ever being defined in this method, which raises NameError at run
        # time. Bind it to the module-level importance-sampling budget, as the
        # sibling importance-sampling tests do.
        samples = importance_sampling_samples
        observation = 6
        posterior_correct = util.empirical_to_categorical(
            self.true_posterior(observation), max_val=40)

        posterior = util.empirical_to_categorical(
            self._model.posterior_distribution(samples,
                                               observation=observation),
            max_val=40)
        posterior_probs = util.to_numpy(posterior._probs[0])
        posterior_probs_correct = util.to_numpy(posterior_correct._probs[0])

        kl_divergence = float(
            util.kl_divergence_categorical(posterior_correct, posterior))

        util.debug('samples', 'posterior_probs', 'posterior_probs_correct',
                   'kl_divergence')

        self.assertLess(kl_divergence, 0.25)
# Example #4 (snippet separator from the original scrape; vote count: 0)
    def test_model_remote_branching_random_walk_metropolis_hastings(self):
        """Check RMH posterior (remote model) against the analytic posterior.

        Locals `samples`, `posterior_probs`, `posterior_probs_correct` and
        `kl_divergence` are referenced by name in util.debug, so they must
        exist under exactly those names.
        """
        # BUG FIX: `samples` was used below (and listed in util.debug) without
        # ever being defined in this method, which raises NameError at run
        # time. Bind it to the module-level RMH sample budget used by the
        # other RMH tests — TODO confirm the global's name in this module.
        samples = random_walk_metropolis_hastings_samples
        observation = 6
        posterior_correct = util.empirical_to_categorical(
            self.true_posterior(observation), max_val=40)

        posterior = util.empirical_to_categorical(
            self._model.posterior_distribution(
                samples,
                observation=observation,
                inference_engine=pyprob.InferenceEngine.
                RANDOM_WALK_METROPOLIS_HASTINGS),
            max_val=40)
        posterior_probs = util.to_numpy(posterior._probs[0])
        posterior_probs_correct = util.to_numpy(posterior_correct._probs[0])

        kl_divergence = float(
            util.kl_divergence_categorical(posterior_correct, posterior))

        util.debug('samples', 'posterior_probs', 'posterior_probs_correct',
                   'kl_divergence')

        self.assertLess(kl_divergence, 0.25)
    def test_inference_mini_captcha_posterior_importance_sampling(self):
        """Mini-captcha: importance-sampling posterior per letter; check MAP accuracy and KL.

        NOTE: locals `samples`, `test_letters`, `map_estimates`, `accuracy`
        and `kl_divergence` are referenced *by name* in util.eval_print, so
        those names must not be renamed.
        """
        alphabet = self._model._alphabet
        # Split the overall sample budget evenly across the letters.
        samples = int(importance_sampling_samples / len(alphabet))
        test_letters = alphabet

        start = time.time()
        posteriors = []
        map_estimates = []
        for i in range(len(alphabet)):
            dist = self._model.posterior_distribution(
                samples,
                inference_engine=InferenceEngine.IMPORTANCE_SAMPLING,
                observe={'query_image': self._test_images[i]})
            posteriors.append(dist)
            map_estimates.append(alphabet[int(dist.mode)])
        add_importance_sampling_duration(time.time() - start)

        accuracy = sum(guess == truth for guess, truth
                       in zip(map_estimates, test_letters)) / len(test_letters)
        kl_terms = [
            pyprob.distributions.Distribution.kl_divergence(
                util.empirical_to_categorical(p, max_val=len(alphabet) - 1), tp)
            for (p, tp) in zip(posteriors, self._true_posteriors)]
        kl_divergence = float(sum(kl_terms))

        util.eval_print('samples', 'test_letters', 'map_estimates',
                        'accuracy', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(accuracy, 0.9)
        self.assertLess(kl_divergence, 0.25)
    def test_inference_mini_captcha_posterior_random_walk_metropolis_hastings(self):
        """Mini-captcha: RMH posterior per letter (with burn-in); check MAP accuracy and KL.

        NOTE: locals `samples`, `test_letters`, `map_estimates`, `accuracy`
        and `kl_divergence` are referenced *by name* in util.eval_print, so
        those names must not be renamed.
        """
        alphabet = self._model._alphabet
        # Split both the chain length and the burn-in evenly across letters.
        samples = int(random_walk_metropolis_hastings_samples / len(alphabet))
        burn_in = int(random_walk_metropolis_hastings_burn_in / len(alphabet))
        test_letters = alphabet

        start = time.time()
        posteriors = []
        map_estimates = []
        for i in range(len(alphabet)):
            chain = self._model.posterior_distribution(
                samples,
                inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS,
                observe={'query_image': self._test_images[i]})
            dist = chain[burn_in:]  # discard the burn-in prefix
            posteriors.append(dist)
            map_estimates.append(alphabet[int(dist.combine_duplicates().mode)])
        add_random_walk_metropolis_hastings_duration(time.time() - start)

        accuracy = sum(guess == truth for guess, truth
                       in zip(map_estimates, test_letters)) / len(test_letters)
        kl_terms = [
            pyprob.distributions.Distribution.kl_divergence(
                util.empirical_to_categorical(p, max_val=len(alphabet) - 1), tp)
            for (p, tp) in zip(posteriors, self._true_posteriors)]
        kl_divergence = float(sum(kl_terms))

        util.eval_print('samples', 'test_letters', 'map_estimates',
                        'accuracy', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertGreater(accuracy, 0.9)
        self.assertLess(kl_divergence, 0.25)
    def test_inference_mini_captcha_posterior_importance_sampling_with_inference_network(self):
        """Mini-captcha: train an inference network, then check IS-with-network posterior.

        NOTE: locals `samples`, `test_letters`, `map_estimates`, `accuracy`
        and `kl_divergence` are referenced *by name* in util.eval_print, so
        those names must not be renamed.
        """
        alphabet = self._model._alphabet
        # Split the overall sample budget evenly across the letters.
        samples = int(importance_sampling_with_inference_network_samples / len(alphabet))
        test_letters = alphabet

        # Train the amortized proposal before running inference (untimed).
        self._model.learn_inference_network(
            num_traces=importance_sampling_with_inference_network_training_traces,
            observe_embeddings={'query_image': {'dim': 32,
                                                'reshape': [1, 28, 28],
                                                'embedding': ObserveEmbedding.CNN2D5C}})

        start = time.time()
        posteriors = []
        map_estimates = []
        for i in range(len(alphabet)):
            dist = self._model.posterior_distribution(
                samples,
                inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK,
                observe={'query_image': self._test_images[i]})
            posteriors.append(dist)
            map_estimates.append(alphabet[int(dist.mode)])
        add_importance_sampling_with_inference_network_duration(time.time() - start)

        accuracy = sum(guess == truth for guess, truth
                       in zip(map_estimates, test_letters)) / len(test_letters)
        kl_terms = [
            pyprob.distributions.Distribution.kl_divergence(
                util.empirical_to_categorical(p, max_val=len(alphabet) - 1), tp)
            for (p, tp) in zip(posteriors, self._true_posteriors)]
        kl_divergence = float(sum(kl_terms))

        util.eval_print('samples', 'test_letters', 'map_estimates',
                        'accuracy', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertGreater(accuracy, 0.9)
        self.assertLess(kl_divergence, 0.25)