Example #1
    def test_inference_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {
            'obs{}'.format(i): self._observation[i]
            for i in range(len(self._observation))
        }
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_distribution(
            samples,
            inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS,
            observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(
            sum([
                pyprob.distributions.Distribution.kl_divergence(
                    Categorical(i + util._epsilon),
                    Categorical(j + util._epsilon))
                for (i, j) in zip(posterior_mean, posterior_mean_correct)
            ]))

        util.eval_print('samples', 'burn_in', 'posterior_mean',
                        'posterior_mean_correct', 'l2_distance',
                        'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)
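The per-timestep KL above compares two epsilon-smoothed categorical mean vectors. As a quick cross-check, torch's own kl_divergence gives the same quantity for a pair of categorical distributions (the probability vectors below are illustrative):

import torch
from torch.distributions import Categorical as TorchCategorical
from torch.distributions.kl import kl_divergence

p = TorchCategorical(probs=torch.tensor([0.3775, 0.3092, 0.3133]))
q = TorchCategorical(probs=torch.tensor([0.3, 0.3, 0.4]))
print(float(kl_divergence(p, q)))  # non-negative scalar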
Example #2
    def test_inference_hmm_posterior_importance_sampling_with_inference_network(
            self):
        samples = importance_sampling_with_inference_network_samples
        observation = {
            'obs{}'.format(i): self._observation[i]
            for i in range(len(self._observation))
        }
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.03

        self._model.reset_inference_network()
        self._model.learn_inference_network(
            num_traces=importance_sampling_with_inference_network_training_traces,
            observe_embeddings={
                'obs{}'.format(i): {'depth': 2, 'dim': 16}
                for i in range(len(observation))
            })

        start = time.time()
        posterior = self._model.posterior_return(
            samples,
            inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK,
            observe=observation)
        add_importance_sampling_with_inference_network_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(
            posterior.effective_sample_size)

        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(
            sum([
                pyprob.distributions.Distribution.kl_divergence(
                    Categorical(i + util._epsilon),
                    Categorical(j + util._epsilon))
                for (i, j) in zip(posterior_mean, posterior_mean_correct)
            ]))

        util.eval_print('samples', 'posterior_mean_unweighted',
                        'posterior_mean', 'posterior_mean_correct',
                        'posterior_effective_sample_size',
                        'posterior_effective_sample_size_min', 'l2_distance',
                        'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(
            kl_divergence)

        self.assertGreater(posterior_effective_sample_size,
                           posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)
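The assertion above requires the effective sample size to exceed 3% of the trace budget. A minimal sketch of the standard ESS statistic, ESS = 1 / sum_i w_i^2 over normalized importance weights, which we assume is what posterior.effective_sample_size reports:

import torch

log_w = torch.randn(1000)           # stand-in log-weights
w = torch.softmax(log_w, dim=0)     # normalized importance weights
ess = 1.0 / torch.sum(w ** 2)       # ranges from 1 to len(w)
print(float(ess))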
Example #3
    def test_dist_categorical(self):
        dist_sample_shape_correct = [1]
        dist_log_probs_correct = [-2.30259]

        dist = Categorical([0.1, 0.2, 0.7])

        dist_sample_shape = list(dist.sample().size())
        dist_log_probs = util.to_numpy(dist.log_prob(0))

        util.debug('dist_sample_shape', 'dist_sample_shape_correct', 'dist_log_probs', 'dist_log_probs_correct')

        self.assertEqual(dist_sample_shape, dist_sample_shape_correct)
        self.assertTrue(np.allclose(dist_log_probs, dist_log_probs_correct, atol=0.1))
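The expected value -2.30259 is just log(0.1), the probability assigned to class 0. A quick cross-check with torch's Categorical, whose semantics we assume pyprob's wrapper matches:

import torch
from torch.distributions import Categorical as TorchCategorical

d = TorchCategorical(probs=torch.tensor([0.1, 0.2, 0.7]))
print(float(d.log_prob(torch.tensor(0))))  # -2.3026 = log(0.1)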
Example #4
    def test_dist_categorical_batched(self):
        dist_sample_shape_correct = [2]
        dist_log_probs_correct = [[-2.30259], [-0.693147]]

        dist = Categorical([[0.1, 0.2, 0.7],
                            [0.2, 0.5, 0.3]])

        dist_sample_shape = list(dist.sample().size())
        dist_log_probs = util.to_numpy(dist.log_prob([[0, 1]]))

        util.debug('dist_sample_shape', 'dist_sample_shape_correct', 'dist_log_probs', 'dist_log_probs_correct')

        self.assertEqual(dist_sample_shape, dist_sample_shape_correct)
        self.assertTrue(np.allclose(dist_log_probs, dist_log_probs_correct, atol=0.1))
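Here the two expected values are log(0.1) for value 0 under the first row and log(0.5) for value 1 under the second. The same batched lookup with torch's Categorical (an assumed equivalence, as above):

import torch
from torch.distributions import Categorical as TorchCategorical

d = TorchCategorical(probs=torch.tensor([[0.1, 0.2, 0.7],
                                         [0.2, 0.5, 0.3]]))
print(d.log_prob(torch.tensor([0, 1])))  # tensor([-2.3026, -0.6931])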
Example #5
    def __init__(self, *args, **kwargs):
        class MiniCaptcha(Model):
            def __init__(self, alphabet=['A', 'B', 'C', 'D', 'E', 'F'], noise=0.1):
                self._alphabet = alphabet
                self._probs = [1/len(alphabet) for i in range(len(alphabet))]
                self._noise = noise
                super().__init__('MiniCaptcha')

            def render(self, text, size=18, height=28, width=28, x=6, y=6):
                pil_font = ImageFont.truetype('Ubuntu-B.ttf', size=size)
                text_width, text_height = pil_font.getsize(text)
                # PIL's Image.new expects size as (width, height)
                canvas = Image.new('RGB', [width, height], (255, 255, 255))
                draw = ImageDraw.Draw(canvas)
                draw.text((x, y), text, font=pil_font, fill='#000000')
                return torch.from_numpy(1 - (np.asarray(canvas) / 255.0))[:, :, 0].unsqueeze(0).float()

            def forward(self):
                letter_id = pyprob.sample(Categorical(self._probs))
                image = self.render(self._alphabet[letter_id]).view(-1)
                likelihood = Normal(image, self._noise)
                pyprob.observe(likelihood, name='query_image')
                return letter_id

        self._model = MiniCaptcha()
        self._test_images = [self._model.render(letter).view(-1) for letter in self._model._alphabet]
        self._true_posteriors = [Categorical(util.one_hot(len(self._model._alphabet), i) + util._epsilon) for i in range(len(self._model._alphabet))]
        super().__init__(*args, **kwargs)
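A minimal usage sketch for this fixture, assuming the MiniCaptcha class above is in scope: render a test letter, then query the posterior over letter identity by supplying the image under the 'query_image' name registered in forward(). The call follows the posterior_distribution pattern used elsewhere on this page; the trace budget is illustrative:

model = MiniCaptcha()
test_image = model.render('A').view(-1)
posterior = model.posterior_distribution(
    1000,  # illustrative number of traces
    observe={'query_image': test_image})
print(posterior.mean)  # should concentrate near letter index 0 ('A')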
Example #6
    def __init__(self, *args, **kwargs):
        # http://www.robots.ox.ac.uk/~fwood/assets/pdf/Wood-AISTATS-2014.pdf
        class HiddenMarkovModel(Model):
            def __init__(self, init_dist, trans_dists, obs_dists, obs_length):
                self.init_dist = init_dist
                self.trans_dists = trans_dists
                self.obs_dists = obs_dists
                self.obs_length = obs_length
                super().__init__('Hidden Markov model')

            def forward(self):
                states = [pyprob.sample(self.init_dist)]
                for i in range(self.obs_length):
                    state = pyprob.sample(self.trans_dists[int(states[-1])])
                    pyprob.observe(self.obs_dists[int(state)], name='obs{}'.format(i))
                    states.append(state)
                return torch.stack([util.one_hot(3, int(s)) for s in states])

        init_dist = Categorical([1, 1, 1])
        trans_dists = [Categorical([0.1, 0.5, 0.4]),
                       Categorical([0.2, 0.2, 0.6]),
                       Categorical([0.15, 0.15, 0.7])]
        obs_dists = [Normal(-1, 1),
                     Normal(1, 1),
                     Normal(0, 1)]

        self._observation = [0.9, 0.8, 0.7, 0.0, -0.025, -5.0, -2.0, -0.1, 0.0, 0.13, 0.45, 6, 0.2, 0.3, -1, -1]
        self._model = HiddenMarkovModel(init_dist, trans_dists, obs_dists, len(self._observation))
        self._posterior_mean_correct = util.to_tensor([[0.3775, 0.3092, 0.3133],
                                                       [0.0416, 0.4045, 0.5539],
                                                       [0.0541, 0.2552, 0.6907],
                                                       [0.0455, 0.2301, 0.7244],
                                                       [0.1062, 0.1217, 0.7721],
                                                       [0.0714, 0.1732, 0.7554],
                                                       [0.9300, 0.0001, 0.0699],
                                                       [0.4577, 0.0452, 0.4971],
                                                       [0.0926, 0.2169, 0.6905],
                                                       [0.1014, 0.1359, 0.7626],
                                                       [0.0985, 0.1575, 0.7440],
                                                       [0.1781, 0.2198, 0.6022],
                                                       [0.0000, 0.9848, 0.0152],
                                                       [0.1130, 0.1674, 0.7195],
                                                       [0.0557, 0.1848, 0.7595],
                                                       [0.2017, 0.0472, 0.7511],
                                                       [0.2545, 0.0611, 0.6844]])
        super().__init__(*args, **kwargs)
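The HMM tests address each observed timestep by the 'obs{i}' name that forward() registers via pyprob.observe, so the raw observation list has to be turned into a name-keyed dict before inference. A sketch of that mapping, assuming model and observation are the fixture's HiddenMarkovModel instance and observation list (the trace budget is illustrative):

observe = {'obs{}'.format(i): v for i, v in enumerate(observation)}
posterior = model.posterior_distribution(1000, observe=observe)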
Example #7
    def test_model_remote_hmm_posterior_importance_sampling(self):
        observation = self._observation
        posterior_mean_correct = self._posterior_mean_correct

        posterior = self._model.posterior_distribution(samples,
                                                       observation=observation)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean

        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(
            sum([
                util.kl_divergence_categorical(Categorical(i), Categorical(j))
                for (i, j) in zip(posterior_mean, posterior_mean_correct)
            ]))

        util.debug('samples', 'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'l2_distance', 'kl_divergence')

        self.assertLess(l2_distance, 10)
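This variant computes the KL term with util.kl_divergence_categorical rather than the Distribution.kl_divergence used in the other tests. Written out, the categorical KL is sum_i p_i * log(p_i / q_i); a sketch under that assumption:

import torch

def kl_categorical(p, q):
    p = torch.as_tensor(p, dtype=torch.float)
    q = torch.as_tensor(q, dtype=torch.float)
    return torch.sum(p * (torch.log(p) - torch.log(q)))

print(float(kl_categorical([0.1, 0.2, 0.7], [0.2, 0.5, 0.3])))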
Example #8
    def test_inference_hmm_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        observation = {
            'obs{}'.format(i): self._observation[i]
            for i in range(len(self._observation))
        }
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.0015

        start = time.time()
        posterior = self._model.posterior_distribution(samples,
                                                       observe=observation)
        add_importance_sampling_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(
            posterior.effective_sample_size)

        print(posterior[0])  # spot-check the first posterior trace's return value
        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(
            sum([
                pyprob.distributions.Distribution.kl_divergence(
                    Categorical(i + util._epsilon),
                    Categorical(j + util._epsilon))
                for (i, j) in zip(posterior_mean, posterior_mean_correct)
            ]))

        util.eval_print('samples', 'posterior_mean_unweighted',
                        'posterior_mean', 'posterior_mean_correct',
                        'posterior_effective_sample_size',
                        'posterior_effective_sample_size_min', 'l2_distance',
                        'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size,
                           posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1)
Example #9
    def test_model_remote_hmm_posterior_random_walk_metropolis_hastings(self):
        observation = self._observation
        posterior_mean_correct = self._posterior_mean_correct

        posterior = self._model.posterior_distribution(
            samples,
            inference_engine=pyprob.InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS,
            observation=observation)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean

        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(
            sum([
                util.kl_divergence_categorical(Categorical(i), Categorical(j))
                for (i, j) in zip(posterior_mean, posterior_mean_correct)
            ]))

        util.debug('samples', 'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'l2_distance', 'kl_divergence')

        self.assertLess(l2_distance, 10)
Example #10
    def forward(self, observation=None):
        categorical_value = pyprob.sample(Categorical([0.1, 0.1, 0.8]))
        normal_value = pyprob.sample(Normal(5., 2.))
        return float(categorical_value), normal_value
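A hypothetical wrapper showing how a forward() like this is hosted in a pyprob.Model subclass and sampled from; the class name TwoVariableModel and the trace budget are illustrative, and Model.prior_distribution is assumed available in the pyprob version these examples target:

import pyprob
from pyprob import Model
from pyprob.distributions import Categorical, Normal

class TwoVariableModel(Model):
    def __init__(self):
        super().__init__('Two-variable model')

    def forward(self, observation=None):
        categorical_value = pyprob.sample(Categorical([0.1, 0.1, 0.8]))
        normal_value = pyprob.sample(Normal(5., 2.))
        return float(categorical_value), normal_value

prior = TwoVariableModel().prior_distribution(num_traces=100)
print(prior.sample())  # one (categorical, normal) pair from the prior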
Example #11
def get_sample(s):
    address = s.Address().decode("utf-8")
    distribution = None
    # sample.instance = s.Instance()
    value = NDArray_to_Tensor(s.Value())
    distribution_type = s.DistributionType()
    # Hoist the generated flatbuffers enum holder so the branches below
    # read cleanly instead of re-instantiating it in every comparison.
    dist_enum = infcomp.protocol.Distribution.Distribution()
    if distribution_type != dist_enum.NONE:
        if distribution_type == dist_enum.UniformDiscrete:
            p = infcomp.protocol.UniformDiscrete.UniformDiscrete()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = UniformDiscrete(p.PriorMin(), p.PriorSize())
            if value.dim() > 0:
                value = util.one_hot(distribution.prior_size,
                                     int(value[0]) - distribution.prior_min)
        elif distribution_type == dist_enum.MultivariateNormal:
            p = infcomp.protocol.MultivariateNormal.MultivariateNormal()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = MultivariateNormal(NDArray_to_Tensor(p.PriorMean()),
                                              NDArray_to_Tensor(p.PriorCov()))
        elif distribution_type == dist_enum.Normal:
            p = infcomp.protocol.Normal.Normal()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = Normal(p.PriorMean(), p.PriorStd())
        elif distribution_type == dist_enum.Flip:
            distribution = Flip()
        elif distribution_type == dist_enum.Discrete:
            p = infcomp.protocol.Discrete.Discrete()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = Discrete(p.PriorSize())
            if value.dim() > 0:
                value = util.one_hot(distribution.prior_size, int(value[0]))
        elif distribution_type == dist_enum.Categorical:
            p = infcomp.protocol.Categorical.Categorical()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = Categorical(p.PriorSize())
            if value.dim() > 0:
                value = util.one_hot(distribution.prior_size, int(value[0]))
        elif distribution_type == dist_enum.UniformContinuous:
            p = infcomp.protocol.UniformContinuous.UniformContinuous()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = UniformContinuous(p.PriorMin(), p.PriorMax())
        elif distribution_type == dist_enum.UniformContinuousAlt:
            p = infcomp.protocol.UniformContinuousAlt.UniformContinuousAlt()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = UniformContinuousAlt(p.PriorMin(), p.PriorMax())
        elif distribution_type == dist_enum.Laplace:
            p = infcomp.protocol.Laplace.Laplace()
            p.Init(s.Distribution().Bytes, s.Distribution().Pos)
            distribution = Laplace(p.PriorLocation(), p.PriorScale())
        elif distribution_type == dist_enum.Gamma:
            distribution = Gamma()
        elif distribution_type == dist_enum.Beta:
            distribution = Beta()
        else:
            util.logger.log(
                'get_sample: Unknown distribution:Distribution id: {0}.'.format(
                    distribution_type))
    sample = Sample(address, distribution, value)
    return sample
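For the discrete families above, the raw wire value is re-encoded as a one-hot vector sized to the prior's support before being handed on. A sketch of what util.one_hot(size, index) is assumed to produce:

import torch

def one_hot(size, index):
    v = torch.zeros(size)    # length-`size` indicator vector
    v[index] = 1.0
    return v

print(one_hot(5, 2))  # tensor([0., 0., 1., 0., 0.])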
Example #12
    def forward(self, observation=None):
        categorical_value = pyprob.sample(Categorical([0.1, 0.1, 0.8]))
        normal_value = pyprob.sample(Normal(5, 2))
        return categorical_value, normal_value