Example #1
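The snippets below look like unittest methods from the pyugm project (dirko/pyugm) and omit their imports. A minimal import sketch, assuming pyugm's module layout; the module paths are guesses, only the class names are taken from the snippets themselves:

import numpy as np
from numpy.testing import assert_array_almost_equal
# The pyugm module paths below are assumptions; only the class names appear in the examples.
from pyugm.factor import DiscreteFactor
from pyugm.model import Model
from pyugm.infer_message import (LoopyBeliefUpdateInference, ExhaustiveEnumeration,
                                 FloodingProtocol, DistributeCollectProtocol,
                                 LoopyDistributeCollectProtocol)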
    def test_belief_update_larger_tree(self):
        a = DiscreteFactor([0, 1], data=np.array([[1, 2], [2, 2]], dtype=np.float64))
        b = DiscreteFactor([1, 2], data=np.array([[3, 2], [1, 2]], dtype=np.float64))
        c = DiscreteFactor([2, 3], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
        d = DiscreteFactor([3], data=np.array([2, 1], dtype=np.float64))
        e = DiscreteFactor([0], data=np.array([4, 1], dtype=np.float64))
        f = DiscreteFactor([2], data=np.array([1, 2], dtype=np.float64))
        #
        # a{0 1} - b{1 2} - c{2 3} - d{3}
        #    |       |
        # e{0}     f{2}
        #
        model = Model([a, b, c, d, e, f])
        print('edges', model.edges)
        update_order = DistributeCollectProtocol(model)
        inference = LoopyBeliefUpdateInference(model, update_order=update_order)

        exact_inference = ExhaustiveEnumeration(model)
        exhaustive_answer = exact_inference.calibrate().belief

        print('bp')
        change = inference.calibrate()
        print(change)

        for factor in model.factors:
            print(factor)

        for variable in model.variables:
            marginal_beliefs = inference.get_marginals(variable)
            true_marginal = exhaustive_answer.marginalize([variable])
            for marginal in marginal_beliefs:
                assert_array_almost_equal(true_marginal.normalized_data, marginal.normalized_data)

        expected_ln_Z = np.log(exhaustive_answer.data.sum())
        self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
Example #2
    def test_update_beliefs_disconnected(self):
        a = DiscreteFactor([(1, 2), (2, 2)], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
        b = DiscreteFactor([(2, 2), (3, 2)], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
        c = DiscreteFactor([(4, 2), (5, 2)], data=np.array([[5, 6], [8, 9]], dtype=np.float64))
        d = DiscreteFactor([(5, 2), (6, 2)], data=np.array([[1, 6], [2, 3]], dtype=np.float64))
        e = DiscreteFactor([(7, 2), (8, 2)], data=np.array([[2, 1], [2, 3]], dtype=np.float64))

        model = Model([a, b, c, d, e])
        for factor in model.factors:
            print('before', factor, np.sum(factor.data))

        update_order = DistributeCollectProtocol(model)
        inference = LoopyBeliefUpdateInference(model, update_order=update_order)

        exact_inference = ExhaustiveEnumeration(model)
        exhaustive_answer = exact_inference.calibrate().belief
        print('Exhaust', np.sum(exhaustive_answer.data))

        change = inference.calibrate()
        print(change)

        for factor in model.factors:
            print(factor, np.sum(factor.data))

        for variable in model.variables:
            marginal_beliefs = inference.get_marginals(variable)
            true_marginal = exhaustive_answer.marginalize([variable])
            for marginal in marginal_beliefs:
                assert_array_almost_equal(true_marginal.normalized_data, marginal.normalized_data)

        expected_ln_Z = np.log(exhaustive_answer.data.sum())
        self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
Example #3
File: learn.py Project: dirko/pyugm
    def log_likelihood_and_gradient(self, evidence):
        """
        Run inference on the model to find the log-likelihood of the model given the evidence, and the gradient of
            the log-likelihood with respect to the model parameters.
        :param evidence: A dictionary where the key is a variable name and the value its observed value.
        :returns: The log-likelihood and a vector of derivatives.
        """
        self._update_order.reset()
        inference = LoopyBeliefUpdateInference(self._model, update_order=self._update_order)
        inference.calibrate(parameters=self.parameters)
        log_z_total = inference.partition_approximation()
        model_expected_counts = self._accumulate_expected_counts(inference)

        self._update_order.reset()
        inference = LoopyBeliefUpdateInference(self._model, update_order=self._update_order)
        inference.calibrate(evidence, parameters=self.parameters)
        log_z_observed = inference.partition_approximation()
        empirical_expected_counts = self._accumulate_expected_counts(inference)

        log_likelihood = log_z_observed - log_z_total
        derivative = empirical_expected_counts - model_expected_counts

        if self._dimension > 0:
            derivative += -numpy.dot(self._prior_precision, (self._parameters - self._prior_location))
            log_likelihood += -0.5 * numpy.dot(numpy.dot((self._parameters - self._prior_location).T,
                                                         self._prior_precision),
                                               (self._parameters - self._prior_location))
            log_likelihood += self._prior_normaliser
        return log_likelihood, derivative
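The branch guarded by self._dimension > 0 above adds a Gaussian prior's log-density (plus a precomputed normaliser) to the log-likelihood and the matching term to the gradient. A self-contained numeric sketch of that penalty, with made-up values for the prior:

import numpy
# Made-up values, purely to illustrate the prior term in log_likelihood_and_gradient.
parameters = numpy.array([0.5, -1.0])
prior_location = numpy.zeros(2)        # prior mean
prior_precision = numpy.eye(2) * 2.0   # prior inverse covariance
diff = parameters - prior_location
log_prior = -0.5 * numpy.dot(numpy.dot(diff.T, prior_precision), diff)  # added to the log-likelihood
gradient_term = -numpy.dot(prior_precision, diff)                       # added to the derivative
print(log_prior, gradient_term)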
Example #4
    def test_update_beliefs_small(self):
        a = DiscreteFactor([0, 1])
        b = DiscreteFactor([1, 2])
        model = Model([a, b])
        update_order1 = FloodingProtocol(model=model, max_iterations=2)
        inference = LoopyBeliefUpdateInference(model, update_order1)
        #                       0
        #                     0  1
        # Phi* = Sum_{0} 1 0 [ 1 1 ]  =  1 0 [ 2 ]
        #                  1 [ 1 1 ]       1 [ 2 ]
        #
        #                                        1               1
        # Psi* = Phi* x Psi  =  1 0 [2] x 2 0 [ 1 1 ]  =  2 0 [ 2 2 ]
        #        Phi              1 [2]     1 [ 1 1 ]       1 [ 2 2 ]
        #
        #                        1           1
        # Phi** = Sum_{2} 2 0 [ 2 2 ]  =  [ 4 4 ]
        #                   1 [ 2 2 ]
        #
        #                            1              0               0
        # Psi** = Phi** x Psi  =  [ 2 2 ] x  1 0 [ 1 1 ]  =  1 0 [ 2 2 ]
        #         Phi*                         1 [ 1 1 ]       1 [ 2 2 ]
        #
        #             1
        # Phi*** = [ 4 4 ]
        #                                 1
        # Psi*** = Phi*** x Psi* = 2 0 [ 2 2 ]
        #          Phi**             1 [ 2 2 ]
        #
        inference.calibrate()

        final_a_data = np.array([[2, 2],
                                 [2, 2]], dtype=np.float64) / 8.0
        final_b_data = np.array([[2, 2],
                                 [2, 2]], dtype=np.float64) / 8.0

        belief_a = inference.beliefs[a]
        assert_array_almost_equal(final_a_data, belief_a.normalized_data)
        belief_b = inference.beliefs[b]
        assert_array_almost_equal(final_b_data, belief_b.normalized_data)
Example #5
    def test_belief_update_long_tree(self):
        label_template = np.array([['same', 'different'],
                                   ['different', 'same']])
        observation_template = np.array([['obs_low'] * 32,
                                         ['obs_high'] * 32])
        observation_template[0, 13:17] = 'obs_high'
        observation_template[1, 13:17] = 'obs_low'
        N = 2
        pairs = [DiscreteFactor([(i, 2), (i + 1, 2)], parameters=label_template) for i in range(N - 1)]
        obs = [DiscreteFactor([(i, 2), (i + N, 32)], parameters=observation_template) for i in range(N)]
        repe = [16., 16., 14., 13., 15., 16., 14., 13., 15., 16., 15.,
                13., 14., 16., 16., 15., 13., 13., 14., 14., 13., 14.,
                14., 14., 14., 14., 14., 14., 14., 14., 14., 14., 14.,
                14., 14., 14., 14., 14., 14., 14., 14., 9., 4., 4.,
                4., 4., 5., 3., 2., 3., 2., 3., 3., 3., 3.,
                3., 3., 3., 3., 4., 4., 5., 5., 5.]
        evidence = {i + N: 0 if 13 <= repe[i % len(repe)] < 17 else 1 for i in range(N)}

        model = Model(pairs + obs)
        parameters = {'same': 2.0, 'different': -1.0, 'obs_high': 0.0, 'obs_low': -0.0}

        update_order = FloodingProtocol(model, max_iterations=4)
        inference = LoopyBeliefUpdateInference(model, update_order=update_order)
        inference.calibrate(evidence, parameters)

        exact_inference = ExhaustiveEnumeration(model)
        exhaustive_answer = exact_inference.calibrate(evidence, parameters).belief

        for i in range(N):
            expected_marginal = exhaustive_answer.marginalize([i])
            for actual_marginal in inference.get_marginals(i):
                print(i)
                print(expected_marginal.normalized_data)
                print(actual_marginal.normalized_data)
                assert_array_almost_equal(expected_marginal.normalized_data, actual_marginal.normalized_data)

        expected_ln_Z = np.log(exhaustive_answer.data.sum())
        self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation())
Example #6
    def test_loopy_distribute_collect(self):
        a = DiscreteFactor([0, 1], data=np.array([[1, 2], [2, 2]], dtype=np.float64))
        b = DiscreteFactor([1, 2], data=np.array([[3, 2], [1, 2]], dtype=np.float64))
        c = DiscreteFactor([2, 0], data=np.array([[1, 2], [3, 4]], dtype=np.float64))
        #
        # a{0 1} - b{1 2}
        #    \       /
        #      c{2 0}
        #
        # a{0 1} - {0} - c{2 0}
        #
        model = Model([a, b, c])
        update_order = LoopyDistributeCollectProtocol(model, max_iterations=40)
        inference = LoopyBeliefUpdateInference(model, update_order=update_order)
        inference.calibrate()

        exact_inference = ExhaustiveEnumeration(model)
        exhaustive_answer = exact_inference.calibrate().belief

        for factor in model.factors:
            print(factor, np.sum(factor.data))
        for var in model.variables_to_factors.keys():
            print(var, exhaustive_answer.marginalize([var]).data)
        print()
        for var in model.variables_to_factors.keys():
            print(var, inference.get_marginals(var)[0].data)

        for variable in model.variables:
            for factor in inference.get_marginals(variable):
                expected_table = exhaustive_answer.marginalize([variable])
                actual_table = factor.marginalize([variable])
                assert_array_almost_equal(expected_table.normalized_data, actual_table.normalized_data, decimal=2)

        expected_ln_Z = np.log(exhaustive_answer.data.sum())
        self.assertAlmostEqual(expected_ln_Z, inference.partition_approximation(), places=1)
Example #7
def lbp_combine_best_patches(patch_image_directory,
                             context_image,
                             prediction_fn,
                             min_stride=1,
                             max_iters=1,
                             max_stride=3):
    # open the context image
    context = Image.open(context_image)

    # get smallest pw and images as np arrays
    smallest_pw, patch_images = get_stylized_images(patch_image_directory)

    # get context image scaled down so that it's the same size as the stylized imgs
    patchR, patchC, _ = patch_images[0].shape
    context = context.resize((patchC, patchR), Image.LANCZOS)  # LANCZOS is the modern name for ANTIALIAS
    context = np.array(context)

    lbf_folder = 'lbf_output/'
    if not os.path.exists(lbf_folder):
        os.makedirs(lbf_folder)

    inner_folder = lbf_folder + prediction_fn + '_folder/'
    if not os.path.exists(inner_folder):
        os.makedirs(inner_folder)

    stride = min_stride
    while stride <= max_stride:
        evidence, factors = construct_graph(patch_images, context, smallest_pw,
                                            stride)
        model = Model(factors)

        # Get some feedback on how inference is converging by printing the iteration count at each callback.
        def reporter(infe, orde):
            print('{:3}'.format(orde.total_iterations))

        order = FloodingProtocol(model, max_iterations=max_iters)
        inference = LoopyBeliefUpdateInference(model, order, callback=reporter)
        inference.calibrate(evidence)

        K = len(patch_images)
        rrange = range(0, patchR - smallest_pw + 1, stride)
        crange = range(0, patchC - smallest_pw + 1, stride)
        num_r = len(rrange)  # number of patches vertically
        num_c = len(crange)  # number of patches horizontally

        ff_labels = [[None for i in range(num_c)] for j in range(num_r)]
        ff_r = 0
        for r in rrange:
            ff_c = 0
            for c in crange:
                variable_name = 'label_{}_{}'.format(r, c)

                # first factor is the context-style factor that we want
                label_factor = inference.get_marginals(variable_name)[0]

                # save the actual patch location to make it easier to remap them later on
                ff_labels[ff_r][ff_c] = [[r, c], label_factor.normalized_data]
                ff_c += 1
            ff_r += 1

        # save the labels so they can be easily reused
        ff_labels = np.array(ff_labels)
        np.save(
            "%s%s_stride_%d_first_factor_label_data" %
            (inner_folder, prediction_fn, stride), ff_labels)
        stride += 1
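A call sketch for the routine above; the directory, image path, and prediction name are placeholders, and get_stylized_images, construct_graph, Model, FloodingProtocol, and LoopyBeliefUpdateInference are assumed to be importable in the enclosing module:

# Hypothetical invocation; all paths and names are placeholders.
lbp_combine_best_patches(patch_image_directory='stylized_patches/',
                         context_image='context.jpg',
                         prediction_fn='demo_run',
                         min_stride=1,
                         max_iters=5,
                         max_stride=3)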