Example #1
    def test_exhaustive_enumeration(self):
        a = DiscreteFactor([(0, 2), (1, 3)], data=np.array([[1, 2, 3], [4, 5, 6]]))
        b = DiscreteFactor([(0, 2), (2, 2)], data=np.array([[1, 2], [2, 1]]))
        # 0 1 2 |
        #-------+--------
        # 0 0 0 | 1x1=1
        # 0 0 1 | 1x2=2
        # 0 1 0 | 2x1=2
        # 0 1 1 | 2x2=4
        # 0 2 0 | 3x1=3
        # 0 2 1 | 3x2=6
        # 1 0 0 | 4x2=8
        # 1 0 1 | 4x1=4
        # 1 1 0 | 5x2=10
        # 1 1 1 | 5x1=5
        # 1 2 0 | 6x2=12
        # 1 2 1 | 6x1=6

        model = Model([a, b])
        exact_inference = ExhaustiveEnumeration(model)
        c = exact_inference.calibrate().belief

        d = DiscreteFactor([(0, 2), (1, 3), (2, 2)])
        d._data = np.array([1, 2, 2, 4, 3, 6, 8, 4, 10, 5, 12, 6]).reshape(2, 3, 2)

        self.assertEqual(d.variables, c.variables)
        self.assertEqual(d.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(d._data, c.data)
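As a sanity check on the enumeration table above, the expected joint can be reproduced with plain numpy broadcasting; this sketch uses only numpy (not pyugm) and simply mirrors the table's arithmetic:

import numpy as np

a_data = np.array([[1, 2, 3], [4, 5, 6]])   # variable 0 on axis 0, variable 1 on axis 1
b_data = np.array([[1, 2], [2, 1]])         # variable 0 on axis 0, variable 2 on axis 1

# Align both tables on the joint axis order (variable 0, 1, 2) and multiply.
joint = a_data[:, :, None] * b_data[:, None, :]
print(joint)   # matches d._data above, i.e. [1 2 2 4 3 6 8 4 10 5 12 6] reshaped to (2, 3, 2)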
Example #2
    def test_marginalize_larger(self):
        data = np.array([[[1, 2],
                          [5, 8],
                          [9, 10]],
                        [[11, 12],
                         [15, 18],
                         [19, 21]]])
        a = DiscreteFactor([(0, 2), (4, 3), (20, 2)], data=data)

        data = np.array([35, 96])
        c = DiscreteFactor([(0, 2)], data=data)

        b = a.marginalize([0])
        print(b.data)
        print(c.data)
        print(b.data.shape)
        print(c.data.shape)
        self.assertEqual(b.variables, c.variables)
        self.assertEqual(b.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(b.data, c.data)

        data = np.array([[12, 14],
                         [20, 26],
                         [28, 31]])
        e = DiscreteFactor([(4, 3), (20, 2)], data=data)

        d = a.marginalize([4, 20])
        print(d.data)
        print(e.data)
        print(d.data.shape)
        print(e.data.shape)
        self.assertEqual(d.variables, e.variables)
        self.assertEqual(d.axis_to_variable, e.axis_to_variable)
        assert_array_almost_equal(d.data, e.data)
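The expected values above are just the original table summed over the dropped axes; a quick check in plain numpy (independent of pyugm):

import numpy as np

data = np.array([[[1, 2], [5, 8], [9, 10]],
                 [[11, 12], [15, 18], [19, 21]]])   # axes: variables 0, 4, 20

print(data.sum(axis=(1, 2)))   # keep variable 0 -> [35 96]
print(data.sum(axis=0))        # keep variables 4 and 20 -> [[12 14] [20 26] [28 31]]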
Example #3
    def test_marginalize_small(self):
        data = np.array([[1, 2],
                         [5, 8]])
        a = DiscreteFactor([(0, 2), (1, 2)], data=data)

        data = np.array([3, 13])
        c = DiscreteFactor([(0, 2)], data=data)

        b = a.marginalize([0])
        print(b.data)
        print(c.data)
        print(b.data.shape)
        print(c.data.shape)
        self.assertEqual(b.variables, c.variables)
        self.assertEqual(b.axis_to_variable, c.axis_to_variable)
        assert_array_almost_equal(b.data, c.data)

        data = np.array([6, 10])
        e = DiscreteFactor([(1, 2)], data)

        d = a.marginalize([1])
        print(d.data)
        print(e.data)
        print(d.data.shape)
        print(e.data.shape)

        self.assertEqual(d.variables, e.variables)
        self.assertEqual(d.axis_to_variable, e.axis_to_variable)
        assert_array_almost_equal(d.data, e.data)
Example #4
    def test_marginalize_small_edge(self):
        data = np.array([[0, 2],
                         [5, 8]])
        a = DiscreteFactor([(0, 2), (1, 2)], data=data)

        print(a.data)
        b = a.marginalize([0, 1])
        print()
        print(b.data)
        print(b.data.shape)
        self.assertEqual(b.variables, a.variables)
        self.assertEqual(b.axis_to_variable, a.axis_to_variable)
        assert_array_almost_equal(b.data, a.data)

        c = a.marginalize([1, 0])
        print(c.data)
        print(c.data.shape)
        self.assertEqual(c.variables, a.variables)
        self.assertEqual(c.axis_to_variable, a.axis_to_variable)
        assert_array_almost_equal(c.data, a.data)
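For this edge case, marginalizing onto every variable leaves no axis to sum over, so the table comes back unchanged; in plain numpy terms (relying on numpy's empty-axis-tuple behaviour, where summing over no axes is a no-op):

import numpy as np

a_data = np.array([[0, 2],
                   [5, 8]])
print(np.sum(a_data, axis=()))   # no axes are summed, so the result equals a_data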
Example #5
def construct_graph(patch_images, context_image, pw, stride):
    """
    Drawn heavily from this example notebook http://nbviewer.jupyter.org/github/dirko/pyugm/blob/master/examples/Loopy%20belief%20propagation%20example.ipynb
    on how to use the pyugm package

    This will create a graph with observation factors (create_ovservation_comps)
    and label factors (create_neighbor_matrix) s.t. we reward using patches
    from one of the K style imgs that is close to our source image while also 
    rewarding smoothness between the chosen patches (this is done through the neighbor label factors)

    inputs
        patch_image_directory- directory where stylized imgs are located. 
        context_image- the context image as a np array
        pw- patch width
        stride- step size between each patch
    outputs
        evidence- context values associated with each observed variable
        factors- the lines on our graph; they relate the style patch to the context patch
            and relate style patches to other style patches (for neighbors)
    """
    evidence = {}
    factors = []

    K = len(patch_images)
    source_rows = context_image.shape[0]
    source_cols = context_image.shape[1]

    # Add observation factors
    for r in range(0, source_rows - pw + 1, stride):
        for c in range(0, source_cols - pw + 1, stride):
            # do this for each patch in the source image
            # call create_observation_comps to make the parameters
            label_variable_name = 'label_{}_{}'.format(r, c)
            observation_variable_name = 'obs_{}_{}'.format(r, c)

            print("\nadded label %s and observation %s" %
                  (label_variable_name, observation_variable_name))
            print("these two are linked by a factor")

            observation_params = create_oberservation_comps(
                patch_images, context_image, (r, c), pw)
            factors.append(
                DiscreteFactor([(label_variable_name, K),
                                (observation_variable_name, 256)],
                               parameters=observation_params))
            evidence[observation_variable_name] = context_image[r:r + pw,
                                                                c:c + pw, :]

    # Add label factors
    # for each patch location in a texture image
    # create a node for it and get the 4 neighbor matrices from the helper function
    # then create the neighbors-factors if appropriate
    # as said below, maybe just start with doing down and right
    for r in range(0, source_rows - pw + 1, stride):
        for c in range(0, source_cols - pw + 1, stride):
            variable_name = 'label_{}_{}'.format(r, c)
            neighbor_params, neighbor_locs = create_neighbor_matrix(
                patch_images, (r, c), pw, stride)

            print("\nLooking at label %s" % variable_name)

            # for each valid neighbor, create a pairwise label-label factor
            for n_params, n_loc in zip(neighbor_params, neighbor_locs):
                if n_params is None or n_loc is None:
                    continue
                nr, nc = n_loc
                neighbor_name = 'label_{}_{}'.format(nr, nc)

                print("adding neighbor ", neighbor_name)

                factors.append(
                    DiscreteFactor([(variable_name, K), (neighbor_name, K)],
                                   parameters=n_params))
    return evidence, factors
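A rough usage sketch for construct_graph, assuming the Model pattern from the tests above, that the create_oberservation_comps and create_neighbor_matrix helpers are available, and that pyugm exposes Model under pyugm.model; the inputs here are illustrative placeholders only:

import numpy as np
from pyugm.model import Model   # import path assumed from the pyugm examples

# Hypothetical inputs: two stylized images plus the context image, all np arrays.
patch_images = [np.zeros((32, 32, 3)), np.ones((32, 32, 3))]
context_image = np.zeros((32, 32, 3))

evidence, factors = construct_graph(patch_images, context_image, pw=8, stride=4)
model = Model(factors)
# Loopy belief propagation would then be run on `model`, conditioning on
# `evidence`, as in the linked pyugm example notebook.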
Example #6
    def test_get_potential_slice(self):
        a = DiscreteFactor([(4, 2), (8, 3)], data=np.array(range(6)).reshape(2, 3))
        b = a.get_potential([(8, 0), (9, 1), (2, 4)])
        self.assertIsNone(assert_array_almost_equal(b, np.array([0, 3])))
Example #7
    def test_get_potential_single(self):
        a = DiscreteFactor([(4, 2), (8, 3)], data=np.array(range(6)).reshape(2, 3))
        b = a.get_potential([(8, 0), (4, 1), (2, 4)])
        print(b)
        self.assertAlmostEqual(b, 3)
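In both get_potential examples the underlying table is range(6).reshape(2, 3), with variable 4 on axis 0 and variable 8 on axis 1, so the expected values are just numpy indexing (assignments to variables the factor does not contain, such as 9 or 2, are ignored):

import numpy as np

data = np.arange(6).reshape(2, 3)   # variable 4 on axis 0, variable 8 on axis 1

print(data[:, 0])   # variable 8 fixed at 0, variable 4 left free -> [0 3]
print(data[1, 0])   # variable 4 fixed at 1, variable 8 fixed at 0 -> 3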