Example no. 1
 def test_compute_valid(self):
     """Calculating validity of Sum"""
     # Without latent indicators
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
     v34 = spn.RawLeaf(num_vars=2)
     s1 = spn.Sum((v12, [0, 1, 2, 3]))
     s2 = spn.Sum((v12, [0, 1, 2, 4]))
     s3 = spn.Sum((v12, [0, 1, 2, 3]), (v34, 0))
     p1 = spn.Product((v12, [0, 5]), (v34, 0))
     p2 = spn.Product((v12, [1, 6]), (v34, 0))
     p3 = spn.Product((v12, [1, 6]), (v34, 1))
     s4 = spn.Sum(p1, p2)
     s5 = spn.Sum(p1, p3)
     self.assertTrue(v12.is_valid())
     self.assertTrue(v34.is_valid())
     self.assertTrue(s1.is_valid())
     self.assertFalse(s2.is_valid())
     self.assertFalse(s3.is_valid())
     self.assertTrue(s4.is_valid())
     self.assertFalse(s5.is_valid())
     # With latent indicators
     s6 = spn.Sum(p1, p2)
     s6.generate_latent_indicators()
     self.assertTrue(s6.is_valid())
     s7 = spn.Sum(p1, p2)
     s7.set_latent_indicators(spn.RawLeaf(num_vars=2))
     self.assertFalse(s7.is_valid())
     s8 = spn.Sum(p1, p2)
     s8.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
     with self.assertRaises(spn.StructureError):
         s8.is_valid()
     s9 = spn.Sum(p1, p2)
     s9.set_latent_indicators((v12, [0, 3]))
     self.assertTrue(s9.is_valid())
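
The snippets in this collection assume a common scaffolding that is not shown: libspn imported as spn, TensorFlow 1.x imported as tf, numpy imported as np, and test classes deriving from tf.test.TestCase (which provides test_session() and the assertAllClose-style helpers used below). A minimal sketch of that assumed setup, reusing only calls that appear in these examples; the module and class names are hypothetical:

import tensorflow as tf
import libspn as spn   # assumption: the library these snippets exercise


class SumValidityTest(tf.test.TestCase):   # hypothetical class name

    def test_single_variable_sum(self):
        # A sum over all indicators of one variable has a consistent scope,
        # so it should be valid (mirrors s1 in Example no. 1 above).
        leaf = spn.IndicatorLeaf(num_vars=1, num_vals=4)
        s = spn.Sum((leaf, [0, 1, 2, 3]))
        self.assertTrue(s.is_valid())


if __name__ == "__main__":
    tf.test.main()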
Example no. 2
    def test_compute_graph_up_noconst(self):
        """Computing value assuming no constant functions"""
        # Number of times val_fun was called. A one-element list is used so
        # the nested function can update the counter without rebinding a local.
        counter = [0]

        def val_fun(node, *inputs):
            counter[0] += 1
            if isinstance(node, spn.graph.node.VarNode):
                return 1
            elif isinstance(node, spn.graph.node.ParamNode):
                return 0.1
            else:
                weight_val, iv_val, *values = inputs
                return weight_val + sum(values) + 1

        # Generate graph
        v1 = spn.RawLeaf(num_vars=1)
        v2 = spn.RawLeaf(num_vars=1)
        v3 = spn.RawLeaf(num_vars=1)
        s1 = spn.Sum(v1, v1, v2)  # v1 included twice
        s2 = spn.Sum(v1, v3)
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        s4 = spn.Sum(s1, v1)
        s5 = spn.Sum(s2, v3, s3)
        s6 = spn.Sum(s4, s2, s5, s4, s5)  # s4 and s5 included twice
        spn.generate_weights(s6)

        # Calculate value
        val = spn.compute_graph_up(s6, val_fun)

        # Test
        self.assertAlmostEqual(val, 35.2)
        self.assertEqual(counter[0], 15)
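
A standalone sketch of the same mechanism, under the setup assumed above: spn.compute_graph_up appears to evaluate each node exactly once and reuse the cached value when a node feeds several parents, which is what the counter value of 15 is checking.

import libspn as spn   # assumed setup, as above

v = spn.RawLeaf(num_vars=1)
s_a = spn.Sum(v)
s_b = spn.Sum(v)
root = spn.Sum(s_a, s_b, s_a)   # s_a connected to the root twice
spn.generate_weights(root)

visited = []

def val_fun(node, *inputs):
    visited.append(node)
    if isinstance(node, spn.graph.node.VarNode):
        return 1
    if isinstance(node, spn.graph.node.ParamNode):
        return 0
    weight_val, iv_val, *values = inputs   # same unpacking as above
    return sum(values)

spn.compute_graph_up(root, val_fun)
assert len(visited) == len(set(visited))   # no node evaluated twice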
Example no. 3
 def test_compute_valid(self):
     """Calculating validity of PermuteProducts"""
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=3)
     v345 = spn.IndicatorLeaf(num_vars=3, num_vals=3)
     v678 = spn.RawLeaf(num_vars=3)
     v910 = spn.RawLeaf(num_vars=2)
     p1 = spn.PermuteProducts((v12, [0, 1]), (v12, [4, 5]))
     p2 = spn.PermuteProducts((v12, [3, 5]), (v345, [0, 1, 2]))
     p3 = spn.PermuteProducts((v345, [0, 1, 2]), (v345, [3, 4, 5]), (v345, [6, 7, 8]))
     p4 = spn.PermuteProducts((v345, [6, 8]), (v678, [0, 1]))
     p5 = spn.PermuteProducts((v678, [1]), v910)
     p6 = spn.PermuteProducts(v678, v910)
     p7 = spn.PermuteProducts((v678, [0, 1, 2]))
     p8 = spn.PermuteProducts((v910, [0]), (v910, [1]))
     self.assertTrue(p1.is_valid())
     self.assertTrue(p2.is_valid())
     self.assertTrue(p3.is_valid())
     self.assertTrue(p4.is_valid())
     self.assertTrue(p5.is_valid())
     self.assertTrue(p6.is_valid())
     self.assertTrue(p7.is_valid())
     self.assertTrue(p8.is_valid())
     p9 = spn.PermuteProducts((v12, [0, 1]), (v12, [1, 2]))
     p10 = spn.PermuteProducts((v12, [3, 4, 5]), (v345, [0]), (v345, [0, 1, 2]))
     p11 = spn.PermuteProducts((v345, [3, 5]), (v678, [0]), (v678, [0]))
     p12 = spn.PermuteProducts((v910, [1]), (v910, [1]))
     p13 = spn.PermuteProducts(v910, v910)
     p14 = spn.PermuteProducts((v12, [0]), (v12, [1]))
     self.assertFalse(p9.is_valid())
     self.assertFalse(p10.is_valid())
     self.assertFalse(p11.is_valid())
     self.assertFalse(p12.is_valid())
     self.assertFalse(p13.is_valid())
     self.assertEqual(p14.num_prods, 1)
     self.assertFalse(p14.is_valid())
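
Judging from the p14 check, num_prods counts the Cartesian product of the input sizes, and validity additionally requires the inputs of each product to have disjoint scopes. A minimal sketch under those assumptions, reusing only the constructors shown above:

import libspn as spn   # assumed setup, as above

a = spn.IndicatorLeaf(num_vars=1, num_vals=3)   # one discrete variable
b = spn.RawLeaf(num_vars=2)                     # two continuous variables
pp = spn.PermuteProducts((a, [0, 1, 2]), b)

# Assumption: one product per combination drawn from the two input groups.
assert pp.num_prods == 3 * 2
# The scopes of a and b are disjoint, so every product is decomposable.
assert pp.is_valid()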
Example no. 4
    def test_masked_weights(self):
        v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
        v34 = spn.RawLeaf(num_vars=2)
        v5 = spn.RawLeaf(num_vars=1)
        s = spn.SumsLayer((v12, [0, 5]),
                          v34, (v12, [3]),
                          v5, (v12, [0, 5]),
                          v34, (v12, [3]),
                          v5,
                          num_or_size_sums=[3, 1, 3, 4, 1])
        s.generate_weights(
            initializer=tf.initializers.random_uniform(0.0, 1.0))
        with self.test_session() as sess:
            sess.run(s.weights.node.initialize())
            weights = sess.run(s.weights.node.variable)

        shape = [5, 4]
        self.assertEqual(shape, s.weights.node.variable.shape.as_list())
        for row, col in [(0, -1), (1, 1), (1, 2), (1, 3),
                         (2, -1), (4, 1), (4, 2), (4, 3)]:
            self.assertEqual(weights[row, col], 0.0)
        self.assertAllClose(np.sum(weights, axis=1), np.ones(5))
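
The zero entries asserted above are the padded positions of a [5, 4] weight matrix that packs sums of sizes [3, 1, 3, 4, 1], one sum per row, padded to the width of the largest sum. A numpy-only illustration of that reading (not library code):

import numpy as np

sum_sizes = [3, 1, 3, 4, 1]
max_size = max(sum_sizes)                   # 4 -> weight matrix shape [5, 4]
mask = np.zeros((len(sum_sizes), max_size))
for row, size in enumerate(sum_sizes):
    mask[row, :size] = 1.0                  # real weights occupy the prefix

padded = [(r, c) for r in range(len(sum_sizes))
          for c in range(max_size) if mask[r, c] == 0.0]
# Matches the (row, col) pairs checked above, with col -1 being the last column:
# [(0, 3), (1, 1), (1, 2), (1, 3), (2, 3), (4, 1), (4, 2), (4, 3)]
print(padded)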
Example no. 5
    def test_traverse_graph_nostop_noparams(self):
        """Traversing the whole graph excluding param nodes"""
        counter = [0]
        nodes = [None] * 10

        def fun(node):
            nodes[counter[0]] = node
            counter[0] += 1

        # Generate graph
        v1 = spn.RawLeaf(num_vars=1)
        v2 = spn.RawLeaf(num_vars=1)
        v3 = spn.RawLeaf(num_vars=1)
        s1 = spn.Sum(v1, v1, v2)  # v1 included twice
        s2 = spn.Sum(v1, v3)
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        s4 = spn.Sum(s1, v1)
        s5 = spn.Sum(s2, v3, s3)
        s6 = spn.Sum(s4, s2, s5, s4, s5)  # s4 and s5 included twice
        spn.generate_weights(s6)

        # Traverse
        spn.traverse_graph(s6, fun=fun, skip_params=True)

        # Test
        self.assertEqual(counter[0], 9)
        self.assertIs(nodes[0], s6)
        self.assertIs(nodes[1], s4)
        self.assertIs(nodes[2], s2)
        self.assertIs(nodes[3], s5)
        self.assertIs(nodes[4], s1)
        self.assertIs(nodes[5], v1)
        self.assertIs(nodes[6], v3)
        self.assertIs(nodes[7], s3)
        self.assertIs(nodes[8], v2)
Example no. 6
    def test_get_out_size(self):
        """Computing the sizes of the outputs of nodes in SPN graph"""
        # Generate graph
        v1 = spn.RawLeaf(num_vars=5)
        v2 = spn.RawLeaf(num_vars=5)
        v3 = spn.RawLeaf(num_vars=5)
        s1 = spn.Sum((v1, [1, 3]), (v1, [1, 4]), v2)  # v1 included twice
        s2 = spn.Sum(v1, (v3, [0, 1, 2, 3, 4]))
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        n4 = spn.Concat(s1, v1)
        n5 = spn.Concat((v3, [0, 4]), s3)
        n6 = spn.Concat(n4, s2, n5, (n4, [0]), (n5, [1]))  # n4 and n5 included twice

        # Test
        num = v1.get_out_size()
        self.assertEqual(num, 5)
        num = v2.get_out_size()
        self.assertEqual(num, 5)
        num = v3.get_out_size()
        self.assertEqual(num, 5)
        num = s1.get_out_size()
        self.assertEqual(num, 1)
        num = s2.get_out_size()
        self.assertEqual(num, 1)
        num = s3.get_out_size()
        self.assertEqual(num, 1)
        num = n4.get_out_size()
        self.assertEqual(num, 6)
        num = n5.get_out_size()
        self.assertEqual(num, 3)
        num = n6.get_out_size()
        self.assertEqual(num, 12)
Example no. 7
 def test_compute_mpe_path(self):
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
     v34 = spn.RawLeaf(num_vars=2)
     v5 = spn.RawLeaf(num_vars=1)
     p = spn.Concat((v12, [0, 5]), v34, (v12, [3]), v5)
     counts = tf.placeholder(tf.float32, shape=(None, 6))
     op = p._compute_log_mpe_path(tf.identity(counts), v12.get_value(),
                                  v34.get_value(), v12.get_value(),
                                  v5.get_value())
     feed = np.r_[:18].reshape(-1, 6)
     with self.test_session() as sess:
         out = sess.run(op, feed_dict={counts: feed})
     np.testing.assert_array_almost_equal(
         out[0],
         np.array([[0., 0., 0., 0., 0., 1., 0., 0.],
                   [6., 0., 0., 0., 0., 7., 0., 0.],
                   [12., 0., 0., 0., 0., 13., 0., 0.]],
                  dtype=np.float32))
     np.testing.assert_array_almost_equal(
         out[1], np.array([[2., 3.], [8., 9.], [14., 15.]],
                          dtype=np.float32))
     np.testing.assert_array_almost_equal(
         out[2],
         np.array([[0., 0., 0., 4., 0., 0., 0., 0.],
                   [0., 0., 0., 10., 0., 0., 0., 0.],
                   [0., 0., 0., 16., 0., 0., 0., 0.]],
                  dtype=np.float32))
     np.testing.assert_array_almost_equal(
         out[3], np.array([[5.], [11.], [17.]], dtype=np.float32))
Example no. 8
    def test_get_num_nodes(self):
        """Computing the number of nodes in the SPN graph"""
        # Generate graph
        v1 = spn.RawLeaf(num_vars=1)
        v2 = spn.RawLeaf(num_vars=1)
        v3 = spn.RawLeaf(num_vars=1)
        s1 = spn.Sum(v1, v1, v2)  # v1 included twice
        s2 = spn.Sum(v1, v3)
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        s4 = spn.Sum(s1, v1)
        s5 = spn.Sum(s2, v3, s3)
        s6 = spn.Sum(s4, s2, s5, s4, s5)  # s4 and s5 included twice
        spn.generate_weights(s6)

        # Test
        num = v1.get_num_nodes(skip_params=True)
        self.assertEqual(num, 1)
        num = v1.get_num_nodes(skip_params=False)
        self.assertEqual(num, 1)

        num = v2.get_num_nodes(skip_params=True)
        self.assertEqual(num, 1)
        num = v2.get_num_nodes(skip_params=False)
        self.assertEqual(num, 1)

        num = v3.get_num_nodes(skip_params=True)
        self.assertEqual(num, 1)
        num = v3.get_num_nodes(skip_params=False)
        self.assertEqual(num, 1)

        num = s1.get_num_nodes(skip_params=True)
        self.assertEqual(num, 3)
        num = s1.get_num_nodes(skip_params=False)
        self.assertEqual(num, 4)

        num = s2.get_num_nodes(skip_params=True)
        self.assertEqual(num, 3)
        num = s2.get_num_nodes(skip_params=False)
        self.assertEqual(num, 4)

        num = s3.get_num_nodes(skip_params=True)
        self.assertEqual(num, 3)
        num = s3.get_num_nodes(skip_params=False)
        self.assertEqual(num, 4)

        num = s4.get_num_nodes(skip_params=True)
        self.assertEqual(num, 4)
        num = s4.get_num_nodes(skip_params=False)
        self.assertEqual(num, 6)

        num = s5.get_num_nodes(skip_params=True)
        self.assertEqual(num, 6)
        num = s5.get_num_nodes(skip_params=False)
        self.assertEqual(num, 9)

        num = s6.get_num_nodes(skip_params=True)
        self.assertEqual(num, 9)
        num = s6.get_num_nodes(skip_params=False)
        self.assertEqual(num, 15)
Example no. 9
    def test_compute_mpe_path_nolatent_indicators(self):
        spn.conf.argmax_zero = True
        v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
        v34 = spn.RawLeaf(num_vars=2)
        v5 = spn.RawLeaf(num_vars=1)
        s = spn.Sum((v12, [0, 5]), v34, (v12, [3]), v5)
        w = s.generate_weights()
        counts = tf.placeholder(tf.float32, shape=(None, 1))
        op = s._compute_log_mpe_path(tf.identity(counts), w.get_log_value(),
                                     None, v12.get_log_value(),
                                     v34.get_log_value(), v12.get_log_value(),
                                     v5.get_log_value())
        init = w.initialize()
        counts_feed = [[10], [11], [12], [13]]
        v12_feed = [[0, 1], [1, 1], [0, 0], [3, 3]]
        v34_feed = [[0.1, 0.2], [1.2, 0.2], [0.1, 0.2], [0.9, 0.8]]
        v5_feed = [[0.5], [0.5], [1.2], [0.9]]

        with self.test_session() as sess:
            sess.run(init)
            # Skip the IndicatorLeaf op
            out = sess.run(op[:1] + op[2:],
                           feed_dict={
                               counts: counts_feed,
                               v12: v12_feed,
                               v34: v34_feed,
                               v5: v5_feed
                           })
        # Weights
        np.testing.assert_array_almost_equal(
            np.squeeze(out[0]),
            np.array([[10., 0., 0., 0., 0., 0.], [0., 0., 11., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 12.], [0., 0., 0., 0., 13., 0.]],
                     dtype=np.float32))
        np.testing.assert_array_almost_equal(
            out[1],
            np.array([[10., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 0., 0., 0.]],
                     dtype=np.float32))
        np.testing.assert_array_almost_equal(
            out[2],
            np.array([[0., 0.], [11., 0.], [0., 0.], [0., 0.]],
                     dtype=np.float32))
        np.testing.assert_array_almost_equal(
            out[3],
            np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., 13., 0., 0., 0., 0.]],
                     dtype=np.float32))
        np.testing.assert_array_almost_equal(
            out[4], np.array([[0.], [0.], [12.], [0.]], dtype=np.float32))
Example no. 10
 def test_is_valid_false(self):
     """Checking validity of the SPN"""
     # Create graph
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4, name="V12")
     v34 = spn.RawLeaf(num_vars=2, name="V34")
     s1 = spn.Sum((v12, [0, 1, 2, 3]), name="S1")
     s2 = spn.Sum((v12, [4, 5, 6, 7]), name="S2")
     p1 = spn.Product((v12, [0, 7]), name="P1")
     p2 = spn.Product((v12, [2, 3, 4]), name="P2")
     p3 = spn.Product(v34, name="P3")
     n1 = spn.Concat(s1, s2, p3, name="N1")
     n2 = spn.Concat(p1, p2, name="N2")
     p4 = spn.Product((n1, [0]), (n1, [1]), name="P4")
     p5 = spn.Product((n2, [0]), (n1, [2]), name="P5")
     s3 = spn.Sum(p4, n2, name="S3")
     p6 = spn.Product(s3, (n1, [2]), name="P6")
     s4 = spn.Sum(p5, p6, name="S4")
     # Test
     self.assertTrue(v12.is_valid())
     self.assertTrue(v34.is_valid())
     self.assertTrue(s1.is_valid())
     self.assertTrue(s2.is_valid())
     self.assertTrue(p1.is_valid())
     self.assertTrue(p3.is_valid())
     self.assertTrue(p4.is_valid())
     self.assertTrue(n1.is_valid())
     self.assertFalse(p2.is_valid())
     self.assertFalse(n2.is_valid())
     self.assertFalse(s3.is_valid())
     self.assertFalse(s4.is_valid())
     self.assertFalse(p5.is_valid())
     self.assertFalse(p6.is_valid())
Example no. 11
    def test_input_flags(self):
        """Detection of different types of inputs"""
        inpt = spn.Input()
        self.assertFalse(inpt)
        self.assertFalse(inpt.is_op)
        self.assertFalse(inpt.is_var)
        self.assertFalse(inpt.is_param)

        n = spn.Sum()
        inpt = spn.Input(n)
        self.assertTrue(inpt)
        self.assertTrue(inpt.is_op)
        self.assertFalse(inpt.is_var)
        self.assertFalse(inpt.is_param)

        n = spn.RawLeaf()
        inpt = spn.Input(n)
        self.assertTrue(inpt)
        self.assertFalse(inpt.is_op)
        self.assertTrue(inpt.is_var)
        self.assertFalse(inpt.is_param)

        n = spn.Weights()
        inpt = spn.Input(n)
        self.assertTrue(inpt)
        self.assertFalse(inpt.is_op)
        self.assertFalse(inpt.is_var)
        self.assertTrue(inpt.is_param)
Example no. 12
    def test_generate_set_errors(self):
        """Detecting structure errors in __generate_set"""
        gen = spn.DenseSPNGenerator(num_decomps=2,
                                    num_subsets=3,
                                    num_mixtures=2)
        v1 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
        v2 = spn.RawLeaf(num_vars=3, name="RawLeaf1")
        v3 = spn.RawLeaf(num_vars=2, name="RawLeaf2")
        s1 = spn.Sum(v3, v2)
        n1 = spn.Concat(v2)

        with self.assertRaises(spn.StructureError):
            gen._DenseSPNGenerator__generate_set([
                spn.Input(v1, [0, 3, 2, 6, 7]),
                spn.Input(v2, [1, 2]),
                spn.Input(s1, None),
                spn.Input(n1, None)
            ])
Example no. 13
    def test_compute_graph_up_const(self):
        """Computing value with constant function detection"""
        # Number of times val_fun was called. A one-element list is used so
        # the nested function can update the counter without rebinding a local.
        counter = [0]

        # Generate graph
        v1 = spn.RawLeaf(num_vars=1)
        v2 = spn.RawLeaf(num_vars=1)
        v3 = spn.RawLeaf(num_vars=1)
        s1 = spn.Sum(v1, v1, v2)  # v1 included twice
        s2 = spn.Sum(v1, v3)
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        s4 = spn.Sum(s1, v1)
        s5 = spn.Sum(s2, v3, s3)
        s6 = spn.Sum(s4, s2, s5, s4, s5)  # s4 and s5 included twice

        def val_fun(node, *inputs):
            counter[0] += 1
            # s3 is not needed for calculations since its only parent is s5
            self.assertIsNot(node, s3)
            # Fixed value or compute using children
            if node == s5:
                return 16
            else:
                if isinstance(node, spn.graph.node.VarNode):
                    return 1
                else:
                    weight_val, iv_val, *values = inputs
                    return sum(values) + 1

        def const_fun(node):
            if node == s5:
                return True
            else:
                return False

        # Calculate value
        val = spn.compute_graph_up(s6, val_fun, const_fun)

        # Test
        self.assertEqual(val, 48)
        self.assertEqual(counter[0], 8)
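
A standalone sketch of the const_fun short-circuit, under the same assumed setup: when const_fun(node) returns True, val_fun is still called for that node (without needing its inputs), but the node's descendants are skipped, which is what the assertIsNot(node, s3) check above relies on.

import libspn as spn   # assumed setup, as above

leaf = spn.RawLeaf(num_vars=1)
inner = spn.Sum(leaf)
root = spn.Sum(inner)

visited = []

def val_fun(node, *inputs):
    visited.append(node)
    if node is inner:
        return 5                            # fixed value; children not needed
    if isinstance(node, spn.graph.node.VarNode):
        return 1
    weight_val, iv_val, *values = inputs
    return sum(values)

spn.compute_graph_up(root, val_fun, lambda node: node is inner)
assert leaf not in visited                  # the subtree below `inner` was pruned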
Example no. 14
 def test_get_scope(self):
     """Computing the scope of nodes of the SPN graph"""
     # Create graph
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4, name="V12")
     v34 = spn.RawLeaf(num_vars=2, name="V34")
     s1 = spn.Sum((v12, [0, 1, 2, 3]), name="S1")
     s2 = spn.Sum((v12, [4, 5, 6, 7]), name="S2")
     p1 = spn.Product((v12, [0, 7]), name="P1")
     p2 = spn.Product((v12, [3, 4]), name="P2")
     p3 = spn.Product(v34, name="P3")
     n1 = spn.Concat(s1, s2, p3, name="N1")
     n2 = spn.Concat(p1, p2, name="N2")
     p4 = spn.Product((n1, [0]), (n1, [1]), name="P4")
     p5 = spn.Product((n2, [0]), (n1, [2]), name="P5")
     s3 = spn.Sum(p4, n2, name="S3")
     p6 = spn.Product(s3, (n1, [2]), name="P6")
     s4 = spn.Sum(p5, p6, name="S4")
     # Test
     self.assertListEqual(v12.get_scope(),
                          [spn.Scope(v12, 0), spn.Scope(v12, 0),
                           spn.Scope(v12, 0), spn.Scope(v12, 0),
                           spn.Scope(v12, 1), spn.Scope(v12, 1),
                           spn.Scope(v12, 1), spn.Scope(v12, 1)])
     self.assertListEqual(v34.get_scope(),
                          [spn.Scope(v34, 0), spn.Scope(v34, 1)])
     self.assertListEqual(s1.get_scope(),
                          [spn.Scope(v12, 0)])
     self.assertListEqual(s2.get_scope(),
                          [spn.Scope(v12, 1)])
     self.assertListEqual(p1.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
     self.assertListEqual(p2.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
     self.assertListEqual(p3.get_scope(),
                          [spn.Scope(v34, 0) | spn.Scope(v34, 1)])
     self.assertListEqual(n1.get_scope(),
                          [spn.Scope(v12, 0),
                           spn.Scope(v12, 1),
                           spn.Scope(v34, 0) | spn.Scope(v34, 1)])
     self.assertListEqual(n2.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1),
                           spn.Scope(v12, 0) | spn.Scope(v12, 1)])
     self.assertListEqual(p4.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
     self.assertListEqual(p5.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                           spn.Scope(v34, 0) | spn.Scope(v34, 1)])
     self.assertListEqual(s3.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
     self.assertListEqual(p6.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                           spn.Scope(v34, 0) | spn.Scope(v34, 1)])
     self.assertListEqual(s4.get_scope(),
                          [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                           spn.Scope(v34, 0) | spn.Scope(v34, 1)])
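
The assertions above treat spn.Scope as a set-like value keyed by (node, variable index) and combined with the | operator. A minimal sketch reusing only the constructors that appear in this example:

import libspn as spn   # assumed setup, as above

v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
v34 = spn.RawLeaf(num_vars=2)

# Scope of variable 0 of v12 joined with the scope of variable 1 of v34.
joint = spn.Scope(v12, 0) | spn.Scope(v34, 1)

# A product over inputs with those scopes reports the union as its scope,
# mirroring the P3/P5 assertions above.
p = spn.Product((v12, [0]), (v34, 1))
assert p.get_scope() == [joint]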
Example no. 15
 def sumslayer_prepare_common(batch_size, factor, indices, input_sizes,
                              latent_indicators, same_inputs, sum_sizes):
     if indices:
         indices = [
             np.random.choice(list(range(size * factor)),
                              size=size,
                              replace=False) for size in input_sizes
         ]
     else:
         factor = 1
         indices = [np.arange(size) for size in input_sizes]
     if not same_inputs:
         input_nodes = [
             spn.RawLeaf(num_vars=size * factor) for size in input_sizes
         ]
         values = [
             np.random.rand(batch_size, size * factor)
             for size in input_sizes
         ]
         input_tuples = [(node, ind.tolist())
                         for node, ind in zip(input_nodes, indices)]
         feed_dict = {node: val for node, val in zip(input_nodes, values)}
     else:
         input_nodes = [spn.RawLeaf(num_vars=max(input_sizes) * factor)]
         values = [np.random.rand(batch_size,
                                  max(input_sizes) * factor)
                   ] * len(input_sizes)
         input_tuples = [(input_nodes[0], ind.tolist()) for ind in indices]
         feed_dict = {input_nodes[0]: values[0]}
     if 1 in sum_sizes:
         latent_indicators = False
     if latent_indicators:
         latent_indicators = [
             np.random.randint(size, size=batch_size) for size in sum_sizes
         ]
     weights = np.random.rand(sum(sum_sizes))
     root_weights = np.random.rand(len(sum_sizes))
     return feed_dict, indices, input_nodes, input_tuples, latent_indicators, values, weights, root_weights
Example no. 16
    def test_generate_set(self):
        """Generation of sets of inputs with __generate_set"""
        gen = spn.DenseSPNGenerator(num_decomps=2,
                                    num_subsets=3,
                                    num_mixtures=2)
        v1 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
        v2 = spn.RawLeaf(num_vars=3, name="RawLeaf1")
        v3 = spn.RawLeaf(num_vars=2, name="RawLeaf2")
        s1 = spn.Sum(v3)
        n1 = spn.Concat(v2)
        out = gen._DenseSPNGenerator__generate_set([
            spn.Input(v1, [0, 3, 2, 6, 7]),
            spn.Input(v2, [1, 2]),
            spn.Input(s1, None),
            spn.Input(n1, None)
        ])
        # scope_dict:
        # Scope({IndicatorLeaf(0x7f00cb4049b0):0}): {(IndicatorLeaf(0x7f00cb4049b0), 0),
        #                                  (IndicatorLeaf(0x7f00cb4049b0), 2),
        #                                  (IndicatorLeaf(0x7f00cb4049b0), 3)},
        # Scope({IndicatorLeaf(0x7f00cb4049b0):1}): {(IndicatorLeaf(0x7f00cb4049b0), 7),
        #                                  (IndicatorLeaf(0x7f00cb4049b0), 6)},
        # Scope({RawLeaf1(0x7f00b7982ef0):1}): {(Concat(0x7f00cb404d68), 1),
        #                                        (RawLeaf1(0x7f00b7982ef0), 1)},
        # Scope({RawLeaf1(0x7f00b7982ef0):2}): {(Concat(0x7f00cb404d68), 2),
        #                                        (RawLeaf1(0x7f00b7982ef0), 2)},
        # Scope({RawLeaf1(0x7f00b7982ef0):0}): {(Concat(0x7f00cb404d68), 0)},
        # Scope({RawLeaf2(0x7f00cb391eb8):0, RawLeaf2(0x7f00cb391eb8):1}): {
        #                                         (Sum(0x7f00cb404a90), 0)}}

        # Since order is undetermined, we check items
        self.assertEqual(len(out), 6)
        self.assertIn(tuple(sorted([(v2, 1), (n1, 1)])), out)
        self.assertIn(tuple(sorted([(v2, 2), (n1, 2)])), out)
        self.assertIn(tuple(sorted([(n1, 0)])), out)
        self.assertIn(tuple(sorted([(v1, 0), (v1, 2), (v1, 3)])), out)
        self.assertIn(tuple(sorted([(v1, 6), (v1, 7)])), out)
        self.assertIn(tuple(sorted([(s1, 0)])), out)
Example no. 17
 def test_compute_valid(self):
     """Calculating validity of Product"""
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
     v34 = spn.RawLeaf(num_vars=2)
     p1 = spn.Product((v12, [0, 5]))
     p2 = spn.Product((v12, [0, 3]))
     p3 = spn.Product((v12, [0, 5]), v34)
     p4 = spn.Product((v12, [0, 3]), v34)
     p5 = spn.Product((v12, [0, 5]), v34, (v12, 2))
     self.assertTrue(p1.is_valid())
     self.assertFalse(p2.is_valid())
     self.assertTrue(p3.is_valid())
     self.assertFalse(p4.is_valid())
     self.assertFalse(p5.is_valid())
Example no. 18
 def test_gather_input_scopes(self):
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4, name="V12")
     v34 = spn.RawLeaf(num_vars=2, name="V34")
     s1 = spn.Sum(v12, v12, v34, (v12, [7, 3, 1, 0]), (v34, 0), name="S1")
     scopes_v12 = v12._compute_scope()
     scopes_v34 = v34._compute_scope()
     # Note: weights/latent_indicators are disconnected, so None should be output for these
     scopes = s1._gather_input_scopes(None, None, None, scopes_v12, scopes_v34,
                                      scopes_v12, scopes_v34)
     self.assertTupleEqual(scopes,
                           (None, None, None, scopes_v12, scopes_v34,
                            [scopes_v12[7], scopes_v12[3],
                             scopes_v12[1], scopes_v12[0]],
                            [scopes_v34[0]]))
Example no. 19
 def test(num_vars, value):
     with self.subTest(num_vars=num_vars, value=value):
         n = spn.RawLeaf(num_vars=num_vars)
         op = n.get_value()
         op_log = n.get_log_value()
         with self.test_session() as sess:
             out = sess.run(op, feed_dict={n: value})
             out_log = sess.run(tf.exp(op_log), feed_dict={n: value})
         np.testing.assert_array_almost_equal(
             out, np.array(value,
                           dtype=spn.conf.dtype.as_numpy_dtype()))
         np.testing.assert_array_almost_equal(
             out_log,
             np.array(value, dtype=spn.conf.dtype.as_numpy_dtype()))
Example no. 20
    def test_traverse_graph_stop(self):
        """Traversing the graph until fun returns True"""
        counter = [0]
        nodes = [None] * 9
        true_node_no = 4  # s5

        def fun(node):
            nodes[counter[0]] = node
            counter[0] += 1
            if counter[0] == true_node_no:
                return True

        # Generate graph
        v1 = spn.RawLeaf(num_vars=1)
        v2 = spn.RawLeaf(num_vars=1)
        v3 = spn.RawLeaf(num_vars=1)
        s1 = spn.Sum(v1, v1, v2)  # v1 included twice
        s2 = spn.Sum(v1, v3)
        s3 = spn.Sum(v2, v3, v3)  # v3 included twice
        s4 = spn.Sum(s1, v1)
        s5 = spn.Sum(s2, v3, s3)
        s6 = spn.Sum(s4, s2, s5, s4, s5)  # s4 and s5 included twice

        # Traverse
        spn.traverse_graph(s6, fun=fun, skip_params=True)

        # Test
        self.assertEqual(counter[0], 4)
        self.assertIs(nodes[0], s6)
        self.assertIs(nodes[1], s4)
        self.assertIs(nodes[2], s2)
        self.assertIs(nodes[3], s5)
        self.assertIs(nodes[4], None)
        self.assertIs(nodes[5], None)
        self.assertIs(nodes[6], None)
        self.assertIs(nodes[7], None)
        self.assertIs(nodes[8], None)
Example no. 21
 def test_compute_log_mpe_path(self):
     v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
     v34 = spn.RawLeaf(num_vars=2)
     v5 = spn.RawLeaf(num_vars=1)
     p = spn.Product((v12, [0, 5]), v34, (v12, [3]), v5)
     counts = tf.placeholder(tf.float32, shape=(None, 1))
     op = p._compute_log_mpe_path(tf.identity(counts),
                                  v12.get_value(),
                                  v34.get_value(),
                                  v12.get_value(),
                                  v5.get_value())
     feed = [[0],
             [1],
             [2]]
     with self.test_session() as sess:
         out = sess.run(op, feed_dict={counts: feed})
     self.assertAllClose(
         out[0], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                           [1., 0., 0., 0., 0., 1., 0., 0.],
                           [2., 0., 0., 0., 0., 2., 0., 0.]],
                          dtype=np.float32))
     self.assertAllClose(
         out[1], np.array([[0., 0.],
                           [1., 1.],
                           [2., 2.]],
                          dtype=np.float32))
     self.assertAllClose(
         out[2], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                           [0., 0., 0., 1., 0., 0., 0., 0.],
                           [0., 0., 0., 2., 0., 0., 0., 0.]],
                          dtype=np.float32))
     self.assertAllClose(
         out[3], np.array([[0.],
                           [1.],
                           [2.]],
                          dtype=np.float32))
Example no. 22
    def test_compute_scope(self):
        """Calculating scope of Sums"""
        # Create a graph
        v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4, name="V12")
        v34 = spn.RawLeaf(num_vars=3, name="V34")

        scopes_per_node = {
            v12: [
                spn.Scope(v12, 0),
                spn.Scope(v12, 0),
                spn.Scope(v12, 0),
                spn.Scope(v12, 0),
                spn.Scope(v12, 1),
                spn.Scope(v12, 1),
                spn.Scope(v12, 1),
                spn.Scope(v12, 1)
            ],
            v34: [spn.Scope(v34, 0),
                  spn.Scope(v34, 1),
                  spn.Scope(v34, 2)]
        }

        def generate_scopes_from_inputs(node,
                                        inputs,
                                        num_or_size_sums,
                                        latent_indicators=False):
            # Create a flat list of scopes, where the scope elements of a single input
            # node are subsequent in the list
            flat_scopes = []
            size = 0
            for inp in inputs:
                if isinstance(inp, tuple) and inp[1]:
                    input_indices = [inp[1]] if isinstance(inp[1],
                                                           int) else inp[1]
                    for i in input_indices:
                        flat_scopes.append(scopes_per_node[inp[0]][i])
                    size += len(input_indices)
                elif not isinstance(inp, tuple):
                    flat_scopes.extend(scopes_per_node[inp])
                    size += len(scopes_per_node[inp])
                else:
                    flat_scopes.extend(scopes_per_node[inp[0]])
                    size += len(scopes_per_node[inp[0]])
            if isinstance(num_or_size_sums, int):
                num_or_size_sums = num_or_size_sums * [
                    size // num_or_size_sums
                ]

            new_scope = []
            offset = 0
            # For each sum generate the scope based on its size
            for i, s in enumerate(num_or_size_sums):
                scope = flat_scopes[offset]
                for j in range(1, s):
                    scope |= flat_scopes[j + offset]
                offset += s
                if latent_indicators:
                    scope |= spn.Scope(node.latent_indicators.node, i)
                new_scope.append(scope)
            scopes_per_node[node] = new_scope

        def sums_layer_and_test(inputs,
                                num_or_size_sums,
                                name,
                                latent_indicators=False):
            """ Create a sums layer, generate its correct scope and test """
            sums_layer = spn.SumsLayer(*inputs,
                                       num_or_size_sums=num_or_size_sums,
                                       name=name)
            if latent_indicators:
                sums_layer.generate_latent_indicators()
            generate_scopes_from_inputs(sums_layer,
                                        inputs,
                                        num_or_size_sums,
                                        latent_indicators=latent_indicators)
            self.assertListEqual(sums_layer.get_scope(),
                                 scopes_per_node[sums_layer])
            return sums_layer

        def concat_layer_and_test(inputs, name):
            """ Create a concat node, generate its scopes and assert whether it is correct """
            scope = []
            for inp in inputs:
                if isinstance(inp, tuple):
                    indices = inp[1]
                    if isinstance(inp[1], int):
                        indices = [inp[1]]
                    for i in indices:
                        scope.append(scopes_per_node[inp[0]][i])
                else:
                    scope.extend(scopes_per_node[inp])
            concat = spn.Concat(*inputs, name=name)
            self.assertListEqual(concat.get_scope(), scope)
            scopes_per_node[concat] = scope
            return concat

        ss1 = sums_layer_and_test([(v12, [0, 1, 2, 3]), (v12, [1, 2, 5, 6]),
                                   (v12, [4, 5, 6, 7])],
                                  3,
                                  "Ss1",
                                  latent_indicators=True)

        ss2 = sums_layer_and_test([(v12, [6, 7]), (v34, 0)],
                                  num_or_size_sums=[1, 2],
                                  name="Ss2")
        ss3 = sums_layer_and_test([(v12, [3, 7]), (v34, 1),
                                   (v12, [4, 5, 6]), v34],
                                  num_or_size_sums=[1, 2, 2, 2, 2],
                                  name="Ss3")

        s1 = sums_layer_and_test([(v34, [1, 2])],
                                 num_or_size_sums=1,
                                 name="S1",
                                 latent_indicators=True)
        concat_layer_and_test([(ss1, [0, 2]), (ss2, 0)], name="N1")
        concat_layer_and_test([(ss1, 1), ss3, s1], name="N2")
        n = concat_layer_and_test([(ss1, 0), ss2, (ss3, [0, 1]), s1],
                                  name="N3")
        sums_layer_and_test([(ss1, [1, 2]), ss2],
                            num_or_size_sums=[2, 1, 1],
                            name="Ss4")
        sums_layer_and_test([(ss1, [0, 2]), (n, [0, 1]), (ss3, [4, 2])],
                            num_or_size_sums=[3, 2, 1],
                            name="Ss5")
Example no. 23
    def _run_op_test(self,
                     op_fun,
                     inputs,
                     sum_indices=None,
                     inf_type=spn.InferenceType.MARGINAL,
                     log=False,
                     on_gpu=True,
                     latent_indicators=None):
        """Run a single test for a single op."""

        # Preparations
        op_name = op_fun.__name__
        device_name = '/gpu:0' if on_gpu else '/cpu:0'

        # Print
        print2(
            "--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, inference=%s, log=%s, IndicatorLeaf=%s"
            %
            (op_name, on_gpu, inputs.shape,
             ("No" if sum_indices is None else "Yes"),
             ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"), log,
             ("No" if latent_indicators is None else "Yes")), self.file)

        input_size = inputs.shape[1]

        # Create graph
        tf.reset_default_graph()

        # Compute the true output
        sum_sizes = [len(ind) for ind in sum_indices]
        latent_indicators_per_sum = np.split(latent_indicators, latent_indicators.shape[1], axis=1) if latent_indicators is not None \
            else None
        sum_sizes_np = self._repeat_elements(sum_sizes)
        true_out = self._true_out(inf_type, inputs, latent_indicators_per_sum,
                                  sum_indices, sum_sizes, sum_sizes_np)
        if log:
            true_out = np.log(true_out)

        # Set up the graph
        with tf.device(device_name):
            # Create input
            inputs_pl = spn.RawLeaf(num_vars=input_size)
            feed_dict = {inputs_pl: inputs}

            if latent_indicators is not None:
                if op_fun is Ops.sum:
                    latent_indicators_pl = [
                        spn.IndicatorLeaf(num_vars=1, num_vals=s)
                        for s in sum_sizes_np
                    ]
                    latent_indicators = latent_indicators_per_sum
                elif op_fun is Ops.par_sums:
                    latent_indicators_pl = [
                        spn.IndicatorLeaf(num_vars=self.num_parallel,
                                          num_vals=len(ind))
                        for ind in sum_indices
                    ]
                    latent_indicators = np.split(latent_indicators,
                                                 len(self.sum_sizes),
                                                 axis=1)
                else:
                    latent_indicators = [latent_indicators]
                    latent_indicators_pl = [
                        spn.IndicatorLeaf(num_vars=len(sum_sizes_np),
                                          num_vals=max(sum_sizes))
                    ]
                for iv_pl, iv in zip(latent_indicators_pl, latent_indicators):
                    feed_dict[iv_pl] = iv
            else:
                latent_indicators_pl = None

            # Create ops
            start_time = time.time()
            init_ops, ops = op_fun(inputs_pl,
                                   sum_indices,
                                   self.num_parallel,
                                   inf_type,
                                   log,
                                   latent_indicators=latent_indicators_pl)
            setup_time = time.time() - start_time

        # Get num of graph ops
        graph_size = len(tf.get_default_graph().get_operations())
        # Run op multiple times
        output_correct = True
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=False,
                log_device_placement=self.log_devs)) as sess:
            # Initialize weights of all the sum nodes in the graph
            start_time = time.time()
            init_ops.run()
            weights_init_time = time.time() - start_time

            run_times = []
            # Create feed dictionary
            for n in range(self.num_runs):
                # Run
                start_time = time.time()
                out = sess.run(ops, feed_dict=feed_dict)
                run_times.append(time.time() - start_time)
                # Test value
                try:
                    np.testing.assert_array_almost_equal(out, true_out)
                except AssertionError:
                    output_correct = False
                    self.test_failed = True

            if self.profile:
                # Create a suitable filename suffix
                fnm_suffix = op_name
                fnm_suffix += ("_GPU" if on_gpu else "_CPU")
                fnm_suffix += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                    spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else "_MARGINAL")
                fnm_suffix += ("_IV" if latent_indicators is not None else "")
                # Create a profiling report
                profile_report(sess, ops, feed_dict, self.profiles_dir,
                               "sum_value_varying_sizes", fnm_suffix)

        # Return stats
        return OpTestResult(op_name, on_gpu, graph_size, ("Yes"),
                            ("No" if latent_indicators is None else "Yes"),
                            setup_time, weights_init_time, run_times,
                            output_correct)
Example no. 24
    def _run_op_test(self,
                     op_fun,
                     inputs,
                     indices=None,
                     latent_indicators=None,
                     inf_type=spn.InferenceType.MARGINAL,
                     log=False,
                     on_gpu=True):
        """Run a single test for a single op."""
        # Preparations
        op_name = op_fun.__name__
        device_name = '/gpu:0' if on_gpu else '/cpu:0'

        # Print
        print2(
            "--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, latent_indicators=%s, inference=%s, log=%s"
            % (op_name, on_gpu, inputs.shape,
               ("No" if indices is None else "Yes"),
               ("No" if latent_indicators is None else "Yes"),
               ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
               log), self.file)

        input_size = inputs.shape[1]

        # Compute true output
        true_out = self._true_output(op_fun, inputs, indices,
                                     latent_indicators)

        # Create graph
        tf.reset_default_graph()
        with tf.device(device_name):
            # Create input
            inputs_pl = spn.RawLeaf(num_vars=input_size)
            # Create IndicatorLeaf
            if latent_indicators is None:
                latent_indicators_pl = [None for _ in range(self.num_sums)]
            else:
                if op_fun is Ops.sum:
                    latent_indicators_pl = [
                        spn.IndicatorLeaf(num_vars=1, num_vals=input_size)
                        for _ in range(self.num_sums)
                    ]
                elif op_fun is Ops.par_sums or op_fun is Ops.sums:
                    latent_indicators_pl = [
                        spn.IndicatorLeaf(num_vars=self.num_sums,
                                          num_vals=input_size)
                    ]
            # Create ops
            start_time = time.time()
            init_ops, ops = op_fun(inputs_pl, indices, latent_indicators_pl,
                                   self.num_sums, inf_type, log)
            for _ in range(self.num_ops - 1):
                # The tuple ensures that the next op waits for the output
                # of the previous op, effectively stacking the ops
                # but using the original input every time
                # init_ops, ops = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums,
                #                        inf_type, log, tf.tuple([ops])[0])
                init_ops, ops = op_fun(inputs_pl, indices,
                                       latent_indicators_pl, self.num_sums,
                                       inf_type, log,
                                       tf.tuple([ops[-1]])[0])
            setup_time = time.time() - start_time
        # Get num of graph ops
        graph_size = len(tf.get_default_graph().get_operations())
        # Run op multiple times
        output_correct = True
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=False,
                log_device_placement=self.log_devs)) as sess:
            # Initialize weights of all the sum nodes in the graph
            start_time = time.time()
            init_ops.run()

            run_times = []
            # Create feed dictionary
            feed = {inputs_pl: inputs}
            if latent_indicators is not None:
                for iv_pl in latent_indicators_pl:
                    feed[iv_pl] = latent_indicators

            for n in range(self.num_runs):
                # Run
                start_time = time.time()
                out = sess.run(ops, feed_dict=feed)
                run_times.append(time.time() - start_time)
                # Test value
                try:
                    np.testing.assert_array_almost_equal(out[0], true_out)
                except AssertionError:
                    output_correct = False
                    self.test_failed = True

            if self.profile:
                # Add additional options to trace the session execution
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                out = sess.run(ops,
                               feed_dict=feed,
                               options=options,
                               run_metadata=run_metadata)

                # Create the Timeline object, and write it to a json file
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                if not os.path.exists(self.profiles_dir):
                    os.makedirs(self.profiles_dir)

                file_name = op_name
                file_name += ("_GPU" if on_gpu else "_CPU")
                file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                    spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else
                                                "_MARGINAL")
                if indices is not None:
                    file_name += "_Indices"
                if latent_indicators is not None:
                    file_name += "_IVS"

                with open(
                        '%s/timeline_path_%s.json' %
                    (self.profiles_dir, file_name), 'w') as f:
                    f.write(chrome_trace)

        # Return stats
        return OpTestResult(op_name, on_gpu, graph_size,
                            ("No" if indices is None else "Yes"),
                            ("No" if latent_indicators is None else "Yes"),
                            setup_time, run_times, output_correct)
Example no. 25
    def _run_op_test(self,
                     op_fun,
                     inputs,
                     num_inputs,
                     indices=None,
                     single_input=False,
                     log=False,
                     on_gpu=True,
                     inf_type=spn.InferenceType.MARGINAL):
        """Run a single test for a single op."""
        # Preparations
        op_name = op_fun.__name__
        device_name = '/gpu:0' if on_gpu else '/cpu:0'

        # Print
        print2(
            "--> %s: on_gpu=%s, num_inputs=%s, inputs_shape=%s, indices=%s,\
 single_input= %s, inference=%s, log=%s" %
            (op_name, on_gpu, num_inputs, inputs[0][0].shape,
             ("False" if indices is None else "True"), single_input,
             ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
             log), self.file)

        # Determine the number of products modelled in each node
        num_prods = [
            pow(n_inp_cols, n_inps)
            for n_inps, n_inp_cols in zip(num_inputs, self.num_input_cols)
        ]

        # Compute true output
        true_out = self._true_output(inputs, num_inputs, num_prods, indices,
                                     single_input)

        # Create graph
        tf.reset_default_graph()
        with tf.device(device_name):
            # Create inputs
            if single_input:
                num_inputs_array = np.array(num_inputs)
                num_input_cols_array = np.array(self.num_input_cols)
                num_vars = int(np.sum(num_inputs_array * num_input_cols_array))
                inputs_pl = spn.RawLeaf(num_vars=num_vars)
            else:
                inputs_pl = [[
                    spn.RawLeaf(num_vars=n_inp_cols) for _ in range(n_inps)
                ]
                             for n_inps, n_inp_cols in zip(
                                 num_inputs, self.num_input_cols)]
            # Create ops
            start_time = time.time()
            init_ops, ops = op_fun(inputs_pl, num_inputs, self.num_input_cols,
                                   num_prods, inf_type, indices, log)
            setup_time = time.time() - start_time
        # Get num of graph ops
        graph_size = len(tf.get_default_graph().get_operations())
        # Run op multiple times
        output_correct = True
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=False,
                log_device_placement=self.log_devs)) as sess:
            # Initialize weights of all the sum nodes in the graph
            init_ops.run()
            if op_fun is not Ops.products_layer and single_input is False:
                # Create feed dictionary
                feed = {
                    inp_pl: inp
                    for inp_pl, inp in zip(chain(*inputs_pl), chain(*inputs))
                }
            else:
                concatenated_input = np.concatenate(list(chain(*inputs)),
                                                    axis=1)
            run_times = []
            batch_size = self.num_input_rows // self.num_batches
            for n in range(self.num_runs):
                # Run
                if op_fun is Ops.products_layer:
                    outputs = []
                    start_time = time.time()
                    for i in range(self.num_batches):
                        start = i * batch_size
                        stop = (i + 1) * batch_size
                        # Create feed dictionary
                        if single_input:
                            feed = {
                                inputs_pl: concatenated_input[start:stop, :]
                            }
                        else:
                            feed = {
                                inp_pl: inp[start:stop, :]
                                for inp_pl, inp in zip(chain(
                                    *inputs_pl), chain(*inputs))
                            }
                        out = sess.run(ops, feed_dict=feed)
                        outputs.append(out)
                    run_times.append(time.time() - start_time)
                    out = np.vstack(outputs)
                else:
                    start_time = time.time()
                    # Create feed dictionary
                    if single_input:
                        feed = {inputs_pl: concatenated_input}
                    out = sess.run(ops, feed_dict=feed)
                    run_times.append(time.time() - start_time)

                # Test value
                try:
                    for o, true_o in zip(out, true_out):
                        np.testing.assert_array_almost_equal(o, true_o)
                except AssertionError:
                    output_correct = False
                    self.test_failed = True

            if self.profile:
                # Add additional options to trace the session execution
                options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                out = sess.run(ops,
                               feed_dict=feed,
                               options=options,
                               run_metadata=run_metadata)

                # Create the Timeline object, and write it to a json file
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                if not os.path.exists(self.profiles_dir):
                    os.makedirs(self.profiles_dir)

                file_name = op_name
                file_name += ("_GPU" if on_gpu else "_CPU")
                file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                    spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else
                                                "_MARGINAL")
                file_name += ("_SINGLE-INPUT" if
                              (op_fun is Ops.products_layer
                               and single_input is True) else "")

                with open(
                        '%s/timeline_path_%s.json' %
                    (self.profiles_dir, file_name), 'w') as f:
                    f.write(chrome_trace)

        # Return stats
        return OpTestResult(op_name, on_gpu, graph_size,
                            ("No" if
                             (op_fun is Ops.perm_products and indices is None
                              and single_input is False) else "Yes"),
                            ("Yes" if single_input else "No"), setup_time,
                            run_times, output_correct)
Example no. 26
    def test_compute_value(self):
        """Calculating value of Product"""

        def test(inputs, feed, output):
            with self.subTest(inputs=inputs, feed=feed):
                n = spn.Product(*inputs)
                op = n.get_value(spn.InferenceType.MARGINAL)
                op_log = n.get_log_value(spn.InferenceType.MARGINAL)
                op_mpe = n.get_value(spn.InferenceType.MPE)
                op_log_mpe = n.get_log_value(spn.InferenceType.MPE)
                with self.test_session() as sess:
                    out = sess.run(op, feed_dict=feed)
                    out_log = sess.run(tf.exp(op_log), feed_dict=feed)
                    out_mpe = sess.run(op_mpe, feed_dict=feed)
                    out_log_mpe = sess.run(tf.exp(op_log_mpe), feed_dict=feed)
                self.assertAllClose(
                    out,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
                self.assertAllClose(
                    out_log,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
                self.assertAllClose(
                    out_mpe,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
                self.assertAllClose(
                    out_log_mpe,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))

        # Create inputs
        v1 = spn.RawLeaf(num_vars=3)
        v2 = spn.RawLeaf(num_vars=1)

        # Multiple inputs, multi-element batch
        test([v1, v2],
             {v1: [[0.1, 0.2, 0.3],
                   [0.4, 0.5, 0.6]],
              v2: [[0.7],
                   [0.8]]},
             [[0.1 * 0.2 * 0.3 * 0.7],
              [0.4 * 0.5 * 0.6 * 0.8]])
        test([(v1, [0, 2]), (v2, [0])],
             {v1: [[0.1, 0.2, 0.3],
                   [0.4, 0.5, 0.6]],
              v2: [[0.7],
                   [0.8]]},
             [[0.1 * 0.3 * 0.7],
              [0.4 * 0.6 * 0.8]])

        # Single input with 1 value, multi-element batch
        test([v2],
             {v2: [[0.1],
                   [0.2]]},
             [[0.1],
              [0.2]])
        test([(v1, [1])],
             {v1: [[0.01, 0.1, 0.03],
                   [0.02, 0.2, 0.04]]},
             [[0.1],
              [0.2]])

        # Multiple inputs, single-element batch
        test([v1, v2],
             {v1: [[0.1, 0.2, 0.3]],
              v2: [[0.7]]},
             [[0.1 * 0.2 * 0.3 * 0.7]])
        test([(v1, [0, 2]), (v2, [0])],
             {v1: [[0.1, 0.2, 0.3]],
              v2: [[0.7]]},
             [[0.1 * 0.3 * 0.7]])

        # Single input with 1 value, single-element batch
        test([v2],
             {v2: [[0.1]]},
             [[0.1]])
        test([(v1, [1])],
             {v1: [[0.01, 0.1, 0.03]]},
             [[0.1]])
Example no. 27
    def test_compute_mpe_path(self):
        """Calculating MPE path of PermuteProducts"""
        def test(counts, inputs, feed, output):
            with self.subTest(counts=counts, inputs=inputs, feed=feed):
                p = spn.PermuteProducts(*inputs)
                op = p._compute_log_mpe_path(tf.identity(counts),
                                             *[i[0].get_value() for i in inputs])
                with self.test_session() as sess:
                    out = sess.run(op, feed_dict=feed)

                for o, t in zip(out, output):
                    np.testing.assert_array_almost_equal(
                        o,
                        np.array(t, dtype=spn.conf.dtype.as_numpy_dtype()))

        # Create inputs
        v1 = spn.RawLeaf(num_vars=6)
        v2 = spn.RawLeaf(num_vars=8)
        v3 = spn.RawLeaf(num_vars=5)

        # Multiple Product nodes - Common input Sizes
        # -------------------------------------------

        # Case 1: No. of inputs = Input sizes
        # No. of inputs = 2
        # Input sizes = [2, 2] --> {O O | O O}
        counts = tf.placeholder(tf.float32, shape=(None, 4))
        test(counts,
             [(v1, [4, 5]), (v2, [1, 2])],
             {counts: [[1, 2, 3, 4],
                       [11, 12, 13, 14],
                       [21, 22, 23, 24]]},
             [[[0.0, 0.0, 0.0, 0.0, 3.0, 7.0],
               [0.0, 0.0, 0.0, 0.0, 23.0, 27.0],
               [0.0, 0.0, 0.0, 0.0, 43.0, 47.0]],
              [[0.0, 4.0, 6.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 24.0, 26.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 44.0, 46.0, 0.0, 0.0, 0.0, 0.0, 0.0]]])
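        # Worked check for Case 1 (added comment): the two size-2 inputs yield
        # 2 x 2 = 4 products, with the first input varying slowest:
        # (v1[4]*v2[1], v1[4]*v2[2], v1[5]*v2[1], v1[5]*v2[2]). Scattering the
        # first counts row [1, 2, 3, 4] back to the children gives
        # v1[4] = 1 + 2 = 3, v1[5] = 3 + 4 = 7, v2[1] = 1 + 3 = 4 and
        # v2[2] = 2 + 4 = 6, which is exactly the first expected row above.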

        # Case 2: No. of inputs < Input sizes
        # No. of inputs = 2
        # Input sizes = [3, 3] --> {O O O | O O O}
        counts = tf.placeholder(tf.float32, shape=(None, 9))
        test(counts,
             [(v1, [0, 2, 4]), (v2, [1, 4, 7])],
             {counts: [[1, 2, 3, 4, 5, 6, 7, 8, 9],
                       [11, 12, 13, 14, 15, 16, 17, 18, 19],
                       [21, 22, 23, 24, 25, 26, 27, 28, 29]]},
             [[[6.0, 0.0, 15.0, 0.0, 24.0, 0.0],
               [36.0, 0.0, 45.0, 0.0, 54.0, 0.0],
               [66.0, 0.0, 75.0, 0.0, 84.0, 0.0]],
              [[0.0, 12.0, 0.0, 0.0, 15.0, 0.0, 0.0, 18.0],
               [0.0, 42.0, 0.0, 0.0, 45.0, 0.0, 0.0, 48.0],
               [0.0, 72.0, 0.0, 0.0, 75.0, 0.0, 0.0, 78.0]]])

        # Case 3: No. of inputs > Input sizes
        # No. of inputs = 3
        # Input sizes = [2, 2, 2] --> {O O | O O | O O}
        counts = tf.placeholder(tf.float32, shape=(None, 8))
        test(counts,
             [(v1, [0, 2]), (v2, [3, 5]), (v3, [1, 4])],
             {counts: [list(range(1, 9)),
                       list(range(11, 19)),
                       list(range(21, 29))]},
             [[[10.0, 0.0, 26.0, 0.0, 0.0, 0.0],
               [50.0, 0.0, 66.0, 0.0, 0.0, 0.0],
               [90.0, 0.0, 106.0, 0.0, 0.0, 0.0]],
              [[0.0, 0.0, 0.0, 14.0, 0.0, 22.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 54.0, 0.0, 62.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 94.0, 0.0, 102.0, 0.0, 0.0]],
              [[0.0, 16.0, 0.0, 0.0, 20.0],
               [0.0, 56.0, 0.0, 0.0, 60.0],
               [0.0, 96.0, 0.0, 0.0, 100.0]]])

        # Case 4: No. of inputs = Input sizes
        # No. of inputs = 3
        # Input sizes = [3, 3, 3] --> {O O O | O O O | O O O}
        counts = tf.placeholder(tf.float32, shape=(None, 27))
        test(counts,
             [(v1, [1, 3, 5]), (v2, [2, 4, 6]), (v3, [0, 1, 4])],
             {counts: [list(range(1, 28)),
                       list(range(101, 128)),
                       list(range(201, 228))]},
             [[[0.0, 45.0, 0.0, 126.0, 0.0, 207.0],
               [0.0, 945.0, 0.0, 1026.0, 0.0, 1107.0],
               [0.0, 1845.0, 0.0, 1926.0, 0.0, 2007.0]],
              [[0.0, 0.0, 99.0, 0.0, 126.0, 0.0, 153.0, 0.0],
               [0.0, 0.0, 999.0, 0.0, 1026.0, 0.0, 1053.0, 0.0],
               [0.0, 0.0, 1899.0, 0.0, 1926.0, 0.0, 1953.0, 0.0]],
              [[117.0, 126.0, 0.0, 0.0, 135.0],
               [1017.0, 1026.0, 0.0, 0.0, 1035.0],
               [1917.0, 1926.0, 0.0, 0.0, 1935.0]]])

        # Multiple Product nodes - Varying input Sizes
        # --------------------------------------------

        # Case 5: Ascending input sizes
        # No. of inputs = 3
        # Input sizes = [1, 2, 3] --> {O | O O | O O O}
        counts = tf.placeholder(tf.float32, shape=(None, 6))
        test(counts,
             [(v1, [3]), (v2, [4, 6]), (v3, [1, 2, 3])],
             {counts: [[1, 2, 3, 4, 5, 6],
                       [11, 12, 13, 14, 15, 16],
                       [21, 22, 23, 24, 25, 26]]},
             [[[0.0, 0.0, 0.0, 21.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 81.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 141.0, 0.0, 0.0]],
              [[0.0, 0.0, 0.0, 0.0, 6.0, 0.0, 15.0, 0.0],
               [0.0, 0.0, 0.0, 0.0, 36.0, 0.0, 45.0, 0.0],
               [0.0, 0.0, 0.0, 0.0, 66.0, 0.0, 75.0, 0.0]],
              [[0.0, 5.0, 7.0, 9.0, 0.0],
               [0.0, 25.0, 27.0, 29.0, 0.0],
               [0.0, 45.0, 47.0, 49.0, 0.0]]])
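        # Worked check for Case 5 (added comment): sizes [1, 2, 3] give
        # 1 x 2 x 3 = 6 products, again with the first input varying slowest.
        # For the first counts row [1..6]: v1[3] collects all six counts
        # (1+2+3+4+5+6 = 21), v2[4] collects counts 1-3 (= 6), v2[6] collects
        # counts 4-6 (= 15), and v3[1], v3[2], v3[3] collect (1+4, 2+5, 3+6)
        # = (5, 7, 9), matching the expected rows above.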

        # Case 6: Descending input sizes
        # No. of inputs = 3
        # Input sizes = [3, 2, 1] --> {O O O | O O | O}
        counts = tf.placeholder(tf.float32, shape=(None, 6))
        test(counts,
             [(v1, [2, 3, 4]), (v2, [0, 7]), (v3, [2])],
             {counts: [[1, 2, 3, 4, 5, 6],
                       [11, 12, 13, 14, 15, 16],
                       [21, 22, 23, 24, 25, 26]]},
             [[[0.0, 0.0, 3.0, 7.0, 11.0, 0.0],
               [0.0, 0.0, 23.0, 27.0, 31.0, 0.0],
               [0.0, 0.0, 43.0, 47.0, 51.0, 0.0]],
              [[9.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0],
               [39.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 42.0],
               [69.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 72.0]],
              [[0.0, 0.0, 21.0, 0.0, 0.0],
               [0.0, 0.0, 81.0, 0.0, 0.0],
               [0.0, 0.0, 141.0, 0.0, 0.0]]])

        # Case 7: Mixed input sizes - 1
        # No. of inputs = 3
        # Input sizes = [3, 2, 3] --> {O O O | O O | O O O}
        counts = tf.placeholder(tf.float32, shape=(None, 18))
        test(counts,
             [(v1, [0, 2, 5]), (v2, [3, 6]), (v3, [1, 2, 4])],
             {counts: [list(range(1, 19)),
                       list(range(21, 39)),
                       list(range(41, 59))]},
             [[[21.0, 0.0, 57.0, 0.0, 0.0, 93.0],
               [141.0, 0.0, 177.0, 0.0, 0.0, 213.0],
               [261.0, 0.0, 297.0, 0.0, 0.0, 333.0]],
              [[0.0, 0.0, 0.0, 72.0, 0.0, 0.0, 99.0, 0.0],
               [0.0, 0.0, 0.0, 252.0, 0.0, 0.0, 279.0, 0.0],
               [0.0, 0.0, 0.0, 432.0, 0.0, 0.0, 459.0, 0.0]],
              [[0.0, 51.0, 57.0, 0.0, 63.0],
               [0.0, 171.0, 177.0, 0.0, 183.0],
               [0.0, 291.0, 297.0, 0.0, 303.0]]])

        # Case 8: Mixed input sizes - 2
        # No. of inputs = 3
        # Input sizes = [1, 3, 1] --> {O | O O O | O}
        counts = tf.placeholder(tf.float32, shape=(None, 3))
        test(counts,
             [(v1, [0]), (v2, [1, 2, 3]), (v3, [4])],
             {counts: [[1, 2, 3],
                       [11, 12, 13],
                       [21, 22, 23]]},
             [[[6.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [36.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [66.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
              [[0.0, 1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 11.0, 12.0, 13.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 21.0, 22.0, 23.0, 0.0, 0.0, 0.0, 0.0]],
              [[0.0, 0.0, 0.0, 0.0, 6.0],
               [0.0, 0.0, 0.0, 0.0, 36.0],
               [0.0, 0.0, 0.0, 0.0, 66.0]]])

        # Single Product node
        # -------------------

        # Case 9: Multiple inputs, each with size 1
        # No. of inputs = 3
        # Input sizes = [1, 1, 1] --> {O | O | O}
        counts = tf.placeholder(tf.float32, shape=(None, 1))
        test(counts,
             [(v1, [3]), (v2, [2]), (v3, [1])],
             {counts: [[123],
                       [123],
                       [123]]},
             [[[0.0, 0.0, 0.0, 123.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 123.0, 0.0, 0.0],
               [0.0, 0.0, 0.0, 123.0, 0.0, 0.0]],
              [[0.0, 0.0, 123.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 0.0, 123.0, 0.0, 0.0, 0.0, 0.0, 0.0],
               [0.0, 0.0, 123.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
              [[0.0, 123.0, 0.0, 0.0, 0.0],
               [0.0, 123.0, 0.0, 0.0, 0.0],
               [0.0, 123.0, 0.0, 0.0, 0.0]]])
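        # Worked check for Case 9 (added comment): three size-1 inputs form a
        # single product, so the lone count 123 is routed unchanged to each
        # selected child (v1[3], v2[2] and v3[1]) in every batch row.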

        # Case 10: Single input with size > 1
        # No. of inputs = 1
        # Input sizes = [3] --> {O O O}
        counts = tf.placeholder(tf.float32, shape=(None, 1))
        test(counts,
             [(v3, [1, 2, 3])],
             {counts: [[3],
                       [3],
                       [3]]},
             [[[0.0, 3.0, 3.0, 3.0, 0.0],
               [0.0, 3.0, 3.0, 3.0, 0.0],
               [0.0, 3.0, 3.0, 3.0, 0.0]]])
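        # Note on Case 10 (added comment): with a single input there is nothing
        # to permute, so the node reduces to one product over v3[1], v3[2] and
        # v3[3]; the single count 3 is therefore scattered to all three indices.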

        # Case 11:  Single input with size = 1
        # No. of inputs = 1
        # Input sizes = [1] --> {O}
        counts = tf.placeholder(tf.float32, shape=(None, 1))
        test(counts,
             [(v3, [4])],
             {counts: [[1],
                       [1],
                       [1]]},
             [[[0.0, 0.0, 0.0, 0.0, 1.0],
               [0.0, 0.0, 0.0, 0.0, 1.0],
               [0.0, 0.0, 0.0, 0.0, 1.0]]])
Example n. 28
    def test_compute_valid(self):
        """Calculating validity of Sums"""
        # Without latent indicators
        v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
        v34 = spn.RawLeaf(num_vars=2)
        s1 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v12, [0, 1, 2, 3]),
                           (v12, [0, 1, 2, 3]),
                           num_or_size_sums=3)
        self.assertTrue(s1.is_valid())

        s2 = spn.SumsLayer((v12, [0, 1, 2, 4]), name="S2")
        s2b = spn.SumsLayer((v12, [0, 1, 2, 4]),
                            num_or_size_sums=[3, 1],
                            name="S2b")
        self.assertTrue(s2b.is_valid())
        self.assertFalse(s2.is_valid())

        s3 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                           (v34, 0),
                           num_or_size_sums=2)
        s3b = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                            (v34, 0),
                            num_or_size_sums=[4, 1, 4, 1])
        s3c = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                            (v34, 0),
                            num_or_size_sums=[4, 1, 5])
        self.assertFalse(s3.is_valid())
        self.assertTrue(s3b.is_valid())
        self.assertFalse(s3c.is_valid())

        p1 = spn.Product((v12, [0, 5]), (v34, 0))
        p2 = spn.Product((v12, [1, 6]), (v34, 0))
        p3 = spn.Product((v12, [1, 6]), (v34, 1))

        s4 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
        s5 = spn.SumsLayer(p1, p3, p1, p3, p1, p3, num_or_size_sums=3)
        s6 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1])
        s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[1, 2])
        s8 = spn.SumsLayer(p1, p2, p3, p2, p1, num_or_size_sums=[2, 1, 2])
        self.assertTrue(s4.is_valid())
        self.assertFalse(s5.is_valid())  # p1 and p3 different scopes
        self.assertTrue(s6.is_valid())
        self.assertFalse(s7.is_valid())  # p2 and p3 different scopes
        self.assertTrue(s8.is_valid())
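        # One consistent reading of the assertions above (inferred from this test,
        # not a quoted API contract): num_or_size_sums groups the flat input list
        # left to right, so [2, 1] models sums over (p1, p2) and (p3,), which is
        # complete because p1 and p2 share a scope, while [1, 2] models sums over
        # (p1,) and (p2, p3), which fails because p2 and p3 cover different v34
        # variables.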
        # With latent indicators
        s6 = spn.SumsLayer(p1, p2, p1, p2, p1, p2, num_or_size_sums=3)
        s6.generate_latent_indicators()
        self.assertTrue(s6.is_valid())

        s7 = spn.SumsLayer(p1, p2, num_or_size_sums=1)
        s7.set_latent_indicators(spn.RawLeaf(num_vars=2))
        self.assertFalse(s7.is_valid())

        s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=3)
        s7.set_latent_indicators(spn.RawLeaf(num_vars=3))
        self.assertTrue(s7.is_valid())

        s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1])
        s7.set_latent_indicators(spn.RawLeaf(num_vars=3))
        self.assertFalse(s7.is_valid())

        s8 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
        s8.set_latent_indicators(spn.IndicatorLeaf(num_vars=3, num_vals=2))
        with self.assertRaises(spn.StructureError):
            s8.is_valid()

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
        s9.set_latent_indicators(spn.RawLeaf(num_vars=2))
        with self.assertRaises(spn.StructureError):
            s9.is_valid()

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
        s9.set_latent_indicators(spn.RawLeaf(num_vars=3))
        with self.assertRaises(spn.StructureError):
            s9.is_valid()

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
        s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=1, num_vals=4))
        self.assertTrue(s9.is_valid())

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
        s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=1, num_vals=4))
        self.assertTrue(s9.is_valid())

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
        s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
        self.assertFalse(s9.is_valid())

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
        s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
        self.assertTrue(s9.is_valid())

        s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1])
        s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
        self.assertFalse(s9.is_valid())

        s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
        s10.set_latent_indicators((v12, [0, 3, 5, 7]))
        self.assertTrue(s10.is_valid())

        s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1])
        s10.set_latent_indicators((v12, [0, 3, 5, 7]))
        self.assertFalse(s10.is_valid())
Example n. 29
    def test_compute_graph_down(self):
        counter = [0]
        parent_vals_saved = {}

        def fun(node, parent_vals):
            parent_vals_saved[node] = parent_vals
            val = sum(parent_vals) + 0.01
            counter[0] += 1
            if node.is_op:
                return [val + i for i, _ in enumerate(node.inputs)]
            else:
                return 101

        # Generate graph
        v1 = spn.RawLeaf(num_vars=1, name="v1")
        v2 = spn.RawLeaf(num_vars=1, name="v2")
        v3 = spn.RawLeaf(num_vars=1, name="v3")
        s1 = spn.Sum(v1, v1, v2, name="s1")  # v1 included twice
        s2 = spn.Sum(v1, v3, name="s2")
        s3 = spn.Sum(v2, v3, v3, name="s3")  # v3 included twice
        s4 = spn.Sum(s1, v1, name="s4")
        s5 = spn.Sum(s2, v3, s3, name="s5")
        s6 = spn.Sum(s4, s2, s5, s4, s5, name="s6")  # s4 and s5 included twice
        spn.generate_weights(s6)

        down_values = {}
        spn.compute_graph_up_down(s6, down_fun=fun, graph_input=5,
                                  down_values=down_values)
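        # How the expected numbers below arise (added comment): graph_input feeds 5
        # to the root, so s6 computes 5 + 0.01 = 5.01 and, being an op node, returns
        # one value per input slot (presumably weights and latent indicators plus
        # its five value inputs), i.e. [5.01, 6.01, ..., 11.01]. Each descendant
        # then sums the values received from its parents and adds 0.01 again, e.g.
        # s5 receives 9.01 and 11.01 from the two s5 slots of s6 and produces
        # [20.03, 21.03, ...].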

        self.assertEqual(counter[0], 15)
        # Using sorted since order is not guaranteed
        self.assertListAlmostEqual(sorted(parent_vals_saved[s6]), [5])
        self.assertListAlmostEqual(down_values[s6], [5.01, 6.01, 7.01, 8.01,
                                                     9.01, 10.01, 11.01])
        self.assertListAlmostEqual(sorted(parent_vals_saved[s5]), [9.01, 11.01])
        self.assertListAlmostEqual(down_values[s5], [20.03, 21.03, 22.03,
                                                     23.03, 24.03])
        self.assertListAlmostEqual(sorted(parent_vals_saved[s4]), [7.01, 10.01])
        self.assertListAlmostEqual(down_values[s4], [17.03, 18.03, 19.03, 20.03])
        self.assertListAlmostEqual(sorted(parent_vals_saved[s3]), [24.03])
        self.assertListAlmostEqual(down_values[s3], [24.04, 25.04, 26.04,
                                                     27.04, 28.04])
        self.assertListAlmostEqual(sorted(parent_vals_saved[s2]), [8.01, 22.03])
        self.assertListAlmostEqual(down_values[s2], [30.05, 31.05, 32.05, 33.05])
        self.assertListAlmostEqual(sorted(parent_vals_saved[s1]), [19.03])
        self.assertListAlmostEqual(down_values[s1], [19.04, 20.04, 21.04,
                                                     22.04, 23.04])

        self.assertListAlmostEqual(sorted(parent_vals_saved[v1]),
                                   [20.03, 21.04, 22.04, 32.05])
        self.assertEqual(down_values[v1], 101)
        self.assertListAlmostEqual(sorted(parent_vals_saved[v2]),
                                   [23.04, 26.04])
        self.assertEqual(down_values[v2], 101)
        self.assertListAlmostEqual(sorted(parent_vals_saved[v3]),
                                   [23.03, 27.04, 28.04, 33.05])
        self.assertEqual(down_values[v3], 101)

        # Test if the algorithm works on a VarNode and calls graph_input function
        down_values = {}
        parent_vals_saved = {}
        spn.compute_graph_up_down(v1, down_fun=fun, graph_input=lambda: 5,
                                  down_values=down_values)
        self.assertEqual(parent_vals_saved[v1][0], 5)
        self.assertEqual(down_values[v1], 101)
Example n. 30
    def test_value(self):
        """Calculating value of Concat"""
        def test(inputs, feed, output):
            with self.subTest(inputs=inputs, feed=feed):
                n = spn.Concat(*inputs)
                op = n.get_value(spn.InferenceType.MARGINAL)
                op_log = n.get_log_value(spn.InferenceType.MARGINAL)
                op_mpe = n.get_value(spn.InferenceType.MPE)
                op_log_mpe = n.get_log_value(spn.InferenceType.MPE)
                with self.test_session() as sess:
                    out = sess.run(op, feed_dict=feed)
                    out_log = sess.run(tf.exp(op_log), feed_dict=feed)
                    out_mpe = sess.run(op_mpe, feed_dict=feed)
                    out_log_mpe = sess.run(tf.exp(op_log_mpe), feed_dict=feed)
                np.testing.assert_array_almost_equal(
                    out, np.array(output,
                                  dtype=spn.conf.dtype.as_numpy_dtype()))
                np.testing.assert_array_almost_equal(
                    out_log,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
                np.testing.assert_array_almost_equal(
                    out_mpe,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
                np.testing.assert_array_almost_equal(
                    out_log_mpe,
                    np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))

        # Create inputs
        v1 = spn.RawLeaf(num_vars=3)
        v2 = spn.RawLeaf(num_vars=5)
        v3 = spn.RawLeaf(num_vars=1)

        # Multiple inputs, indices specified
        test(
            [(v1, [0, 2]), (v2, [1])], {
                v1: [[1, 2, 3], [4, 5, 6]],
                v2: [[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]]
            }, [[1.0, 3.0, 8.0], [4.0, 6.0, 13.0]])

        # Single input, indices specified
        test([(v1, [0, 1, 2])], {v1: [[1, 2, 3], [4, 5, 6]]},
             [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

        # Single input, no indices
        test([v1], {v1: [[1, 2, 3], [4, 5, 6]]},
             [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

        # Single input with 1 value, no indices
        test([v3], {v3: [[17], [18]]}, [[17.0], [18.0]])

        # Multiple inputs, no indices
        test(
            [v1, v2], {
                v1: [[1, 2, 3], [4, 5, 6]],
                v2: [[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]]
            }, [[1.0, 2.0, 3.0, 7.0, 8.0, 9.0, 10.0, 11.0],
                [4.0, 5.0, 6.0, 12.0, 13.0, 14.0, 15.0, 16.0]])

        # Mixed
        test(
            [v1, (v2, [2, 3]), v3], {
                v1: [[1, 2, 3], [4, 5, 6]],
                v2: [[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]],
                v3: [[17], [18]]
            }, [[1.0, 2.0, 3.0, 9.0, 10.0, 17.0],
                [4.0, 5.0, 6.0, 14.0, 15.0, 18.0]])

        # One-element batch
        test([(v1, [0, 2]), (v2, [1])], {
            v1: [[1, 2, 3]],
            v2: [[7, 8, 9, 10, 11]]
        }, [[1.0, 3.0, 8.0]])
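
The Concat expectations above are plain column gathers followed by a concatenation
along the feature axis. As a cross-check, here is a minimal NumPy sketch (not part
of the original test and independent of libspn; the array names are ours) that
reproduces the first expected output under that assumption:

import numpy as np

# Batch values fed to the two leaves in the first Concat test above.
v1_batch = np.array([[1., 2., 3.], [4., 5., 6.]])
v2_batch = np.array([[7., 8., 9., 10., 11.], [12., 13., 14., 15., 16.]])

# Gather the referenced columns, then concatenate along axis 1 (features).
expected = np.concatenate([v1_batch[:, [0, 2]], v2_batch[:, [1]]], axis=1)
assert expected.tolist() == [[1.0, 3.0, 8.0], [4.0, 6.0, 13.0]]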