def test_compute_valid(self): """Calculating validity of Sum""" # Without IVs v12 = spn.IVs(num_vars=2, num_vals=4) v34 = spn.ContVars(num_vars=2) s1 = spn.Sum((v12, [0, 1, 2, 3])) s2 = spn.Sum((v12, [0, 1, 2, 4])) s3 = spn.Sum((v12, [0, 1, 2, 3]), (v34, 0)) p1 = spn.Product((v12, [0, 5]), (v34, 0)) p2 = spn.Product((v12, [1, 6]), (v34, 0)) p3 = spn.Product((v12, [1, 6]), (v34, 1)) s4 = spn.Sum(p1, p2) s5 = spn.Sum(p1, p3) self.assertTrue(v12.is_valid()) self.assertTrue(v34.is_valid()) self.assertTrue(s1.is_valid()) self.assertFalse(s2.is_valid()) self.assertFalse(s3.is_valid()) self.assertTrue(s4.is_valid()) self.assertFalse(s5.is_valid()) # With IVS s6 = spn.Sum(p1, p2) s6.generate_ivs() self.assertTrue(s6.is_valid()) s7 = spn.Sum(p1, p2) s7.set_ivs(spn.ContVars(num_vars=2)) self.assertFalse(s7.is_valid()) s8 = spn.Sum(p1, p2) s8.set_ivs(spn.IVs(num_vars=2, num_vals=2)) with self.assertRaises(spn.StructureError): s8.is_valid() s9 = spn.Sum(p1, p2) s9.set_ivs((v12, [0, 3])) self.assertTrue(s9.is_valid())
def test_group_initialization(self): """Group initialization of weights nodes""" v1 = spn.IVs(num_vars=1, num_vals=2) v2 = spn.IVs(num_vars=1, num_vals=4) s1 = spn.Sum(v1) s1.generate_weights([0.2, 0.3]) s2 = spn.Sum(v2) s2.generate_weights(5) p = spn.Product(s1, s2) init = spn.initialize_weights(p) with tf.Session() as sess: sess.run([init]) val1 = sess.run(s1.weights.node.get_value()) val2 = sess.run(s2.weights.node.get_value()) val1_log = sess.run(tf.exp(s1.weights.node.get_log_value())) val2_log = sess.run(tf.exp(s2.weights.node.get_log_value())) self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype()) np.testing.assert_array_almost_equal(val1, [0.4, 0.6]) np.testing.assert_array_almost_equal(val2, [0.25, 0.25, 0.25, 0.25]) self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype()) np.testing.assert_array_almost_equal(val1_log, [0.4, 0.6]) np.testing.assert_array_almost_equal(val2_log, [0.25, 0.25, 0.25, 0.25])
def test_discretedense_saving_3class_externalivs(self):
    model1 = spn.DiscreteDenseModel(
        num_classes=3, num_decomps=2, num_subsets=3, num_mixtures=2,
        input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
        num_input_mixtures=None,
        weight_init_value=spn.ValueType.RANDOM_UNIFORM(0, 1))
    sample_ivs1 = spn.IVs(num_vars=6, num_vals=2)
    class_ivs1 = spn.IVs(num_vars=1, num_vals=3)
    model1.build(sample_ivs1, class_input=class_ivs1)
    init1 = spn.initialize_weights(model1.root)

    feed_samples = list(itertools.product(range(2), repeat=6))
    feed_class = np.array(
        [i for i in range(3)
         for _ in range(len(feed_samples))]).reshape(-1, 1)
    feed_samples = np.array(feed_samples * 3)

    with tf.Session() as sess:
        # Initialize
        init1.run()
        # Save
        path = self.out_path(self.cid() + ".spn")
        model1.save_to_json(path, pretty=True, save_param_vals=True,
                            sess=sess)

    # Reset graph
    tf.reset_default_graph()

    with tf.Session() as sess:
        # Load
        model2 = spn.Model.load_from_json(path, load_param_vals=True,
                                          sess=sess)
        self.assertIs(type(model2), spn.DiscreteDenseModel)
        val_marginal2 = model2.root.get_value(
            inference_type=spn.InferenceType.MARGINAL)
        # Check model after loading
        self.assertTrue(model2.root.is_valid())
        out_marginal2 = sess.run(
            val_marginal2,
            feed_dict={model2.sample_inputs[0].node: feed_samples,
                       model2.class_input.node: feed_class})
        self.assertAlmostEqual(out_marginal2.sum(), 1.0, places=6)
        # Writing graph
        self.write_tf_graph(sess, self.sid(), self.cid())

def test_group_initialization(self): """Group initialization of weights nodes""" v1 = spn.IVs(num_vars=1, num_vals=2) v2 = spn.IVs(num_vars=1, num_vals=4) v3 = spn.IVs(num_vars=1, num_vals=2) v4 = spn.IVs(num_vars=1, num_vals=2) # Sum s1 = spn.Sum(v1) s1.generate_weights(tf.initializers.constant([0.2, 0.3])) s2 = spn.Sum(v2) s2.generate_weights(tf.initializers.constant(5)) # ParSums s3 = spn.ParSums(*[v3, v4], num_sums=2) s3.generate_weights( tf.initializers.constant([0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1])) s4 = spn.ParSums(*[v1, v2, v3, v4], num_sums=3) s4.generate_weights(tf.initializers.constant(2.0)) # Product p = spn.Product(s1, s2, s3, s4) init = spn.initialize_weights(p) with self.test_session() as sess: sess.run([init]) val1 = sess.run(s1.weights.node.get_value()) val2 = sess.run(s2.weights.node.get_value()) val3 = sess.run(s3.weights.node.get_value()) val4 = sess.run(s4.weights.node.get_value()) val1_log = sess.run(tf.exp(s1.weights.node.get_log_value())) val2_log = sess.run(tf.exp(s2.weights.node.get_log_value())) val3_log = sess.run(tf.exp(s3.weights.node.get_log_value())) val4_log = sess.run(tf.exp(s4.weights.node.get_log_value())) self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val3.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val4.dtype, spn.conf.dtype.as_numpy_dtype()) np.testing.assert_array_almost_equal(val1, [[0.4, 0.6]]) np.testing.assert_array_almost_equal(val2, [[0.25, 0.25, 0.25, 0.25]]) np.testing.assert_array_almost_equal( val3, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]]) np.testing.assert_array_almost_equal( val4, [[0.1] * 10, [0.1] * 10, [0.1] * 10]) self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val3_log.dtype, spn.conf.dtype.as_numpy_dtype()) self.assertEqual(val4_log.dtype, spn.conf.dtype.as_numpy_dtype()) np.testing.assert_array_almost_equal(val1_log, [[0.4, 0.6]]) np.testing.assert_array_almost_equal(val2_log, [[0.25, 0.25, 0.25, 0.25]]) np.testing.assert_array_almost_equal( val3, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]]) np.testing.assert_array_almost_equal( val4, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
def test_discretedense_3class_externalivs(self):
    model = spn.DiscreteDenseModel(
        num_classes=3, num_decomps=2, num_subsets=3, num_mixtures=2,
        input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
        num_input_mixtures=None,
        weight_initializer=tf.initializers.random_uniform(0.0, 1.0))
    sample_ivs = spn.IVs(num_vars=6, num_vals=2)
    class_ivs = spn.IVs(num_vars=1, num_vals=3)
    root = model.build(sample_ivs, class_input=class_ivs)
    self.generic_model_test("3class", root, sample_ivs, class_ivs)

def test_discretedense_3class_externalivs(self):
    model = spn.DiscreteDenseModel(
        num_classes=3, num_decomps=2, num_subsets=3, num_mixtures=2,
        input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
        num_input_mixtures=None,
        weight_init_value=spn.ValueType.RANDOM_UNIFORM(0, 1))
    sample_ivs = spn.IVs(num_vars=6, num_vals=2)
    class_ivs = spn.IVs(num_vars=1, num_vals=3)
    root = model.build(sample_ivs, class_input=class_ivs)
    self.generic_model_test("3class", root, sample_ivs, class_ivs)

def generic_dense_test(self, name, num_decomps, num_subsets, num_mixtures,
                       input_dist, num_input_mixtures):
    """A generic test for DenseSPNGenerator."""
    v1 = spn.IVs(num_vars=3, num_vals=2, name="IVs1")
    v2 = spn.IVs(num_vars=3, num_vals=2, name="IVs2")
    gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
                                num_subsets=num_subsets,
                                num_mixtures=num_mixtures,
                                input_dist=input_dist,
                                num_input_mixtures=num_input_mixtures)
    # Generating SPN
    root = gen.generate(v1, v2)
    # Generating random weights
    with tf.name_scope("Weights"):
        spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
    # Generating weight initializers
    init = spn.initialize_weights(root)
    # Testing validity
    self.assertTrue(root.is_valid())
    # Generating value ops
    v = root.get_value()
    v_log = root.get_log_value()
    # Creating session
    with self.test_session() as sess:
        # Initializing weights
        init.run()
        # Computing all values
        feed = np.array(list(itertools.product(range(2), repeat=6)))
        feed_v1 = feed[:, :3]
        feed_v2 = feed[:, 3:]
        out = sess.run(v, feed_dict={v1: feed_v1, v2: feed_v2})
        out_log = sess.run(tf.exp(v_log),
                           feed_dict={v1: feed_v1, v2: feed_v2})
        # Test if partition function is 1.0
        self.assertAlmostEqual(out.sum(), 1.0, places=6)
        self.assertAlmostEqual(out_log.sum(), 1.0, places=6)
        self.write_tf_graph(sess, self.sid(), self.cid())

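# Editor's note: the partition-function check above works because the rows of
# `feed` enumerate all 2 ** 6 = 64 joint assignments of the six binary
# variables, so for an SPN with normalized weights the root values over the
# whole sample space must sum to 1. The enumeration itself, verified in
# plain numpy:
feed = np.array(list(itertools.product(range(2), repeat=6)))
assert feed.shape == (64, 6)
assert len(set(map(tuple, feed))) == 64   # every assignment appears once
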
def test_compute_mpe_path(self):
    v12 = spn.IVs(num_vars=2, num_vals=4)
    v34 = spn.ContVars(num_vars=2)
    v5 = spn.ContVars(num_vars=1)
    p = spn.Product((v12, [0, 5]), v34, (v12, [3]), v5)
    counts = tf.placeholder(tf.float32, shape=(None, 1))
    op = p._compute_mpe_path(tf.identity(counts),
                             v12.get_value(),
                             v34.get_value(),
                             v12.get_value(),
                             v5.get_value())
    feed = [[0], [1], [2]]

    with tf.Session() as sess:
        out = sess.run(op, feed_dict={counts: feed})
    np.testing.assert_array_almost_equal(
        out[0], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                          [1., 0., 0., 0., 0., 1., 0., 0.],
                          [2., 0., 0., 0., 0., 2., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[1], np.array([[0., 0.], [1., 1.], [2., 2.]], dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[2], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 1., 0., 0., 0., 0.],
                          [0., 0., 0., 2., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[3], np.array([[0.], [1.], [2.]], dtype=np.float32))

def test_is_valid_false(self):
    """Checking validity of the SPN"""
    # Create graph
    v12 = spn.IVs(num_vars=2, num_vals=4, name="V12")
    v34 = spn.ContVars(num_vars=2, name="V34")
    s1 = spn.Sum((v12, [0, 1, 2, 3]), name="S1")
    s2 = spn.Sum((v12, [4, 5, 6, 7]), name="S2")
    p1 = spn.Product((v12, [0, 7]), name="P1")
    p2 = spn.Product((v12, [2, 3, 4]), name="P2")
    p3 = spn.Product(v34, name="P3")
    n1 = spn.Concat(s1, s2, p3, name="N1")
    n2 = spn.Concat(p1, p2, name="N2")
    p4 = spn.Product((n1, [0]), (n1, [1]), name="P4")
    p5 = spn.Product((n2, [0]), (n1, [2]), name="P5")
    s3 = spn.Sum(p4, n2, name="S3")
    p6 = spn.Product(s3, (n1, [2]), name="P6")
    s4 = spn.Sum(p5, p6, name="S4")
    # Test
    self.assertTrue(v12.is_valid())
    self.assertTrue(v34.is_valid())
    self.assertTrue(s1.is_valid())
    self.assertTrue(s2.is_valid())
    self.assertTrue(p1.is_valid())
    self.assertTrue(p3.is_valid())
    self.assertTrue(p4.is_valid())
    self.assertTrue(n1.is_valid())
    self.assertFalse(p2.is_valid())
    self.assertFalse(n2.is_valid())
    self.assertFalse(s3.is_valid())
    self.assertFalse(s4.is_valid())
    self.assertFalse(p5.is_valid())
    self.assertFalse(p6.is_valid())

def test_compute_mpe_path(self):
    v12 = spn.IVs(num_vars=2, num_vals=4)
    v34 = spn.ContVars(num_vars=2)
    v5 = spn.ContVars(num_vars=1)
    p = spn.Concat((v12, [0, 5]), v34, (v12, [3]), v5)
    counts = tf.placeholder(tf.float32, shape=(None, 6))
    op = p._compute_log_mpe_path(tf.identity(counts),
                                 v12.get_value(),
                                 v34.get_value(),
                                 v12.get_value(),
                                 v5.get_value())
    feed = np.r_[:18].reshape(-1, 6)

    with self.test_session() as sess:
        out = sess.run(op, feed_dict={counts: feed})
    np.testing.assert_array_almost_equal(
        out[0], np.array([[0., 0., 0., 0., 0., 1., 0., 0.],
                          [6., 0., 0., 0., 0., 7., 0., 0.],
                          [12., 0., 0., 0., 0., 13., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[1], np.array([[2., 3.], [8., 9.], [14., 15.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[2], np.array([[0., 0., 0., 4., 0., 0., 0., 0.],
                          [0., 0., 0., 10., 0., 0., 0., 0.],
                          [0., 0., 0., 16., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[3], np.array([[5.], [11.], [17.]], dtype=np.float32))

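# Editor's sketch: Concat's MPE path simply routes the incoming counts back
# to its inputs slice by slice, which is where the expected matrices above
# come from. The same split in plain numpy (input sizes 2, 2, 1, 1):
counts = np.arange(18, dtype=np.float32).reshape(-1, 6)
per_input = np.split(counts, [2, 4, 5], axis=1)
# per_input[0] is scattered into columns [0, 5] of the first v12 tensor and
# per_input[2] into column [3] of the second, matching out[0] and out[2].
assert np.allclose(per_input[1], [[2., 3.], [8., 9.], [14., 15.]])
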
def test_get_scope(self):
    """Computing the scope of nodes of the SPN graph"""
    # Create graph
    v12 = spn.IVs(num_vars=2, num_vals=4, name="V12")
    v34 = spn.ContVars(num_vars=2, name="V34")
    s1 = spn.Sum((v12, [0, 1, 2, 3]), name="S1")
    s2 = spn.Sum((v12, [4, 5, 6, 7]), name="S2")
    p1 = spn.Product((v12, [0, 7]), name="P1")
    p2 = spn.Product((v12, [3, 4]), name="P2")
    p3 = spn.Product(v34, name="P3")
    n1 = spn.Concat(s1, s2, p3, name="N1")
    n2 = spn.Concat(p1, p2, name="N2")
    p4 = spn.Product((n1, [0]), (n1, [1]), name="P4")
    p5 = spn.Product((n2, [0]), (n1, [2]), name="P5")
    s3 = spn.Sum(p4, n2, name="S3")
    p6 = spn.Product(s3, (n1, [2]), name="P6")
    s4 = spn.Sum(p5, p6, name="S4")
    # Test
    self.assertListEqual(v12.get_scope(),
                         [spn.Scope(v12, 0), spn.Scope(v12, 0),
                          spn.Scope(v12, 0), spn.Scope(v12, 0),
                          spn.Scope(v12, 1), spn.Scope(v12, 1),
                          spn.Scope(v12, 1), spn.Scope(v12, 1)])
    self.assertListEqual(v34.get_scope(),
                         [spn.Scope(v34, 0), spn.Scope(v34, 1)])
    self.assertListEqual(s1.get_scope(), [spn.Scope(v12, 0)])
    self.assertListEqual(s2.get_scope(), [spn.Scope(v12, 1)])
    self.assertListEqual(p1.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
    self.assertListEqual(p2.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
    self.assertListEqual(p3.get_scope(),
                         [spn.Scope(v34, 0) | spn.Scope(v34, 1)])
    self.assertListEqual(n1.get_scope(),
                         [spn.Scope(v12, 0), spn.Scope(v12, 1),
                          spn.Scope(v34, 0) | spn.Scope(v34, 1)])
    self.assertListEqual(n2.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1),
                          spn.Scope(v12, 0) | spn.Scope(v12, 1)])
    self.assertListEqual(p4.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
    self.assertListEqual(p5.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                          spn.Scope(v34, 0) | spn.Scope(v34, 1)])
    self.assertListEqual(s3.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1)])
    self.assertListEqual(p6.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                          spn.Scope(v34, 0) | spn.Scope(v34, 1)])
    self.assertListEqual(s4.get_scope(),
                         [spn.Scope(v12, 0) | spn.Scope(v12, 1) |
                          spn.Scope(v34, 0) | spn.Scope(v34, 1)])

def test_compute_mpe_path_noivs(self):
    spn.conf.argmax_zero = True
    v12 = spn.IVs(num_vars=2, num_vals=4)
    v34 = spn.ContVars(num_vars=2)
    v5 = spn.ContVars(num_vars=1)
    s = spn.Sum((v12, [0, 5]), v34, (v12, [3]), v5)
    w = s.generate_weights()
    counts = tf.placeholder(tf.float32, shape=(None, 1))
    op = s._compute_log_mpe_path(tf.identity(counts),
                                 w.get_log_value(),
                                 None,
                                 v12.get_log_value(),
                                 v34.get_log_value(),
                                 v12.get_log_value(),
                                 v5.get_log_value())
    init = w.initialize()
    counts_feed = [[10], [11], [12], [13]]
    v12_feed = [[0, 1], [1, 1], [0, 0], [3, 3]]
    v34_feed = [[0.1, 0.2], [1.2, 0.2], [0.1, 0.2], [0.9, 0.8]]
    v5_feed = [[0.5], [0.5], [1.2], [0.9]]

    with self.test_session() as sess:
        sess.run(init)
        # Skip the IVs op
        out = sess.run(op[:1] + op[2:], feed_dict={counts: counts_feed,
                                                   v12: v12_feed,
                                                   v34: v34_feed,
                                                   v5: v5_feed})
    # Weights
    np.testing.assert_array_almost_equal(
        np.squeeze(out[0]), np.array([[10., 0., 0., 0., 0., 0.],
                                      [0., 0., 11., 0., 0., 0.],
                                      [0., 0., 0., 0., 0., 12.],
                                      [0., 0., 0., 0., 13., 0.]],
                                     dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[1], np.array([[10., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[2], np.array([[0., 0.], [11., 0.], [0., 0.], [0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[3], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 13., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[4], np.array([[0.], [0.], [12.], [0.]], dtype=np.float32))

def test_gather_input_scopes(self):
    v12 = spn.IVs(num_vars=2, num_vals=4, name="V12")
    v34 = spn.ContVars(num_vars=2, name="V34")
    s1 = spn.Sum(v12, v12, v34, (v12, [7, 3, 1, 0]), (v34, 0), name="S1")
    scopes_v12 = v12._compute_scope()
    scopes_v34 = v34._compute_scope()
    # Note: weights/ivs are disconnected, so None should be output for these
    scopes = s1._gather_input_scopes(None, None, None, scopes_v12,
                                     scopes_v34, scopes_v12, scopes_v34)
    self.assertTupleEqual(
        scopes,
        (None, None, None, scopes_v12, scopes_v34,
         [scopes_v12[7], scopes_v12[3], scopes_v12[1], scopes_v12[0]],
         [scopes_v34[0]]))

def test_traversing_on_dense(self):
    """Compare traversal algs on dense SPN"""
    def fun1(node, *args):
        counter[0] += 1

    def fun2(node, *args):
        counter[0] += 1
        if node.is_op:
            return [None] * len(node.inputs)

    # Generate dense graph
    v1 = spn.IVs(num_vars=3, num_vals=2, name="IVs1")
    v2 = spn.IVs(num_vars=3, num_vals=2, name="IVs2")
    gen = spn.DenseSPNGenerator(
        num_decomps=2, num_subsets=3, num_mixtures=2,
        input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
        num_input_mixtures=None)
    root = gen.generate(v1, v2)
    spn.generate_weights(root)

    # Run traversal algs and count nodes
    counter = [0]
    spn.compute_graph_up_down(root, down_fun=fun2, graph_input=1)
    c1 = counter[0]

    counter = [0]
    spn.compute_graph_up(root, val_fun=fun1)
    c2 = counter[0]

    counter = [0]
    spn.traverse_graph(root, fun=fun1, skip_params=False)
    c3 = counter[0]

    # Compare
    self.assertEqual(c1, c3)
    self.assertEqual(c2, c3)

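# What the three traversals have in common (editor's sketch): each visits
# every unique node of the DAG exactly once, which is why the counters agree.
# A minimal stand-alone traversal over a dict-based DAG:
def count_nodes(root, children):
    seen, stack = set(), [root]
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        stack.extend(children.get(node, []))
    return len(seen)

# "c" is shared by "a" and "b" but counted only once.
assert count_nodes("root", {"root": ["a", "b"],
                            "a": ["c"], "b": ["c"]}) == 4
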
def test_compute_valid(self): """Calculating validity of Product""" v12 = spn.IVs(num_vars=2, num_vals=4) v34 = spn.ContVars(num_vars=2) p1 = spn.Product((v12, [0, 5])) p2 = spn.Product((v12, [0, 3])) p3 = spn.Product((v12, [0, 5]), v34) p4 = spn.Product((v12, [0, 3]), v34) p5 = spn.Product((v12, [0, 5]), v34, (v12, 2)) self.assertTrue(p1.is_valid()) self.assertFalse(p2.is_valid()) self.assertTrue(p3.is_valid()) self.assertFalse(p4.is_valid()) self.assertFalse(p5.is_valid())
def test_compute_valid(self): """Calculating validity of PermProducts""" v12 = spn.IVs(num_vars=2, num_vals=3) v345 = spn.IVs(num_vars=3, num_vals=3) v678 = spn.ContVars(num_vars=3) v910 = spn.ContVars(num_vars=2) p1 = spn.PermProducts((v12, [0, 1]), (v12, [4, 5])) p2 = spn.PermProducts((v12, [3, 5]), (v345, [0, 1, 2])) p3 = spn.PermProducts((v345, [0, 1, 2]), (v345, [3, 4, 5]), (v345, [6, 7, 8])) p4 = spn.PermProducts((v345, [6, 8]), (v678, [0, 1])) p5 = spn.PermProducts((v678, [1]), v910) p6 = spn.PermProducts(v678, v910) p7 = spn.PermProducts((v678, [0, 1, 2])) p8 = spn.PermProducts((v910, [0]), (v910, [1])) self.assertTrue(p1.is_valid()) self.assertTrue(p2.is_valid()) self.assertTrue(p3.is_valid()) self.assertTrue(p4.is_valid()) self.assertTrue(p5.is_valid()) self.assertTrue(p6.is_valid()) self.assertTrue(p7.is_valid()) self.assertTrue(p8.is_valid()) p9 = spn.PermProducts((v12, [0, 1]), (v12, [1, 2])) p10 = spn.PermProducts((v12, [3, 4, 5]), (v345, [0]), (v345, [0, 1, 2])) p11 = spn.PermProducts((v345, [3, 5]), (v678, [0]), (v678, [0])) p12 = spn.PermProducts((v910, [1]), (v910, [1])) p13 = spn.PermProducts(v910, v910) p14 = spn.PermProducts((v12, [0]), (v12, [1])) self.assertFalse(p9.is_valid()) self.assertFalse(p10.is_valid()) self.assertFalse(p11.is_valid()) self.assertFalse(p12.is_valid()) self.assertFalse(p13.is_valid()) self.assertEqual(p14.num_prods, 1) self.assertFalse(p14.is_valid())
def test(num_vars, num_vals, rv_value, iv_value):
    with self.subTest(num_vars=num_vars, num_vals=num_vals,
                      rv_value=rv_value):
        n = spn.IVs(num_vars=num_vars, num_vals=num_vals)
        op = n.get_value()
        op_log = n.get_log_value()
        with self.test_session() as sess:
            out = sess.run(op, feed_dict={n: rv_value})
            out_log = sess.run(tf.exp(op_log), feed_dict={n: rv_value})
        np.testing.assert_array_almost_equal(
            out, np.array(iv_value, dtype=spn.conf.dtype.as_numpy_dtype()))
        np.testing.assert_array_almost_equal(
            out_log,
            np.array(iv_value, dtype=spn.conf.dtype.as_numpy_dtype()))

def test_masked_weights(self):
    v12 = spn.IVs(num_vars=2, num_vals=4)
    v34 = spn.ContVars(num_vars=2)
    v5 = spn.ContVars(num_vars=1)
    s = spn.SumsLayer((v12, [0, 5]), v34, (v12, [3]), v5,
                      (v12, [0, 5]), v34, (v12, [3]), v5,
                      num_or_size_sums=[3, 1, 3, 4, 1])
    s.generate_weights(
        initializer=tf.initializers.random_uniform(0.0, 1.0))

    with self.test_session() as sess:
        sess.run(s.weights.node.initialize())
        weights = sess.run(s.weights.node.variable)

    shape = [5, 4]
    self.assertEqual(shape, s.weights.node.variable.shape.as_list())
    # Padded positions of the ragged sums must carry zero weight
    for row, col in [(0, -1), (1, 1), (1, 2), (1, 3),
                     (2, -1), (4, 1), (4, 2), (4, 3)]:
        self.assertEqual(weights[row, col], 0.0)
    self.assertAllClose(np.sum(weights, axis=1), np.ones(5))

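# Editor's sketch of the padding scheme asserted above: ragged sums of sizes
# [3, 1, 3, 4, 1] are stored in one rectangular 5 x 4 weight matrix, where
# each row is normalized over its first `size` entries and zero elsewhere.
sizes = [3, 1, 3, 4, 1]
mask = np.array([[1.] * sz + [0.] * (max(sizes) - sz) for sz in sizes])
assert mask.shape == (5, 4)
assert mask[1, 1] == 0. and mask[4, 3] == 0.  # the padded cells checked above
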
def test(num_vars, num_vals, rv_value, iv_value):
    with self.subTest(num_vars=num_vars, num_vals=num_vals,
                      rv_value=rv_value):
        p = tf.placeholder(tf.int32, [None, num_vars])
        n = spn.IVs(feed=p, num_vars=num_vars, num_vals=num_vals)
        op = n.get_value()
        op_log = n.get_log_value()
        with tf.Session() as sess:
            out = sess.run(op, feed_dict={p: rv_value})
            out_log = sess.run(tf.exp(op_log), feed_dict={p: rv_value})
        np.testing.assert_array_almost_equal(
            out, np.array(iv_value, dtype=spn.conf.dtype.as_numpy_dtype()))
        np.testing.assert_array_almost_equal(
            out_log,
            np.array(iv_value, dtype=spn.conf.dtype.as_numpy_dtype()))

def test_generate_set_errors(self):
    """Detecting structure errors in __generate_set"""
    gen = spn.DenseSPNGenerator(num_decomps=2, num_subsets=3,
                                num_mixtures=2)
    v1 = spn.IVs(num_vars=2, num_vals=4)
    v2 = spn.ContVars(num_vars=3, name="ContVars1")
    v3 = spn.ContVars(num_vars=2, name="ContVars2")
    s1 = spn.Sum(v3, v2)
    n1 = spn.Concat(v2)

    with self.assertRaises(spn.StructureError):
        gen._DenseSPNGenerator__generate_set(
            [spn.Input(v1, [0, 3, 2, 6, 7]),
             spn.Input(v2, [1, 2]),
             spn.Input(s1, None),
             spn.Input(n1, None)])

def test_generate_set(self):
    """Generation of sets of inputs with __generate_set"""
    gen = spn.DenseSPNGenerator(num_decomps=2, num_subsets=3,
                                num_mixtures=2)
    v1 = spn.IVs(num_vars=2, num_vals=4)
    v2 = spn.ContVars(num_vars=3, name="ContVars1")
    v3 = spn.ContVars(num_vars=2, name="ContVars2")
    s1 = spn.Sum(v3)
    n1 = spn.Concat(v2)
    out = gen._DenseSPNGenerator__generate_set(
        [spn.Input(v1, [0, 3, 2, 6, 7]),
         spn.Input(v2, [1, 2]),
         spn.Input(s1, None),
         spn.Input(n1, None)])
    # scope_dict:
    # Scope({IVs(0x7f00cb4049b0):0}): {(IVs(0x7f00cb4049b0), 0),
    #                                  (IVs(0x7f00cb4049b0), 2),
    #                                  (IVs(0x7f00cb4049b0), 3)},
    # Scope({IVs(0x7f00cb4049b0):1}): {(IVs(0x7f00cb4049b0), 7),
    #                                  (IVs(0x7f00cb4049b0), 6)},
    # Scope({ContVars1(0x7f00b7982ef0):1}): {(Concat(0x7f00cb404d68), 1),
    #                                        (ContVars1(0x7f00b7982ef0), 1)},
    # Scope({ContVars1(0x7f00b7982ef0):2}): {(Concat(0x7f00cb404d68), 2),
    #                                        (ContVars1(0x7f00b7982ef0), 2)},
    # Scope({ContVars1(0x7f00b7982ef0):0}): {(Concat(0x7f00cb404d68), 0)},
    # Scope({ContVars2(0x7f00cb391eb8):0, ContVars2(0x7f00cb391eb8):1}): {
    #     (Sum(0x7f00cb404a90), 0)}}

    # Since order is undetermined, we check items
    self.assertEqual(len(out), 6)
    self.assertIn(tuple(sorted([(v2, 1), (n1, 1)])), out)
    self.assertIn(tuple(sorted([(v2, 2), (n1, 2)])), out)
    self.assertIn(tuple(sorted([(n1, 0)])), out)
    self.assertIn(tuple(sorted([(v1, 0), (v1, 2), (v1, 3)])), out)
    self.assertIn(tuple(sorted([(v1, 6), (v1, 7)])), out)
    self.assertIn(tuple(sorted([(s1, 0)])), out)

def _run_op_test(self, op_fun, inputs, indices=None, ivs=None,
                 inf_type=spn.InferenceType.MARGINAL, log=False,
                 on_gpu=True):
    """Run a single test for a single op."""
    # Preparations
    op_name = op_fun.__name__
    device_name = '/gpu:0' if on_gpu else '/cpu:0'
    # Print
    print2("--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, ivs=%s, "
           "inference=%s, log=%s" %
           (op_name, on_gpu, inputs.shape,
            ("No" if indices is None else "Yes"),
            ("No" if ivs is None else "Yes"),
            ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
            log), self.file)
    input_size = inputs.shape[1]
    # Compute true output
    true_out = self._true_output(op_fun, inputs, indices, ivs, inf_type)
    # Create graph
    tf.reset_default_graph()
    with tf.device(device_name):
        # Create input
        inputs_pl = spn.ContVars(num_vars=input_size)
        # Create IVs
        if ivs is None:
            ivs_pl = [None for _ in range(self.num_sums)]
        else:
            if op_fun is Ops.sum:
                ivs_pl = [spn.IVs(num_vars=1, num_vals=input_size)
                          for _ in range(self.num_sums)]
            elif op_fun is Ops.par_sums or op_fun is Ops.sums:
                ivs_pl = [spn.IVs(num_vars=self.num_sums,
                                  num_vals=input_size)]
        # Create ops
        start_time = time.time()
        init_ops, ops = op_fun(inputs_pl, indices, ivs_pl, self.num_sums,
                               inf_type, log)
        for _ in range(self.num_ops - 1):
            # The tuple ensures that the next op waits for the output
            # of the previous op, effectively stacking the ops
            # but using the original input every time
            init_ops, ops = op_fun(inputs_pl, indices, ivs_pl,
                                   self.num_sums, inf_type, log,
                                   tf.tuple([ops])[0])
        setup_time = time.time() - start_time
    # Get num of graph ops
    graph_size = len(tf.get_default_graph().get_operations())
    # Run op multiple times
    output_correct = True
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=self.log_devs)) as sess:
        # Initialize weights of all the sum nodes in the graph
        start_time = time.time()
        init_ops.run()
        weights_init_time = time.time() - start_time

        run_times = []
        # Create feed dictionary
        feed = {inputs_pl: inputs}
        if ivs is not None:
            for iv_pl in ivs_pl:
                feed[iv_pl] = ivs
        for n in range(self.num_runs):
            # Run
            start_time = time.time()
            out = sess.run(ops, feed_dict=feed)
            run_times.append(time.time() - start_time)
            # Test value
            try:
                np.testing.assert_array_almost_equal(
                    out, (np.log(true_out) if log else true_out))
            except AssertionError:
                output_correct = False
                self.test_failed = True

        if self.profile:
            # Add additional options to trace the session execution
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            out = sess.run(ops, feed_dict=feed, options=options,
                           run_metadata=run_metadata)
            # Create the Timeline object, and write it to a json file
            fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = fetched_timeline.generate_chrome_trace_format()
            if not os.path.exists(self.profiles_dir):
                os.makedirs(self.profiles_dir)
            file_name = op_name
            file_name += ("_GPU" if on_gpu else "_CPU")
            file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                spn.InferenceType.MPE else ("_MARGINAL-LOG" if log
                                            else "_MARGINAL")
            if indices is not None:
                file_name += "_Indices"
            if ivs is not None:
                file_name += "_IVS"
            with open('%s/timeline_value_%s.json' %
                      (self.profiles_dir, file_name), 'w') as f:
                f.write(chrome_trace)
    # Return stats
    return OpTestResult(op_name, on_gpu, graph_size,
                        ("No" if indices is None else "Yes"),
                        ("No" if ivs is None else "Yes"),
                        setup_time, weights_init_time, run_times,
                        output_correct)

def _run_op_test(self, op_fun, input_dist='RAW', node_type=None,
                 inf_type=spn.InferenceType.MARGINAL, log=False,
                 on_gpu=True):
    """Run a single test for a single op."""
    # Preparations
    op_name = op_fun.__name__
    device_name = '/gpu:0' if on_gpu else '/cpu:0'
    # Print
    print2("--> %s: on_gpu=%s, input_dist=%s, inference=%s, node_type=%s, "
           "log=%s" %
           (op_name, on_gpu, input_dist,
            ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
            ("SINGLE" if node_type == spn.DenseSPNGenerator.NodeType.SINGLE
             else "BLOCK" if node_type ==
             spn.DenseSPNGenerator.NodeType.BLOCK else "LAYER"),
            log), self.file)

    train_set, train_labels, test_set, test_labels = self._data_set(op_fun)
    # Create graph
    tf.reset_default_graph()
    with tf.device(device_name):
        # Create input ivs
        inputs_pl = spn.IVs(num_vars=196, num_vals=2)
        # Create dense SPN and generate TF graph for training
        start_time = time.time()
        # Generate SPN
        root, latent, learning, additive_smoothing, \
            min_additive_smoothing, additive_smoothing_var = \
            op_fun(inputs_pl, self.num_decomps, self.num_subsets,
                   self.num_mixtures, self.num_input_mixtures,
                   self.balanced, input_dist, node_type, inf_type, log)
        # Add Learning Ops
        init_weights = spn.initialize_weights(root)
        reset_accumulators = learning.reset_accumulators()
        accumulate_updates = learning.accumulate_updates()
        update_spn = learning.update_spn()
        # Generate Testing Ops
        mpe_state_gen = spn.MPEState(
            log=log, value_inference_type=spn.InferenceType.MPE)
        mpe_ivs, mpe_latent = mpe_state_gen.get_state(root, inputs_pl,
                                                      latent)
        setup_time = time.time() - start_time

        if on_gpu:
            max_bytes_used_op = tf.contrib.memory_stats.MaxBytesInUse()
    # Get num of SPN ops
    spn_size = root.get_num_nodes()
    # Get num of graph ops
    tf_size = len(tf.get_default_graph().get_operations())
    # Smoothing Decay for Additive Smoothing
    smoothing_decay = 0.2
    # Run op multiple times
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=self.log_devs)) as sess:
        # Initialize weights of the SPN
        start_time = time.time()
        init_weights.run()
        weights_init_time = time.time() - start_time
        # Reset accumulators
        sess.run(reset_accumulators)

        run_times = []
        # Create feed dictionary
        feed = {inputs_pl: train_set, latent: train_labels}
        # Run Training
        for epoch in range(self.num_epochs):
            start_time = time.time()
            # Adjust smoothing
            ads = max(np.exp(-epoch * smoothing_decay) *
                      additive_smoothing, min_additive_smoothing)
            sess.run(additive_smoothing_var.assign(ads))
            # Run accumulate_updates
            sess.run(accumulate_updates, feed_dict=feed)
            # Update weights
            sess.run(update_spn)
            # Reset accumulators
            sess.run(reset_accumulators)
            run_times.append(time.time() - start_time)

        if on_gpu:
            memory_used = sess.run(max_bytes_used_op)
        else:
            memory_used = None

        if self.profile:
            # Add additional options to trace the session execution
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata_acc_updt = tf.RunMetadata()
            run_metadata_spn_updt = tf.RunMetadata()
            run_metadata_acc_rst = tf.RunMetadata()
            # Run a single epoch:
            # Run accumulate_updates
            sess.run(accumulate_updates, feed_dict=feed, options=options,
                     run_metadata=run_metadata_acc_updt)
            # Update weights
            sess.run(update_spn, options=options,
                     run_metadata=run_metadata_spn_updt)
            # Reset accumulators
            sess.run(reset_accumulators, options=options,
                     run_metadata=run_metadata_acc_rst)
            # Create the Timeline objects, and write them to json files
            fetched_timeline_acc_updt = timeline.Timeline(
                run_metadata_acc_updt.step_stats)
            fetched_timeline_spn_updt = timeline.Timeline(
                run_metadata_spn_updt.step_stats)
            fetched_timeline_acc_rst = timeline.Timeline(
                run_metadata_acc_rst.step_stats)
            chrome_trace_acc_updt = \
                fetched_timeline_acc_updt.generate_chrome_trace_format()
            chrome_trace_spn_updt = \
                fetched_timeline_spn_updt.generate_chrome_trace_format()
            chrome_trace_acc_rst = \
                fetched_timeline_acc_rst.generate_chrome_trace_format()
            if not os.path.exists(self.profiles_dir):
                os.makedirs(self.profiles_dir)
            file_name = op_name
            file_name += ("_GPU_" if on_gpu else "_CPU_")
            file_name += input_dist  # "RAW" or "MIXTURE"
            file_name += ("_SINGLE" if node_type ==
                          spn.DenseSPNGenerator.NodeType.SINGLE
                          else "_BLOCK" if node_type ==
                          spn.DenseSPNGenerator.NodeType.BLOCK
                          else "_LAYER")
            file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                spn.InferenceType.MPE else ("_MARGINAL-LOG" if log
                                            else "_MARGINAL")
            with open('%s/timeline_%s_acc_updt.json' %
                      (self.profiles_dir, file_name), 'w') as f:
                f.write(chrome_trace_acc_updt)
            with open('%s/timeline_%s_spn_updt.json' %
                      (self.profiles_dir, file_name), 'w') as f:
                f.write(chrome_trace_spn_updt)
            with open('%s/timeline_%s_acc_rst.json' %
                      (self.profiles_dir, file_name), 'w') as f:
                f.write(chrome_trace_acc_rst)

        # Run Testing
        mpe_latent_val = sess.run(
            [mpe_latent],
            feed_dict={inputs_pl: test_set,
                       latent: np.ones((test_set.shape[0], 1)) * -1})
        result = (mpe_latent_val == test_labels)
        test_accuracy = np.sum(result) / test_labels.size

    # Return stats
    return OpTestResult(
        op_name, on_gpu,
        ("SINGLE" if node_type == spn.DenseSPNGenerator.NodeType.SINGLE
         else "BLOCK" if node_type == spn.DenseSPNGenerator.NodeType.BLOCK
         else "LAYER"),
        spn_size, tf_size, memory_used, input_dist, setup_time,
        weights_init_time, run_times, test_accuracy)

def _run_op_test(self, op_fun, inputs, sum_indices=None,
                 inf_type=spn.InferenceType.MARGINAL, log=False,
                 on_gpu=True, ivs=None):
    """Run a single test for a single op."""
    # Preparations
    op_name = op_fun.__name__
    device_name = '/gpu:0' if on_gpu else '/cpu:0'
    # Print
    print2("--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, inference=%s, "
           "log=%s, IVs=%s" %
           (op_name, on_gpu, inputs.shape,
            ("No" if sum_indices is None else "Yes"),
            ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
            log, ("No" if ivs is None else "Yes")), self.file)
    input_size = inputs.shape[1]
    # Create graph
    tf.reset_default_graph()
    # Compute the true output
    sum_sizes = [len(ind) for ind in sum_indices]
    ivs_per_sum = np.split(ivs, ivs.shape[1], axis=1) if ivs is not None \
        else None
    sum_sizes_np = self._repeat_elements(sum_sizes)
    true_out = self._true_out(inf_type, inputs, ivs_per_sum, sum_indices,
                              sum_sizes, sum_sizes_np)
    if log:
        true_out = np.log(true_out)
    # Set up the graph
    with tf.device(device_name):
        # Create input
        inputs_pl = spn.ContVars(num_vars=input_size)
        feed_dict = {inputs_pl: inputs}
        if ivs is not None:
            if op_fun is Ops.sum:
                ivs_pl = [spn.IVs(num_vars=1, num_vals=s)
                          for s in sum_sizes_np]
                ivs = ivs_per_sum
            elif op_fun is Ops.par_sums:
                ivs_pl = [spn.IVs(num_vars=self.num_parallel,
                                  num_vals=len(ind))
                          for ind in sum_indices]
                ivs = np.split(ivs, len(self.sum_sizes), axis=1)
            else:
                ivs = [ivs]
                ivs_pl = [spn.IVs(num_vars=len(sum_sizes_np),
                                  num_vals=max(sum_sizes))]
            for iv_pl, iv in zip(ivs_pl, ivs):
                feed_dict[iv_pl] = iv
        else:
            ivs_pl = None
        # Create ops
        start_time = time.time()
        init_ops, ops = op_fun(inputs_pl, sum_indices, self.num_parallel,
                               inf_type, log, ivs=ivs_pl)
        setup_time = time.time() - start_time
    # Get num of graph ops
    graph_size = len(tf.get_default_graph().get_operations())
    # Run op multiple times
    output_correct = True
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=self.log_devs)) as sess:
        # Initialize weights of all the sum nodes in the graph
        start_time = time.time()
        init_ops.run()
        weights_init_time = time.time() - start_time

        run_times = []
        for n in range(self.num_runs):
            # Run
            start_time = time.time()
            out = sess.run(ops, feed_dict=feed_dict)
            run_times.append(time.time() - start_time)
            # Test value
            try:
                np.testing.assert_array_almost_equal(out, true_out)
            except AssertionError:
                output_correct = False
                self.test_failed = True

        if self.profile:
            # Create a suitable filename suffix
            fnm_suffix = op_name
            fnm_suffix += ("_GPU" if on_gpu else "_CPU")
            fnm_suffix += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                spn.InferenceType.MPE else ("_MARGINAL-LOG" if log
                                            else "_MARGINAL")
            fnm_suffix += ("_IV" if ivs is not None else "")
            # Create a profiling report
            profile_report(sess, ops, feed_dict, self.profiles_dir,
                           "sum_value_varying_sizes", fnm_suffix)
    # Return stats
    return OpTestResult(op_name, on_gpu, graph_size, "Yes",
                        ("No" if ivs is None else "Yes"), setup_time,
                        weights_init_time, run_times, output_correct)

def _run_network_test(self, network_fun, inputs,
                      inf_type=spn.InferenceType.MARGINAL, log=False,
                      on_gpu=True):
    """Run a single test for a single op."""
    # Preparations
    op_name = network_fun.__name__
    device_name = '/gpu:0' if on_gpu else '/cpu:0'
    # Print
    print2("--> %s: on_gpu=%s, inputs_shape=%s, inference=%s, log=%s" %
           (op_name, on_gpu, inputs.shape,
            ("MPE" if inf_type == spn.InferenceType.MPE else "MARGINAL"),
            log), self.file)
    # Compute true output
    true_out = self._true_output(network_fun, inputs, self.num_input_vals,
                                 self.num_mixtures, self.num_subsets,
                                 inf_type)
    # Create graph
    tf.reset_default_graph()
    with tf.device(device_name):
        # Create input
        inputs_pl = spn.IVs(num_vars=self.num_input_vars,
                            num_vals=self.num_input_vals, name="iv_x")
        # Create networks, stacking one on top of the other, although each
        # network remains unconnected and independent of each other.
        start_time = time.time()
        root, init_network, network = \
            network_fun(inputs_pl, self.num_input_vals, self.num_mixtures,
                        self.num_subsets, inf_type, log)
        for _ in range(self.num_networks - 1):
            # The tuple ensures that the next network waits for the output
            # of the previous network, effectively stacking the networks
            # but using the original input every time
            root, init_network, network = \
                network_fun(inputs_pl, self.num_input_vals,
                            self.num_mixtures, self.num_subsets, inf_type,
                            log, tf.tuple([network])[0])
        setup_time = time.time() - start_time
    # Get num of SPN ops
    spn_size = root.get_num_nodes() * self.num_networks
    # Get num of graph ops
    tf_size = len(tf.get_default_graph().get_operations())
    # Run op multiple times
    output_correct = True
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=self.log_devs)) as sess:
        # Initialize weights of all the sum node types in the graph
        start_time = time.time()
        init_network.run()
        weights_init_time = time.time() - start_time

        run_times = []
        # Create feed dictionary
        feed = {inputs_pl: inputs}
        for n in range(self.num_runs):
            # Run
            start_time = time.time()
            out = sess.run(network, feed_dict=feed)
            run_times.append(time.time() - start_time)
            # Test value
            try:
                np.testing.assert_array_almost_equal(
                    (np.exp(out) if log else out), true_out)
            except AssertionError:
                output_correct = False
                self.test_failed = True

        if self.profile:
            # Add additional options to trace the session execution
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            out = sess.run(network, feed_dict=feed, options=options,
                           run_metadata=run_metadata)
            # Create the Timeline object, and write it to a json file
            fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = fetched_timeline.generate_chrome_trace_format()
            if not os.path.exists(self.profiles_dir):
                os.makedirs(self.profiles_dir)
            file_name = op_name
            file_name += ("_GPU" if on_gpu else "_CPU")
            file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
                spn.InferenceType.MPE else ("_MARGINAL-LOG" if log
                                            else "_MARGINAL")
            with open('%s/timeline_value_%s.json' %
                      (self.profiles_dir, file_name), 'w') as f:
                f.write(chrome_trace)
    # Return stats
    return OpTestResult(op_name, on_gpu, spn_size, tf_size, setup_time,
                        weights_init_time, run_times, output_correct)

def test_compute_mpe_value(self):
    """Calculating MPE value of Sum"""
    def test(values, ivs, weights, feed, output):
        with self.subTest(values=values, ivs=ivs, weights=weights,
                          feed=feed):
            n = spn.Sum(*values, ivs=ivs)
            n.generate_weights(weights)
            op = n.get_value(spn.InferenceType.MPE)
            op_log = n.get_log_value(spn.InferenceType.MPE)
            with tf.Session() as sess:
                spn.initialize_weights(n).run()
                out = sess.run(op, feed_dict=feed)
                out_log = sess.run(tf.exp(op_log), feed_dict=feed)
            np.testing.assert_array_almost_equal(
                out,
                np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
            np.testing.assert_array_almost_equal(
                out_log,
                np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))

    # Create inputs
    v1 = spn.ContVars(num_vars=3, name="ContVars1")
    v2 = spn.ContVars(num_vars=1, name="ContVars2")
    ivs = spn.IVs(num_vars=1, num_vals=4)

    # Multiple inputs, multi-element batch
    test([v1, v2], None, [0.1, 0.2, 0.4, 0.3],
         {v1: [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
          v2: [[0.7], [0.8]]},
         [[0.7 * 0.3], [0.8 * 0.3]])
    test([(v1, [0, 2]), (v2, [0])], None, [0.1, 0.5, 0.4],
         {v1: [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
          v2: [[0.7], [0.8]]},
         [[0.7 * 0.4], [0.8 * 0.4]])
    test([v1, v2], ivs, [0.1, 0.2, 0.4, 0.3],
         {v1: [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
          v2: [[0.7], [0.8]],
          ivs: [[2], [-1]]},
         [[0.3 * 0.4], [0.8 * 0.3]])
    test([(v1, [0, 2]), (v2, [0])], (ivs, [0, 1, 2]), [0.1, 0.5, 0.4],
         {v1: [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
          v2: [[0.7], [0.8]],
          ivs: [[1], [-1]]},
         [[0.3 * 0.5], [0.8 * 0.4]])

    # Single input with 1 value, multi-element batch
    test([v2], None, [0.5],
         {v2: [[0.1], [0.2]]},
         [[0.1 * 1.0], [0.2 * 1.0]])
    test([(v1, [1])], None, [0.5],
         {v1: [[0.01, 0.1, 0.03], [0.02, 0.2, 0.04]]},
         [[0.1 * 1.0], [0.2 * 1.0]])
    test([v2], (ivs, 0), [0.5],
         {v2: [[0.1], [0.2]],
          ivs: [[1], [-1]]},
         [[0.0], [0.2 * 1.0]])
    test([(v1, [1])], (ivs, 1), [0.5],
         {v1: [[0.01, 0.1, 0.03], [0.02, 0.2, 0.04]],
          ivs: [[1], [-1]]},
         [[0.1 * 1.0], [0.2 * 1.0]])

    # Multiple inputs, single-element batch
    test([v1, v2], None, [0.1, 0.2, 0.4, 0.3],
         {v1: [[0.1, 0.2, 0.3]],
          v2: [[0.7]]},
         [[0.7 * 0.3]])
    test([(v1, [0, 2]), (v2, [0])], None, [0.1, 0.5, 0.4],
         {v1: [[0.1, 0.2, 0.3]],
          v2: [[0.7]]},
         [[0.7 * 0.4]])
    test([v1, v2], ivs, [0.1, 0.2, 0.4, 0.3],
         {v1: [[0.1, 0.2, 0.3]],
          v2: [[0.7]],
          ivs: [[0]]},
         [[0.1 * 0.1]])
    test([(v1, [0, 2]), (v2, [0])], (ivs, [1, 2, 3]), [0.1, 0.5, 0.4],
         {v1: [[0.1, 0.2, 0.3]],
          v2: [[0.7]],
          ivs: [[-1]]},
         [[0.7 * 0.4]])

    # Single input with 1 value, single-element batch
    test([v2], None, [0.5], {v2: [[0.1]]}, [[0.1 * 1.0]])
    test([(v1, [1])], None, [0.5],
         {v1: [[0.01, 0.1, 0.03]]},
         [[0.1 * 1.0]])
    test([v2], (ivs, [1]), [0.5],
         {v2: [[0.1]], ivs: [[-1]]},
         [[0.1 * 1.0]])
    test([(v1, [1])], (ivs, [1]), [0.5],
         {v1: [[0.01, 0.1, 0.03]], ivs: [[0]]},
         [[0.0]])

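# Editor's note: MPE inference replaces the sum's weighted average with a
# weighted maximum, which is how the expected outputs above are derived. The
# first case in one line of numpy:
children = np.array([0.1, 0.2, 0.3, 0.7])  # v1 row [0.1, 0.2, 0.3] + v2 [0.7]
weights = np.array([0.1, 0.2, 0.4, 0.3])
assert np.isclose(np.max(weights * children), 0.7 * 0.3)
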
def test_compute_mpe_path_ivs(self):
    v12 = spn.IVs(num_vars=2, num_vals=4)
    v34 = spn.ContVars(num_vars=2)
    v5 = spn.ContVars(num_vars=1)
    s = spn.Sum((v12, [0, 5]), v34, (v12, [3]), v5)
    iv = s.generate_ivs()
    w = s.generate_weights()
    counts = tf.placeholder(tf.float32, shape=(None, 1))
    op = s._compute_mpe_path(tf.identity(counts),
                             w.get_value(),
                             iv.get_value(),
                             v12.get_value(),
                             v34.get_value(),
                             v12.get_value(),
                             v5.get_value())
    init = w.initialize()
    counts_feed = [[10], [11], [12], [13], [14], [15], [16], [17]]
    v12_feed = [[0, 1], [1, 1], [0, 0], [3, 3],
                [0, 1], [1, 1], [0, 0], [3, 3]]
    v34_feed = [[0.1, 0.2], [1.2, 0.2], [0.1, 0.2], [0.9, 0.8],
                [0.1, 0.2], [1.2, 0.2], [0.1, 0.2], [0.9, 0.8]]
    v5_feed = [[0.5], [0.5], [1.2], [0.9], [0.5], [0.5], [1.2], [0.9]]
    ivs_feed = [[-1], [-1], [-1], [-1], [1], [2], [3], [1]]

    with tf.Session() as sess:
        sess.run(init)
        out = sess.run(op, feed_dict={counts: counts_feed,
                                      iv: ivs_feed,
                                      v12: v12_feed,
                                      v34: v34_feed,
                                      v5: v5_feed})
    # Weights
    np.testing.assert_array_almost_equal(
        out[0], np.array([[10., 0., 0., 0., 0., 0.],
                          [0., 0., 11., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 12.],
                          [0., 0., 0., 0., 13., 0.],
                          [0., 14., 0., 0., 0., 0.],
                          [0., 0., 15., 0., 0., 0.],
                          [0., 0., 0., 16., 0., 0.],
                          [17., 0., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[1], np.array([[10., 0., 0., 0., 0., 0.],
                          [0., 0., 11., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 12.],
                          [0., 0., 0., 0., 13., 0.],
                          [0., 14., 0., 0., 0., 0.],
                          [0., 0., 15., 0., 0., 0.],
                          [0., 0., 0., 16., 0., 0.],
                          [17., 0., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[2], np.array([[10., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 14., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [17., 0., 0., 0., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[3], np.array([[0., 0.], [11., 0.], [0., 0.], [0., 0.],
                          [0., 0.], [15., 0.], [0., 16.], [0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[4], np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 13., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0., 0., 0.]],
                         dtype=np.float32))
    np.testing.assert_array_almost_equal(
        out[5], np.array([[0.], [0.], [12.], [0.],
                          [0.], [0.], [0.], [0.]], dtype=np.float32))

def test_compute_scope(self):
    """Calculating scope of Sums"""
    # Create a graph
    v12 = spn.IVs(num_vars=2, num_vals=4, name="V12")
    v34 = spn.ContVars(num_vars=3, name="V34")

    scopes_per_node = {
        v12: [spn.Scope(v12, 0), spn.Scope(v12, 0), spn.Scope(v12, 0),
              spn.Scope(v12, 0), spn.Scope(v12, 1), spn.Scope(v12, 1),
              spn.Scope(v12, 1), spn.Scope(v12, 1)],
        v34: [spn.Scope(v34, 0), spn.Scope(v34, 1), spn.Scope(v34, 2)]
    }

    def generate_scopes_from_inputs(node, inputs, num_or_size_sums,
                                    ivs=False):
        # Create a flat list of scopes, where the scope elements of a
        # single input node are subsequent in the list
        flat_scopes = []
        size = 0
        for inp in inputs:
            if isinstance(inp, tuple) and inp[1]:
                input_indices = [inp[1]] if isinstance(inp[1], int) \
                    else inp[1]
                for i in input_indices:
                    flat_scopes.append(scopes_per_node[inp[0]][i])
                size += len(input_indices)
            elif not isinstance(inp, tuple):
                flat_scopes.extend(scopes_per_node[inp])
                size += len(scopes_per_node[inp])
            else:
                flat_scopes.extend(scopes_per_node[inp[0]])
                size += len(scopes_per_node[inp[0]])
        if isinstance(num_or_size_sums, int):
            num_or_size_sums = num_or_size_sums * \
                [size // num_or_size_sums]

        new_scope = []
        offset = 0
        # For each sum generate the scope based on its size
        for i, s in enumerate(num_or_size_sums):
            scope = flat_scopes[offset]
            for j in range(1, s):
                scope |= flat_scopes[j + offset]
            offset += s
            if ivs:
                scope |= spn.Scope(node.ivs.node, i)
            new_scope.append(scope)
        scopes_per_node[node] = new_scope

    def sums_layer_and_test(inputs, num_or_size_sums, name, ivs=False):
        """Create a sums layer, generate its correct scope and test"""
        sums_layer = spn.SumsLayer(*inputs,
                                   num_or_size_sums=num_or_size_sums,
                                   name=name)
        if ivs:
            sums_layer.generate_ivs()
        generate_scopes_from_inputs(sums_layer, inputs, num_or_size_sums,
                                    ivs=ivs)
        self.assertListEqual(sums_layer.get_scope(),
                             scopes_per_node[sums_layer])
        return sums_layer

    def concat_layer_and_test(inputs, name):
        """Create a concat node, generate its scopes and assert whether it
        is correct"""
        scope = []
        for inp in inputs:
            if isinstance(inp, tuple):
                indices = inp[1]
                if isinstance(inp[1], int):
                    indices = [inp[1]]
                for i in indices:
                    scope.append(scopes_per_node[inp[0]][i])
            else:
                scope.extend(scopes_per_node[inp])
        concat = spn.Concat(*inputs, name=name)
        self.assertListEqual(concat.get_scope(), scope)
        scopes_per_node[concat] = scope
        return concat

    ss1 = sums_layer_and_test(
        [(v12, [0, 1, 2, 3]), (v12, [1, 2, 5, 6]), (v12, [4, 5, 6, 7])],
        3, "Ss1", ivs=True)
    ss2 = sums_layer_and_test([(v12, [6, 7]), (v34, 0)],
                              num_or_size_sums=[1, 2], name="Ss2")
    ss3 = sums_layer_and_test([(v12, [3, 7]), (v34, 1), (v12, [4, 5, 6]),
                               v34],
                              num_or_size_sums=[1, 2, 2, 2, 2], name="Ss3")
    s1 = sums_layer_and_test([(v34, [1, 2])], num_or_size_sums=1,
                             name="S1", ivs=True)
    concat_layer_and_test([(ss1, [0, 2]), (ss2, 0)], name="N1")
    concat_layer_and_test([(ss1, 1), ss3, s1], name="N2")
    n = concat_layer_and_test([(ss1, 0), ss2, (ss3, [0, 1]), s1],
                              name="N3")
    sums_layer_and_test([(ss1, [1, 2]), ss2], num_or_size_sums=[2, 1, 1],
                        name="Ss4")
    sums_layer_and_test([(ss1, [0, 2]), (n, [0, 1]), (ss3, [4, 2])],
                        num_or_size_sums=[3, 2, 1], name="Ss5")

def test_compute_valid(self): """Calculating validity of Sums""" # Without IVs v12 = spn.IVs(num_vars=2, num_vals=4) v34 = spn.ContVars(num_vars=2) s1 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v12, [0, 1, 2, 3]), (v12, [0, 1, 2, 3]), num_or_size_sums=3) self.assertTrue(s1.is_valid()) s2 = spn.SumsLayer((v12, [0, 1, 2, 4]), name="S2") s2b = spn.SumsLayer((v12, [0, 1, 2, 4]), num_or_size_sums=[3, 1], name="S2b") self.assertTrue(s2b.is_valid()) self.assertFalse(s2.is_valid()) s3 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]), (v34, 0), num_or_size_sums=2) s3b = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]), (v34, 0), num_or_size_sums=[4, 1, 4, 1]) s3c = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]), (v34, 0), num_or_size_sums=[4, 1, 5]) self.assertFalse(s3.is_valid()) self.assertTrue(s3b.is_valid()) self.assertFalse(s3c.is_valid()) p1 = spn.Product((v12, [0, 5]), (v34, 0)) p2 = spn.Product((v12, [1, 6]), (v34, 0)) p3 = spn.Product((v12, [1, 6]), (v34, 1)) s4 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2) s5 = spn.SumsLayer(p1, p3, p1, p3, p1, p3, num_or_size_sums=3) s6 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1]) s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[1, 2]) s8 = spn.SumsLayer(p1, p2, p3, p2, p1, num_or_size_sums=[2, 1, 2]) self.assertTrue(s4.is_valid()) self.assertFalse(s5.is_valid()) # p1 and p3 different scopes self.assertTrue(s6.is_valid()) self.assertFalse(s7.is_valid()) # p2 and p3 different scopes self.assertTrue(s8.is_valid()) # With IVS s6 = spn.SumsLayer(p1, p2, p1, p2, p1, p2, num_or_size_sums=3) s6.generate_ivs() self.assertTrue(s6.is_valid()) s7 = spn.SumsLayer(p1, p2, num_or_size_sums=1) s7.set_ivs(spn.ContVars(num_vars=2)) self.assertFalse(s7.is_valid()) s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=3) s7.set_ivs(spn.ContVars(num_vars=3)) self.assertTrue(s7.is_valid()) s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1]) s7.set_ivs(spn.ContVars(num_vars=3)) self.assertFalse(s7.is_valid()) s8 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2) s8.set_ivs(spn.IVs(num_vars=3, num_vals=2)) with self.assertRaises(spn.StructureError): s8.is_valid() s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3]) s9.set_ivs(spn.ContVars(num_vars=2)) with self.assertRaises(spn.StructureError): s9.is_valid() s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3]) s9.set_ivs(spn.ContVars(num_vars=3)) with self.assertRaises(spn.StructureError): s9.is_valid() s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2) s9.set_ivs(spn.IVs(num_vars=1, num_vals=4)) self.assertTrue(s9.is_valid()) s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3]) s9.set_ivs(spn.IVs(num_vars=1, num_vals=4)) self.assertTrue(s9.is_valid()) s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3]) s9.set_ivs(spn.IVs(num_vars=2, num_vals=2)) self.assertFalse(s9.is_valid()) s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2) s9.set_ivs(spn.IVs(num_vars=2, num_vals=2)) self.assertTrue(s9.is_valid()) s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1]) s9.set_ivs(spn.IVs(num_vars=2, num_vals=2)) self.assertFalse(s9.is_valid()) s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2) s10.set_ivs((v12, [0, 3, 5, 7])) self.assertTrue(s10.is_valid()) s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1]) s10.set_ivs((v12, [0, 3, 5, 7])) self.assertFalse(s10.is_valid())