def test_masked_weights(self):
    """Test that weights of a mixed-size SumsLayer are padded with zeros.

    The layer below has sums of sizes [3, 1, 3, 4, 1], so the weight matrix
    is padded to shape [5, 4]; positions outside each sum's size must be
    exactly zero and each row must sum to one after normalization.
    """
    v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
    v34 = spn.RawLeaf(num_vars=2)
    v5 = spn.RawLeaf(num_vars=1)
    s = spn.SumsLayer((v12, [0, 5]), v34, (v12, [3]), v5,
                      (v12, [0, 5]), v34, (v12, [3]), v5,
                      num_or_size_sums=[3, 1, 3, 4, 1])
    s.generate_weights(
        initializer=tf.initializers.random_uniform(0.0, 1.0))
    with self.test_session() as sess:
        sess.run(s.weights.node.initialize())
        weights = sess.run(s.weights.node.variable)
        shape = [5, 4]
        self.assertEqual(shape, s.weights.node.variable.shape.as_list())
        # Masked-out (padded) positions must be exactly zero.  Use a plain
        # loop here: a list comprehension executed only for its assertion
        # side effects is an anti-pattern (the built list is discarded).
        for row, col in [(0, -1), (1, 1), (1, 2), (1, 3), (2, -1),
                         (4, 1), (4, 2), (4, 3)]:
            self.assertEqual(weights[row, col], 0.0)
        # Each sum's weights are normalized, so every row sums to 1.
        self.assertAllClose(np.sum(weights, axis=1), np.ones(5))
def sums_layer_and_test(inputs, num_or_size_sums, name, ivs=False):
    """Build a SumsLayer, derive its expected scope, and verify they match.

    NOTE(review): this is a closure — `self`, `generate_scopes_from_inputs`
    and `scopes_per_node` come from the enclosing scope.
    """
    layer = spn.SumsLayer(*inputs, num_or_size_sums=num_or_size_sums,
                          name=name)
    if ivs:
        layer.generate_ivs()
    # Populate scopes_per_node with the scope this layer is expected to have
    generate_scopes_from_inputs(layer, inputs, num_or_size_sums, ivs=ivs)
    expected_scope = scopes_per_node[layer]
    self.assertListEqual(layer.get_scope(), expected_scope)
    return layer
def build_sumslayer_common(self, feed_dict, input_tuples, ivs, sum_sizes,
                           weights, root_weights):
    """Assemble a SumsLayer capped by a single root sum.

    Scatters `weights` into the padded weight matrix via the layer's mask,
    optionally attaches IVs (registering their feed in `feed_dict`), and
    returns (init_op, ivs_nodes, root, weight_node).
    """
    layer = spn.SumsLayer(*input_tuples, num_or_size_sums=sum_sizes)
    ivs_nodes = []
    if ivs:
        ivs_node = layer.generate_ivs()
        ivs_nodes.append(ivs_node)
        feed_dict[ivs_node] = np.stack(ivs, axis=1)
    # Scatter the flat weights into the valid (unmasked) positions only;
    # padded positions stay zero.
    mask = layer._build_mask()
    padded_weights = np.zeros(mask.size)
    padded_weights[mask.ravel()] = weights
    weight_node = layer.generate_weights(
        initializer=tf.initializers.constant(padded_weights))
    # A single sum on top groups all outcomes
    root = spn.SumsLayer(layer, num_or_size_sums=1)
    root.generate_weights(initializer=tf.initializers.constant(root_weights))
    return spn.initialize_weights(root), ivs_nodes, root, weight_node
def sums_layer(inputs, sum_indices, repetitions, inf_type, log=False, ivs=None):
    """Creates the graph using a SumsLayer node.

    Each index set in `sum_indices` is repeated `repetitions` times, and the
    sum sizes follow the lengths of the index sets.  Returns the weight
    initialization op and the value op of the root.
    """
    repeated_inputs = []
    repeated_sum_sizes = []
    for ind in sum_indices:
        repeated_inputs.extend([(inputs, ind) for _ in range(repetitions)])
        repeated_sum_sizes.extend([len(ind) for _ in range(repetitions)])
    # FIX: the keyword argument is `num_or_size_sums` — the name used by every
    # other SumsLayer construction in this file; `n_sums_or_sizes` would be
    # rejected (or silently ignored) as an unknown keyword.
    sums_layer = spn.SumsLayer(*repeated_inputs,
                               num_or_size_sums=repeated_sum_sizes)
    sums_layer.generate_weights()
    if ivs:
        sums_layer.set_ivs(*ivs)
    root, value_op = Ops._build_root_and_value(inf_type, log, [sums_layer])
    return spn.initialize_weights(root), value_op
def _build_op(self, inputs, placeholders, conf):
    """Build the MPE-path op for a SumsLayer fed from one placeholder.

    Repeats each index set `inputs.num_parallel` times, builds a SumsLayer
    with matching sum sizes under a single root sum, and returns the ops
    computing MPE-path counts for the weights and the input placeholder,
    together with the weight-initialization op.
    """
    # TODO make sure the ivs are correct
    sum_indices, weights, ivs = inputs.indices, inputs.weights, None
    log, inf_type = conf.log, conf.inf_type
    repeated_inputs = []
    repeated_sum_sizes = []
    for ind in sum_indices:
        # Sum sizes are given by the lengths of the index sets.
        # (Removed a dead `offset` accumulator that was never read.)
        size = len(ind)
        repeated_inputs.extend([(placeholders[0], ind)
                                for _ in range(inputs.num_parallel)])
        repeated_sum_sizes.extend(
            [size for _ in range(inputs.num_parallel)])
    # Globally configure to add up the sums before passing on the values to children
    spn.conf.sumslayer_count_sum_strategy = self.sum_count_strategy
    sums_layer = spn.SumsLayer(*repeated_inputs,
                               num_or_size_sums=repeated_sum_sizes)
    weight_node = self._generate_weights(sums_layer, weights)
    if ivs:
        sums_layer.set_ivs(*ivs)
    # Connect a single sum to group outcomes
    root = spn.Sum(sums_layer)
    self._generate_weights(root)
    # Then build MPE path Ops
    mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=log)
    mpe_path_gen.get_mpe_path(root)
    # tf.tuple forces both count tensors to be computed together.
    path_op = [
        tf.tuple([
            mpe_path_gen.counts[weight_node],
            mpe_path_gen.counts[placeholders[0]]
        ])[0]
    ]
    return path_op, self._initialize_from(root)
def test_compute_valid(self):
    """Calculating validity of Sums.

    Exercises SumsLayer.is_valid() for complete/incomplete scopes, for
    different num_or_size_sums partitions, and with latent indicators of
    matching and mismatching sizes.
    """
    # Without IndicatorLeaf
    v12 = spn.IndicatorLeaf(num_vars=2, num_vals=4)
    v34 = spn.RawLeaf(num_vars=2)
    # Three identical sums over a complete set of indicators of one variable
    s1 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v12, [0, 1, 2, 3]),
                       (v12, [0, 1, 2, 3]), num_or_size_sums=3)
    self.assertTrue(s1.is_valid())
    # With num_vals=4, index 4 presumably belongs to the second variable
    # while 0-2 belong to the first, so one sum over all four mixes scopes
    # (invalid); splitting into sizes [3, 1] keeps scopes separate (valid).
    s2 = spn.SumsLayer((v12, [0, 1, 2, 4]), name="S2")
    s2b = spn.SumsLayer((v12, [0, 1, 2, 4]), num_or_size_sums=[3, 1],
                        name="S2b")
    self.assertTrue(s2b.is_valid())
    self.assertFalse(s2.is_valid())
    # Interleaving v12 and v34 inputs: only partitions that keep each sum
    # within a single scope are valid.
    s3 = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                       (v34, 0), num_or_size_sums=2)
    s3b = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                        (v34, 0), num_or_size_sums=[4, 1, 4, 1])
    s3c = spn.SumsLayer((v12, [0, 1, 2, 3]), (v34, 0), (v12, [0, 1, 2, 3]),
                        (v34, 0), num_or_size_sums=[4, 1, 5])
    self.assertFalse(s3.is_valid())
    self.assertTrue(s3b.is_valid())
    self.assertFalse(s3c.is_valid())
    # Products: p1 and p2 share a scope; p3 differs (uses (v34, 1)).
    p1 = spn.Product((v12, [0, 5]), (v34, 0))
    p2 = spn.Product((v12, [1, 6]), (v34, 0))
    p3 = spn.Product((v12, [1, 6]), (v34, 1))
    s4 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
    s5 = spn.SumsLayer(p1, p3, p1, p3, p1, p3, num_or_size_sums=3)
    s6 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1])
    s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[1, 2])
    s8 = spn.SumsLayer(p1, p2, p3, p2, p1, num_or_size_sums=[2, 1, 2])
    self.assertTrue(s4.is_valid())
    self.assertFalse(s5.is_valid())  # p1 and p3 different scopes
    self.assertTrue(s6.is_valid())
    self.assertFalse(s7.is_valid())  # p2 and p3 different scopes
    self.assertTrue(s8.is_valid())
    # With IVS
    s6 = spn.SumsLayer(p1, p2, p1, p2, p1, p2, num_or_size_sums=3)
    s6.generate_latent_indicators()
    self.assertTrue(s6.is_valid())
    # Latent indicators from a RawLeaf: size must match the sum structure
    s7 = spn.SumsLayer(p1, p2, num_or_size_sums=1)
    s7.set_latent_indicators(spn.RawLeaf(num_vars=2))
    self.assertFalse(s7.is_valid())
    s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=3)
    s7.set_latent_indicators(spn.RawLeaf(num_vars=3))
    self.assertTrue(s7.is_valid())
    s7 = spn.SumsLayer(p1, p2, p3, num_or_size_sums=[2, 1])
    s7.set_latent_indicators(spn.RawLeaf(num_vars=3))
    self.assertFalse(s7.is_valid())
    # Structurally incompatible indicator sizes raise StructureError
    # instead of merely reporting invalidity.
    s8 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
    s8.set_latent_indicators(spn.IndicatorLeaf(num_vars=3, num_vals=2))
    with self.assertRaises(spn.StructureError):
        s8.is_valid()
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
    s9.set_latent_indicators(spn.RawLeaf(num_vars=2))
    with self.assertRaises(spn.StructureError):
        s9.is_valid()
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
    s9.set_latent_indicators(spn.RawLeaf(num_vars=3))
    with self.assertRaises(spn.StructureError):
        s9.is_valid()
    # IndicatorLeaf-based latent indicators: num_vars * num_vals must line
    # up with the layer's sums for validity.
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
    s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=1, num_vals=4))
    self.assertTrue(s9.is_valid())
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
    s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=1, num_vals=4))
    self.assertTrue(s9.is_valid())
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 3])
    s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
    self.assertFalse(s9.is_valid())
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
    s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
    self.assertTrue(s9.is_valid())
    s9 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1])
    s9.set_latent_indicators(spn.IndicatorLeaf(num_vars=2, num_vals=2))
    self.assertFalse(s9.is_valid())
    # Latent indicators given as an indexed tuple into v12
    s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=2)
    s10.set_latent_indicators((v12, [0, 3, 5, 7]))
    self.assertTrue(s10.is_valid())
    s10 = spn.SumsLayer(p1, p2, p1, p2, num_or_size_sums=[1, 2, 1])
    s10.set_latent_indicators((v12, [0, 3, 5, 7]))
    self.assertFalse(s10.is_valid())
def test_generate_spn(self, num_decomps, num_subsets, num_mixtures,
                      num_input_mixtures, input_dims, input_dist, balanced,
                      node_type, log_weights):
    """A generic test for DenseSPNGenerator.

    Builds three sub-SPNs and an upper SPN over them, then checks that:
    (a) the partition function of both the first sub-SPN and the complete
    SPN equals 1.0, and (b) MPE-path counts are evenly distributed over
    all input indicator values.
    """
    if input_dist == spn.DenseSPNGenerator.InputDist.RAW \
            and num_input_mixtures != 1:
        # Redundant test case, so just return
        return

    # Input parameters
    num_inputs = input_dims[0]
    num_vars = input_dims[1]
    num_vals = 2

    # Log the parameter combination being tested
    printc("\n- num_inputs: %s" % num_inputs)
    printc("- num_vars: %s" % num_vars)
    printc("- num_vals: %s" % num_vals)
    printc("- num_decomps: %s" % num_decomps)
    printc("- num_subsets: %s" % num_subsets)
    printc("- num_mixtures: %s" % num_mixtures)
    printc("- input_dist: %s" % ("MIXTURE" if input_dist ==
           spn.DenseSPNGenerator.InputDist.MIXTURE else "RAW"))
    printc("- balanced: %s" % balanced)
    printc("- num_input_mixtures: %s" % num_input_mixtures)
    printc("- node_type: %s" % ("SINGLE" if node_type ==
           spn.DenseSPNGenerator.NodeType.SINGLE else "BLOCK" if node_type ==
           spn.DenseSPNGenerator.NodeType.BLOCK else "LAYER"))
    printc("- log_weights: %s" % log_weights)

    # Inputs
    inputs = [
        spn.IVs(num_vars=num_vars, num_vals=num_vals,
                name=("IVs_%d" % (i + 1))) for i in range(num_inputs)
    ]
    gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
                                num_subsets=num_subsets,
                                num_mixtures=num_mixtures,
                                input_dist=input_dist,
                                balanced=balanced,
                                num_input_mixtures=num_input_mixtures,
                                node_type=node_type)
    # Generate Sub-SPNs (three independent roots over the same inputs)
    sub_spns = [
        gen.generate(*inputs, root_name=("sub_root_%d" % (i + 1)))
        for i in range(3)
    ]
    # Generate random weights for the first sub-SPN
    with tf.name_scope("Weights"):
        spn.generate_weights(sub_spns[0],
                             tf.initializers.random_uniform(0.0, 1.0),
                             log=log_weights)
    # Initialize weights of the first sub-SPN
    sub_spn_init = spn.initialize_weights(sub_spns[0])
    # Testing validity of the first sub-SPN
    self.assertTrue(sub_spns[0].is_valid())
    # Generate value ops of the first sub-SPN
    sub_spn_v = sub_spns[0].get_value()
    sub_spn_v_log = sub_spns[0].get_log_value()
    # Generate path ops of the first sub-SPN
    sub_spn_mpe_path_gen = spn.MPEPath(log=False)
    sub_spn_mpe_path_gen_log = spn.MPEPath(log=True)
    sub_spn_mpe_path_gen.get_mpe_path(sub_spns[0])
    sub_spn_mpe_path_gen_log.get_mpe_path(sub_spns[0])
    sub_spn_path = [sub_spn_mpe_path_gen.counts[inp] for inp in inputs]
    sub_spn_path_log = [
        sub_spn_mpe_path_gen_log.counts[inp] for inp in inputs
    ]
    # Collect all weight nodes of the first sub-SPN
    sub_spn_weight_nodes = []

    def fun(node):
        if node.is_param:
            sub_spn_weight_nodes.append(node)

    spn.traverse_graph(sub_spns[0], fun=fun)
    # Generate an upper-SPN over sub-SPNs
    products_lower = []
    for sub_spn in sub_spns:
        products_lower.append([v.node for v in sub_spn.values])
    # Each sub-SPN gets a different number of top mixtures
    num_top_mixtures = [2, 1, 3]
    sums_lower = []
    for prods, num_top_mix in zip(products_lower, num_top_mixtures):
        # Build the top mixtures with the node type under test
        if node_type == spn.DenseSPNGenerator.NodeType.SINGLE:
            sums_lower.append(
                [spn.Sum(*prods) for _ in range(num_top_mix)])
        elif node_type == spn.DenseSPNGenerator.NodeType.BLOCK:
            sums_lower.append([spn.ParSums(*prods, num_sums=num_top_mix)])
        else:
            sums_lower.append([
                spn.SumsLayer(*prods * num_top_mix,
                              num_or_size_sums=num_top_mix)
            ])
    # Generate upper-SPN
    root = gen.generate(*list(itertools.chain(*sums_lower)),
                        root_name="root")
    # Generate random weights for the SPN
    with tf.name_scope("Weights"):
        spn.generate_weights(root,
                             tf.initializers.random_uniform(0.0, 1.0),
                             log=log_weights)
    # Initialize weight of the SPN
    spn_init = spn.initialize_weights(root)
    # Testing validity of the SPN
    self.assertTrue(root.is_valid())
    # Generate value ops of the SPN
    spn_v = root.get_value()
    spn_v_log = root.get_log_value()
    # Generate path ops of the SPN
    spn_mpe_path_gen = spn.MPEPath(log=False)
    spn_mpe_path_gen_log = spn.MPEPath(log=True)
    spn_mpe_path_gen.get_mpe_path(root)
    spn_mpe_path_gen_log.get_mpe_path(root)
    spn_path = [spn_mpe_path_gen.counts[inp] for inp in inputs]
    spn_path_log = [spn_mpe_path_gen_log.counts[inp] for inp in inputs]
    # Collect all weight nodes in the SPN
    spn_weight_nodes = []

    def fun(node):
        if node.is_param:
            spn_weight_nodes.append(node)

    spn.traverse_graph(root, fun=fun)
    # Create a session
    with self.test_session() as sess:
        # Initializing weights
        sess.run(sub_spn_init)
        sess.run(spn_init)
        # Generate input feed: all possible joint assignments of the
        # input variables, so summing over the batch covers the whole
        # state space.
        feed = np.array(
            list(
                itertools.product(range(num_vals),
                                  repeat=(num_inputs * num_vars))))
        batch_size = feed.shape[0]
        feed_dict = {}
        for inp, f in zip(inputs, np.split(feed, num_inputs, axis=1)):
            feed_dict[inp] = f
        # Compute all values and paths of sub-SPN
        sub_spn_out = sess.run(sub_spn_v, feed_dict=feed_dict)
        sub_spn_out_log = sess.run(tf.exp(sub_spn_v_log),
                                   feed_dict=feed_dict)
        sub_spn_out_path = sess.run(sub_spn_path, feed_dict=feed_dict)
        sub_spn_out_path_log = sess.run(sub_spn_path_log,
                                        feed_dict=feed_dict)
        # Compute all values and paths of the complete SPN
        spn_out = sess.run(spn_v, feed_dict=feed_dict)
        spn_out_log = sess.run(tf.exp(spn_v_log), feed_dict=feed_dict)
        spn_out_path = sess.run(spn_path, feed_dict=feed_dict)
        spn_out_path_log = sess.run(spn_path_log, feed_dict=feed_dict)
        # Test if partition function of the sub-SPN and of the
        # complete SPN is 1.0
        self.assertAlmostEqual(sub_spn_out.sum(), 1.0, places=6)
        self.assertAlmostEqual(sub_spn_out_log.sum(), 1.0, places=6)
        self.assertAlmostEqual(spn_out.sum(), 1.0, places=6)
        self.assertAlmostEqual(spn_out_log.sum(), 1.0, places=6)
        # Test if the sum of counts for each value of each variable
        # (6 variables, with 2 values each) = batch-size / num-vals
        self.assertEqual(
            np.sum(np.hstack(sub_spn_out_path), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(sub_spn_out_path_log), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(spn_out_path), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(spn_out_path_log), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)