def test_group_initialization(self):
    """Group initialization of weights nodes.

    Builds two Sum and two ParSums nodes with explicitly initialized
    weights, connects them under a single Product, initializes all
    weights at once via ``spn.initialize_weights``, and checks that both
    the linear (`get_value`) and log (`get_log_value`) weight values are
    correctly normalized per sum.
    """
    v1 = spn.IVs(num_vars=1, num_vals=2)
    v2 = spn.IVs(num_vars=1, num_vals=4)
    v3 = spn.IVs(num_vars=1, num_vals=2)
    v4 = spn.IVs(num_vars=1, num_vals=2)
    # Sum: s1 gets explicit per-weight values, s2 a scalar constant
    s1 = spn.Sum(v1)
    s1.generate_weights(tf.initializers.constant([0.2, 0.3]))
    s2 = spn.Sum(v2)
    s2.generate_weights(tf.initializers.constant(5))
    # ParSums: s3 gets one row of values per modeled sum, s4 a scalar
    s3 = spn.ParSums(*[v3, v4], num_sums=2)
    s3.generate_weights(
        tf.initializers.constant([0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1]))
    s4 = spn.ParSums(*[v1, v2, v3, v4], num_sums=3)
    s4.generate_weights(tf.initializers.constant(2.0))
    # Product tying all sums together so one initialize op covers them
    p = spn.Product(s1, s2, s3, s4)
    init = spn.initialize_weights(p)
    with self.test_session() as sess:
        sess.run([init])
        val1 = sess.run(s1.weights.node.get_value())
        val2 = sess.run(s2.weights.node.get_value())
        val3 = sess.run(s3.weights.node.get_value())
        val4 = sess.run(s4.weights.node.get_value())
        # exp(log value) should reproduce the linear weights
        val1_log = sess.run(tf.exp(s1.weights.node.get_log_value()))
        val2_log = sess.run(tf.exp(s2.weights.node.get_log_value()))
        val3_log = sess.run(tf.exp(s3.weights.node.get_log_value()))
        val4_log = sess.run(tf.exp(s4.weights.node.get_log_value()))

        self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4.dtype, spn.conf.dtype.as_numpy_dtype())
        # Weights are normalized per sum: [0.2, 0.3] -> [0.4, 0.6], a
        # constant over 4 inputs -> 0.25 each, etc.
        np.testing.assert_array_almost_equal(val1, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2, [[0.25, 0.25, 0.25, 0.25]])
        np.testing.assert_array_almost_equal(
            val3, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4, [[0.1] * 10, [0.1] * 10, [0.1] * 10])

        self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4_log.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1_log, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2_log,
                                             [[0.25, 0.25, 0.25, 0.25]])
        # BUGFIX: these two assertions previously re-checked the linear
        # values (val3/val4) instead of the log-path values, so the log
        # weights of the ParSums nodes were never actually tested.
        np.testing.assert_array_almost_equal(
            val3_log, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4_log, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
def par_sums(inputs, indices, ivs, num_sums, inf_type, log=True, output=None):
    """Build a ParSums node over the (optionally indexed) inputs, attach a
    root Sum with generated weights, and return the weight-initialization
    op together with the root's (log-)value op."""
    feeds = [inputs] if indices is None else [(inputs, indices)]
    # One ParSums node models 'num_sums' sums over the same inputs/ivs
    par_node = spn.ParSums(*feeds, num_sums=num_sums, ivs=ivs[0])
    par_node.generate_weights()
    # Single root Sum on top of the ParSums node, with its own weights
    root = spn.Sum(par_node)
    root.generate_weights()
    value_op = (root.get_log_value(inference_type=inf_type) if log
                else root.get_value(inference_type=inf_type))
    return spn.initialize_weights(root), value_op
def poons_multi(inputs, num_vals, num_mixtures, num_subsets, inf_type,
                log=False, output=None):
    """Build a Poon-style network from multi-op nodes and return the root,
    a weight-initialization op, and MPE-path count ops for the inputs."""
    # One ParSums mixture block per contiguous subset of input values
    subsets = []
    for i in range(num_subsets):
        value_range = list(range(i * num_vals, (i + 1) * num_vals))
        subsets.append(spn.ParSums((inputs, value_range),
                                   num_sums=num_mixtures))
    products = spn.PermProducts(*subsets)
    root = spn.Sum(products, name="root")
    # Generate all weights in the network at once
    spn.generate_weights(root)
    # MPE path generator in either linear or log space
    mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=bool(log))
    mpe_path_gen.get_mpe_path(root)
    input_nodes = inputs if isinstance(inputs, list) else [inputs]
    path_ops = [mpe_path_gen.counts[inp] for inp in input_nodes]
    return root, spn.initialize_weights(root), path_ops
def par_sums(inputs, indices, ivs, num_sums, inf_type=None, log=True,
             output=None):
    """Build a ParSums node under a root Sum and return the
    weight-initialization op plus the MPE-path counts of the ParSums
    weights node."""
    feeds = [inputs] if indices is None else [(inputs, indices)]
    # Single ParSums node modeling 'num_sums' sums, tied to the last ivs
    par_node = spn.ParSums(*feeds, num_sums=num_sums, ivs=ivs[-1])
    weights = par_node.generate_weights()
    # Root Sum over the ParSums node, with its own generated weights
    root = spn.Sum(par_node)
    root.generate_weights()
    # MPE path in either linear or log space
    mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=bool(log))
    mpe_path_gen.get_mpe_path(root)
    path_op = [mpe_path_gen.counts[weights]]
    return spn.initialize_weights(root), path_op
def _build_op(self, inputs, placeholders, conf): """ Creates the graph using only ParSum nodes """ # TODO make sure the ivs are correct sum_indices, weights, ivs = inputs.indices, inputs.weights, None log, inf_type = conf.log, conf.inf_type weights = np.split( weights, np.cumsum([len(ind) * inputs.num_parallel for ind in sum_indices])[:-1]) parallel_sum_nodes = [] for ind in sum_indices: parallel_sum_nodes.append( spn.ParSums((placeholders[0], ind), num_sums=inputs.num_parallel)) weight_nodes = [ self._generate_weights(node, w.tolist()) for node, w in zip(parallel_sum_nodes, weights) ] if ivs: [s.set_ivs(iv) for s, iv in zip(parallel_sum_nodes, ivs)] root = spn.Sum(*parallel_sum_nodes) self._generate_weights(root) mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=log) mpe_path_gen.get_mpe_path(root) path_op = [mpe_path_gen.counts[w] for w in weight_nodes] input_counts = [mpe_path_gen.counts[inp] for inp in placeholders] return tf.tuple( path_op + input_counts)[:len(path_op)], self._initialize_from(root)
def poons_multi(inputs, num_vals, num_mixtures, num_subsets, inf_type,
                log=False, output=None):
    """Build a Poon-style network from multi-op nodes and return the root,
    a weight-initialization op, and the root's (log-)value op."""
    # One ParSums mixture block per contiguous subset of input values
    subsets = []
    for i in range(num_subsets):
        value_range = list(range(i * num_vals, (i + 1) * num_vals))
        subsets.append(spn.ParSums((inputs, value_range),
                                   num_sums=num_mixtures))
    products = spn.PermProducts(*subsets)
    root = spn.Sum(products, name="root")
    # Generate all weights in the network at once
    spn.generate_weights(root)
    # Value op in either linear or log space
    value_op = (root.get_log_value(inference_type=inf_type) if log
                else root.get_value(inference_type=inf_type))
    return root, spn.initialize_weights(root), value_op
def par_sums(inputs, sum_indices, repetitions, inf_type, log=False, ivs=None):
    """Creates the graph using only ParSums nodes.

    Builds one ParSums node (with ``repetitions`` parallel sums) per index
    group in ``sum_indices``, optionally attaches ivs, joins the nodes via
    ``Ops._build_root_and_value``, and returns the weight-initialization op
    and the value op.
    """
    parallel_sum_nodes = []
    for ind in sum_indices:
        parallel_sum_nodes.append(
            spn.ParSums((inputs, ind), num_sums=repetitions))
    # IDIOM FIX: the original used list comprehensions purely for their
    # side effects; plain loops express the intent without building and
    # discarding throwaway lists.
    for node in parallel_sum_nodes:
        node.generate_weights()
    if ivs:
        for node, iv in zip(parallel_sum_nodes, ivs):
            node.set_ivs(iv)
    root, value_op = Ops._build_root_and_value(inf_type, log,
                                               parallel_sum_nodes)
    return spn.initialize_weights(root), value_op
def test_generate_spn(self, num_decomps, num_subsets, num_mixtures,
                      num_input_mixtures, input_dims, input_dist, balanced,
                      node_type, log_weights):
    """A generic test for DenseSPNGenerator.

    Generates three sub-SPNs over the same IVs inputs, validates and
    evaluates the first, then builds an upper SPN over all three and
    checks that both networks are valid, that their partition functions
    equal 1.0 (linear and log paths), and that MPE-path counts distribute
    evenly across input values over an exhaustive input feed.
    """
    if input_dist == spn.DenseSPNGenerator.InputDist.RAW \
            and num_input_mixtures != 1:
        # Redundant test case, so just return
        return
    # Input parameters
    num_inputs = input_dims[0]
    num_vars = input_dims[1]
    num_vals = 2
    # Log the parameter combination being exercised
    printc("\n- num_inputs: %s" % num_inputs)
    printc("- num_vars: %s" % num_vars)
    printc("- num_vals: %s" % num_vals)
    printc("- num_decomps: %s" % num_decomps)
    printc("- num_subsets: %s" % num_subsets)
    printc("- num_mixtures: %s" % num_mixtures)
    printc("- input_dist: %s" % ("MIXTURE" if input_dist ==
           spn.DenseSPNGenerator.InputDist.MIXTURE else "RAW"))
    printc("- balanced: %s" % balanced)
    printc("- num_input_mixtures: %s" % num_input_mixtures)
    printc("- node_type: %s" % ("SINGLE" if node_type ==
           spn.DenseSPNGenerator.NodeType.SINGLE else "BLOCK" if
           node_type == spn.DenseSPNGenerator.NodeType.BLOCK else "LAYER"))
    printc("- log_weights: %s" % log_weights)
    # Inputs
    inputs = [
        spn.IVs(num_vars=num_vars, num_vals=num_vals,
                name=("IVs_%d" % (i + 1))) for i in range(num_inputs)
    ]
    gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
                                num_subsets=num_subsets,
                                num_mixtures=num_mixtures,
                                input_dist=input_dist,
                                balanced=balanced,
                                num_input_mixtures=num_input_mixtures,
                                node_type=node_type)
    # Generate Sub-SPNs (three independent roots over the same inputs)
    sub_spns = [
        gen.generate(*inputs, root_name=("sub_root_%d" % (i + 1)))
        for i in range(3)
    ]
    # Generate random weights for the first sub-SPN
    with tf.name_scope("Weights"):
        spn.generate_weights(sub_spns[0],
                             tf.initializers.random_uniform(0.0, 1.0),
                             log=log_weights)
    # Initialize weights of the first sub-SPN
    sub_spn_init = spn.initialize_weights(sub_spns[0])
    # Testing validity of the first sub-SPN
    self.assertTrue(sub_spns[0].is_valid())
    # Generate value ops of the first sub-SPN
    sub_spn_v = sub_spns[0].get_value()
    sub_spn_v_log = sub_spns[0].get_log_value()
    # Generate path ops of the first sub-SPN
    sub_spn_mpe_path_gen = spn.MPEPath(log=False)
    sub_spn_mpe_path_gen_log = spn.MPEPath(log=True)
    sub_spn_mpe_path_gen.get_mpe_path(sub_spns[0])
    sub_spn_mpe_path_gen_log.get_mpe_path(sub_spns[0])
    sub_spn_path = [sub_spn_mpe_path_gen.counts[inp] for inp in inputs]
    sub_spn_path_log = [
        sub_spn_mpe_path_gen_log.counts[inp] for inp in inputs
    ]
    # Collect all weight nodes of the first sub-SPN
    sub_spn_weight_nodes = []

    def fun(node):
        if node.is_param:
            sub_spn_weight_nodes.append(node)

    spn.traverse_graph(sub_spns[0], fun=fun)
    # Generate an upper-SPN over sub-SPNs
    products_lower = []
    for sub_spn in sub_spns:
        products_lower.append([v.node for v in sub_spn.values])
    num_top_mixtures = [2, 1, 3]
    sums_lower = []
    # Build the top mixtures with the node kind matching node_type
    for prods, num_top_mix in zip(products_lower, num_top_mixtures):
        if node_type == spn.DenseSPNGenerator.NodeType.SINGLE:
            sums_lower.append(
                [spn.Sum(*prods) for _ in range(num_top_mix)])
        elif node_type == spn.DenseSPNGenerator.NodeType.BLOCK:
            sums_lower.append([spn.ParSums(*prods, num_sums=num_top_mix)])
        else:
            sums_lower.append([
                spn.SumsLayer(*prods * num_top_mix,
                              num_or_size_sums=num_top_mix)
            ])
    # Generate upper-SPN
    root = gen.generate(*list(itertools.chain(*sums_lower)),
                        root_name="root")
    # Generate random weights for the SPN
    with tf.name_scope("Weights"):
        spn.generate_weights(root,
                             tf.initializers.random_uniform(0.0, 1.0),
                             log=log_weights)
    # Initialize weight of the SPN
    spn_init = spn.initialize_weights(root)
    # Testing validity of the SPN
    self.assertTrue(root.is_valid())
    # Generate value ops of the SPN
    spn_v = root.get_value()
    spn_v_log = root.get_log_value()
    # Generate path ops of the SPN
    spn_mpe_path_gen = spn.MPEPath(log=False)
    spn_mpe_path_gen_log = spn.MPEPath(log=True)
    spn_mpe_path_gen.get_mpe_path(root)
    spn_mpe_path_gen_log.get_mpe_path(root)
    spn_path = [spn_mpe_path_gen.counts[inp] for inp in inputs]
    spn_path_log = [spn_mpe_path_gen_log.counts[inp] for inp in inputs]
    # Collect all weight nodes in the SPN
    spn_weight_nodes = []

    def fun(node):
        if node.is_param:
            spn_weight_nodes.append(node)

    spn.traverse_graph(root, fun=fun)
    # Create a session
    with self.test_session() as sess:
        # Initializing weights
        sess.run(sub_spn_init)
        sess.run(spn_init)
        # Generate input feed: exhaustive enumeration of all value
        # assignments over all variables of all inputs
        feed = np.array(
            list(
                itertools.product(range(num_vals),
                                  repeat=(num_inputs * num_vars))))
        batch_size = feed.shape[0]
        feed_dict = {}
        for inp, f in zip(inputs, np.split(feed, num_inputs, axis=1)):
            feed_dict[inp] = f
        # Compute all values and paths of sub-SPN
        sub_spn_out = sess.run(sub_spn_v, feed_dict=feed_dict)
        sub_spn_out_log = sess.run(tf.exp(sub_spn_v_log),
                                   feed_dict=feed_dict)
        sub_spn_out_path = sess.run(sub_spn_path, feed_dict=feed_dict)
        sub_spn_out_path_log = sess.run(sub_spn_path_log,
                                        feed_dict=feed_dict)
        # Compute all values and paths of the complete SPN
        spn_out = sess.run(spn_v, feed_dict=feed_dict)
        spn_out_log = sess.run(tf.exp(spn_v_log), feed_dict=feed_dict)
        spn_out_path = sess.run(spn_path, feed_dict=feed_dict)
        spn_out_path_log = sess.run(spn_path_log, feed_dict=feed_dict)
        # Test if partition function of the sub-SPN and of the
        # complete SPN is 1.0
        self.assertAlmostEqual(sub_spn_out.sum(), 1.0, places=6)
        self.assertAlmostEqual(sub_spn_out_log.sum(), 1.0, places=6)
        self.assertAlmostEqual(spn_out.sum(), 1.0, places=6)
        self.assertAlmostEqual(spn_out_log.sum(), 1.0, places=6)
        # Test if the sum of counts for each value of each variable
        # (6 variables, with 2 values each) = batch-size / num-vals
        self.assertEqual(
            np.sum(np.hstack(sub_spn_out_path), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(sub_spn_out_path_log), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(spn_out_path), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)
        self.assertEqual(
            np.sum(np.hstack(spn_out_path_log), axis=0).tolist(),
            [batch_size // num_vals] * num_inputs * num_vars * num_vals)