Example #1
    def __init__(self, template, *args, **kwargs):
        """
        Initialize a NodeTemplateSpn.

        template (type): a subclass of NodeTemplate used to generate the structure.

        **kwargs:
           seed (int): seed for the random generator. Set before generating
                       the structure.
           num_vals (int): number of different values a variable (on a node) can take;
                       the values are discretized from 0 to num_vals-1.
        """
        super().__init__(template, *args, **kwargs)

        self._num_nodes = self.template.num_nodes()
        self._num_vals = kwargs.get("num_vals", -1)
        if self._num_vals <= 0:
            raise ValueError("num_vals must be positive!")

        # Don't use the layered generator for now
        if self._num_nodes == 1:
            self._input_dist = spn.DenseSPNGenerator.InputDist.RAW
            self._dense_gen = spn.DenseSPNGenerator(
                num_decomps=self._num_decomps,
                num_subsets=self._num_subsets,
                num_mixtures=self._num_mixtures,
                input_dist=self._input_dist,
                num_input_mixtures=self._num_input_mixtures)

        # Initialize structure and learning ops
        self._init_struct(rnd=self._rnd, seed=self._seed)
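
A minimal usage sketch for this constructor, assuming a concrete NodeTemplate subclass (ThreeNodeTemplate is a hypothetical name):

    # Hypothetical instantiation: seed fixes the random structure generator,
    # num_vals sets the discretization of each node's variable.
    model = NodeTemplateSpn(ThreeNodeTemplate, num_vals=5, seed=100)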
Example #2
    def test_compute_value_sum(self, grid_size):
        indicator_leaf = spn.IndicatorLeaf(num_vals=2, num_vars=grid_size**2)
        convsum = ConvSums(indicator_leaf,
                           spatial_dim_sizes=[grid_size, grid_size],
                           num_channels=4)
        convsum2 = ConvSums(indicator_leaf,
                            spatial_dim_sizes=[grid_size, grid_size],
                            num_channels=4)
        dense_generator = spn.DenseSPNGenerator(
            num_mixtures=4,
            num_subsets=4,
            num_decomps=1,
            input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE)
        root = dense_generator.generate(convsum, convsum2)
        spn.generate_weights(root,
                             initializer=tf.initializers.random_uniform())
        init = spn.initialize_weights(root)

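        # Enumerate every joint binary assignment of the grid's variables:
        # row n of indicator_feed holds the bits of n, so the batch covers all
        # 2**(grid_size**2) states and the logsumexp of the root's log-value
        # equals the log partition function (0.0 for normalized weights).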
        num_possibilities = 2**(grid_size**2)
        nums = np.arange(num_possibilities).reshape((num_possibilities, 1))
        powers = 2**np.arange(grid_size**2).reshape((1, grid_size**2))
        indicator_feed = np.bitwise_and(nums, powers) // powers

        value_op = spn.LogValue(spn.InferenceType.MARGINAL).get_value(root)
        value_op_sum = tf.reduce_logsumexp(value_op)

        with self.test_session() as sess:
            sess.run(init)
            root_sum = sess.run(value_op_sum,
                                feed_dict={indicator_leaf: indicator_feed})

        print(indicator_feed[:10])
        self.assertAllClose(root_sum, 0.0)
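
The bit-twiddling above generalizes to any number of binary variables; a standalone sketch with illustrative names:

    import numpy as np

    n_vars = 4
    nums = np.arange(2 ** n_vars).reshape((-1, 1))    # 0..15, one row per assignment
    powers = 2 ** np.arange(n_vars).reshape((1, -1))  # bit masks 1, 2, 4, 8
    feed = np.bitwise_and(nums, powers) // powers     # row n holds the binary digits of n
    assert feed.shape == (16, 4)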
Example #3
    def test_generate_spn(self, num_decomps, num_subsets, num_mixtures,
                          num_input_mixtures, input_dims, input_dist, balanced,
                          node_type, log_weights):
        """A generic test for DenseSPNGenerator."""

        if input_dist == spn.DenseSPNGenerator.InputDist.RAW \
            and num_input_mixtures != 1:
            # Redundant test case, so just return
            return

        # Input parameters
        num_inputs = input_dims[0]
        num_vars = input_dims[1]
        num_vals = 2

        printc("\n- num_inputs: %s" % num_inputs)
        printc("- num_vars: %s" % num_vars)
        printc("- num_vals: %s" % num_vals)
        printc("- num_decomps: %s" % num_decomps)
        printc("- num_subsets: %s" % num_subsets)
        printc("- num_mixtures: %s" % num_mixtures)
        printc("- input_dist: %s" %
               ("MIXTURE" if input_dist
                == spn.DenseSPNGenerator.InputDist.MIXTURE else "RAW"))
        printc("- balanced: %s" % balanced)
        printc("- num_input_mixtures: %s" % num_input_mixtures)
        printc("- node_type: %s" %
               ("SINGLE" if node_type == spn.DenseSPNGenerator.NodeType.SINGLE
                else "BLOCK" if node_type
                == spn.DenseSPNGenerator.NodeType.BLOCK else "LAYER"))
        printc("- log_weights: %s" % log_weights)

        # Inputs
        inputs = [
            spn.IndicatorLeaf(num_vars=num_vars,
                              num_vals=num_vals,
                              name=("IndicatorLeaf_%d" % (i + 1)))
            for i in range(num_inputs)
        ]

        gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
                                    num_subsets=num_subsets,
                                    num_mixtures=num_mixtures,
                                    input_dist=input_dist,
                                    balanced=balanced,
                                    num_input_mixtures=num_input_mixtures,
                                    node_type=node_type)

        # Generate Sub-SPNs
        sub_spns = [
            gen.generate(*inputs, root_name=("sub_root_%d" % (i + 1)))
            for i in range(3)
        ]

        # Generate random weights for the first sub-SPN
        with tf.name_scope("Weights"):
            spn.generate_weights(sub_spns[0],
                                 tf.initializers.random_uniform(0.0, 1.0),
                                 log=log_weights)

        # Initialize weights of the first sub-SPN
        sub_spn_init = spn.initialize_weights(sub_spns[0])

        # Testing validity of the first sub-SPN
        self.assertTrue(sub_spns[0].is_valid())

        # Generate value ops of the first sub-SPN
        sub_spn_v = sub_spns[0].get_value()
        sub_spn_v_log = sub_spns[0].get_log_value()

        # Generate path ops of the first sub-SPN
        sub_spn_mpe_path_gen = spn.MPEPath(log=False)
        sub_spn_mpe_path_gen_log = spn.MPEPath(log=True)
        sub_spn_mpe_path_gen.get_mpe_path(sub_spns[0])
        sub_spn_mpe_path_gen_log.get_mpe_path(sub_spns[0])
        sub_spn_path = [sub_spn_mpe_path_gen.counts[inp] for inp in inputs]
        sub_spn_path_log = [
            sub_spn_mpe_path_gen_log.counts[inp] for inp in inputs
        ]

        # Collect all weight nodes of the first sub-SPN
        sub_spn_weight_nodes = []

        def fun(node):
            if node.is_param:
                sub_spn_weight_nodes.append(node)

        spn.traverse_graph(sub_spns[0], fun=fun)

        # Generate an upper-SPN over sub-SPNs
        products_lower = []
        for sub_spn in sub_spns:
            products_lower.append([v.node for v in sub_spn.values])

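        # Mix each sub-SPN's product nodes using the node type under test:
        # SINGLE builds one spn.Sum per mixture, BLOCK a single
        # spn.ParallelSums holding all mixtures, and LAYER a single
        # spn.SumsLayer.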
        num_top_mixtures = [2, 1, 3]
        sums_lower = []
        for prods, num_top_mix in zip(products_lower, num_top_mixtures):
            if node_type == spn.DenseSPNGenerator.NodeType.SINGLE:
                sums_lower.append(
                    [spn.Sum(*prods) for _ in range(num_top_mix)])
            elif node_type == spn.DenseSPNGenerator.NodeType.BLOCK:
                sums_lower.append(
                    [spn.ParallelSums(*prods, num_sums=num_top_mix)])
            else:
                sums_lower.append([
                    spn.SumsLayer(*prods * num_top_mix,
                                  num_or_size_sums=num_top_mix)
                ])

        # Generate upper-SPN
        root = gen.generate(*list(itertools.chain(*sums_lower)),
                            root_name="root")

        # Generate random weights for the SPN
        with tf.name_scope("Weights"):
            spn.generate_weights(root,
                                 tf.initializers.random_uniform(0.0, 1.0),
                                 log=log_weights)

        # Initialize weights of the SPN
        spn_init = spn.initialize_weights(root)

        # Testing validity of the SPN
        self.assertTrue(root.is_valid())

        # Generate value ops of the SPN
        spn_v = root.get_value()
        spn_v_log = root.get_log_value()

        # Generate path ops of the SPN
        spn_mpe_path_gen = spn.MPEPath(log=False)
        spn_mpe_path_gen_log = spn.MPEPath(log=True)
        spn_mpe_path_gen.get_mpe_path(root)
        spn_mpe_path_gen_log.get_mpe_path(root)
        spn_path = [spn_mpe_path_gen.counts[inp] for inp in inputs]
        spn_path_log = [spn_mpe_path_gen_log.counts[inp] for inp in inputs]

        # Collect all weight nodes in the SPN
        spn_weight_nodes = []

        def fun(node):
            if node.is_param:
                spn_weight_nodes.append(node)

        spn.traverse_graph(root, fun=fun)

        # Create a session
        with self.test_session() as sess:
            # Initializing weights
            sess.run(sub_spn_init)
            sess.run(spn_init)

            # Generate input feed
            feed = np.array(
                list(
                    itertools.product(range(num_vals),
                                      repeat=(num_inputs * num_vars))))
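            # The feed enumerates every joint assignment of all variables, so
            # summing the SPN's (linear-domain) outputs over this batch yields
            # the partition function checked below.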
            batch_size = feed.shape[0]
            feed_dict = {}
            for inp, f in zip(inputs, np.split(feed, num_inputs, axis=1)):
                feed_dict[inp] = f

            # Compute all values and paths of sub-SPN
            sub_spn_out = sess.run(sub_spn_v, feed_dict=feed_dict)
            sub_spn_out_log = sess.run(tf.exp(sub_spn_v_log),
                                       feed_dict=feed_dict)
            sub_spn_out_path = sess.run(sub_spn_path, feed_dict=feed_dict)
            sub_spn_out_path_log = sess.run(sub_spn_path_log,
                                            feed_dict=feed_dict)

            # Compute all values and paths of the complete SPN
            spn_out = sess.run(spn_v, feed_dict=feed_dict)
            spn_out_log = sess.run(tf.exp(spn_v_log), feed_dict=feed_dict)
            spn_out_path = sess.run(spn_path, feed_dict=feed_dict)
            spn_out_path_log = sess.run(spn_path_log, feed_dict=feed_dict)

            # Test that the partition functions of the sub-SPN and of the
            # complete SPN equal 1.0
            self.assertAlmostEqual(sub_spn_out.sum(), 1.0, places=6)
            self.assertAlmostEqual(sub_spn_out_log.sum(), 1.0, places=6)
            self.assertAlmostEqual(spn_out.sum(), 1.0, places=6)
            self.assertAlmostEqual(spn_out_log.sum(), 1.0, places=6)

            # Test that the counts for each value of each variable
            # (num_inputs * num_vars variables with num_vals values each)
            # sum to batch_size / num_vals
            self.assertEqual(
                np.sum(np.hstack(sub_spn_out_path), axis=0).tolist(),
                [batch_size // num_vals] * num_inputs * num_vars * num_vals)
            self.assertEqual(
                np.sum(np.hstack(sub_spn_out_path_log), axis=0).tolist(),
                [batch_size // num_vals] * num_inputs * num_vars * num_vals)
            self.assertEqual(
                np.sum(np.hstack(spn_out_path), axis=0).tolist(),
                [batch_size // num_vals] * num_inputs * num_vars * num_vals)
            self.assertEqual(
                np.sum(np.hstack(spn_out_path_log), axis=0).tolist(),
                [batch_size // num_vals] * num_inputs * num_vars * num_vals)
Example #4
num_classes = 10
batch_size = 32
num_epochs = 10
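
# The remaining hyperparameters are assumed to be defined earlier in the
# original script; illustrative values only:
# num_vars, num_leaf_values = 784, 2
# num_decomps, num_subsets, num_mixtures = 1, 2, 4
# balanced, input_dist = True, spn.DenseSPNGenerator.InputDist.MIXTURE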

# Reset the graph
tf.reset_default_graph()

# Leaf nodes
leaf_indicators = spn.IndicatorLeaf(num_vals=num_leaf_values,
                                    num_vars=num_vars)

# Generates densely connected random SPNs
dense_generator = spn.DenseSPNGenerator(
    node_type=spn.DenseSPNGenerator.NodeType.BLOCK,
    num_subsets=num_subsets,
    num_mixtures=num_mixtures,
    num_decomps=num_decomps,
    balanced=balanced,
    input_dist=input_dist)

# Generate a dense SPN for each class
class_roots = [
    dense_generator.generate(leaf_indicators) for _ in range(num_classes)
]

# Connect sub-SPNs to a root
root = spn.Sum(*class_roots, name="RootSum")
root = spn.convert_to_layer_nodes(root)

# Add an indicator leaf node to the root as a latent class variable
class_indicators = root.generate_latent_indicators()
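
# A hedged sketch of how the latent class indicator might be fed during
# supervised training (names below are illustrative only):
# feed_dict = {leaf_indicators: x_batch,
#              class_indicators: y_batch.reshape(-1, 1)}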
Example #5
import tensorflow as tf
import libspn as spn
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

print(sess.run(c))  # run the matmul; device placement is logged to stderr

iv_x = spn.IndicatorLeaf(num_vars=2, num_vals=2, name="iv_x")
gen = spn.DenseSPNGenerator(num_decomps=1, num_subsets=2, num_mixtures=2)
root = gen.generate(iv_x, root_name="root")
iv_y = root.generate_latent_indicators(name="iv_y")  # Can be added manually
spn.generate_weights(root, initializer=tf.initializers.random_uniform(
    0, 1))  # Can be added manually

print(root.get_num_nodes())
print(root.get_scope())
print(root.is_valid())

SUM_CNT = 8
sum_ls = []
#iv_x = spn.IndicatorLeaf([[0,-1], [-1,-1]] ,num_vars=2, num_vals=2, name="iv_x")
for i in range(SUM_CNT):
    iv_x = spn.IndicatorLeaf([[-1], [0]],
                             num_vars=1,
                             num_vals=2,  # assumed, matching iv_x above
                             name="iv_x_%d" % i)  # name chosen for illustration
    # Assumed continuation of the truncated loop body: collect one sum per leaf.
    sum_ls.append(spn.Sum(iv_x, name="sum_%d" % i))
Example #6
    def test_compare_manual_conv(self, log_weights, inference_type):
        spn.conf.argmax_zero = True
        spatial_dims = [4, 4]
        nrows, ncols = spatial_dims
        num_vals = 4
        batch_size = 128
        num_vars = spatial_dims[0] * spatial_dims[1]
        indicator_leaf = spn.IndicatorLeaf(num_vars=num_vars,
                                           num_vals=num_vals)
        num_sums = 32
        weights = spn.Weights(num_weights=num_vals,
                              num_sums=num_sums,
                              initializer=tf.initializers.random_uniform(),
                              log=log_weights)

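        # Build one ParallelSums per spatial cell: each index range selects
        # that cell's num_vals indicator columns from the flat leaf output, so
        # concatenating all cells should match a single ConvSums over the grid.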
        parsums = []
        for row in range(nrows):
            for col in range(ncols):
                indices = list(
                    range(row * (ncols * num_vals) + col * num_vals,
                          row * (ncols * num_vals) + (col + 1) * num_vals))
                parsums.append(
                    spn.ParallelSums((indicator_leaf, indices),
                                     num_sums=num_sums,
                                     weights=weights))

        convsum = spn.ConvSums(indicator_leaf,
                               num_channels=num_sums,
                               weights=weights,
                               spatial_dim_sizes=spatial_dims)

        dense_gen = spn.DenseSPNGenerator(
            num_decomps=1,
            num_mixtures=2,
            num_subsets=2,
            input_dist=spn.DenseSPNGenerator.InputDist.RAW,
            node_type=spn.DenseSPNGenerator.NodeType.BLOCK)

        rnd = random.Random(1234)
        rnd_state = rnd.getstate()
        conv_root = dense_gen.generate(convsum, rnd=rnd)
        rnd.setstate(rnd_state)

        parsum_concat = spn.Concat(*parsums, name="ParSumConcat")
        parsum_root = dense_gen.generate(parsum_concat, rnd=rnd)

        self.assertTrue(conv_root.is_valid())
        self.assertTrue(parsum_root.is_valid())

        self.assertAllEqual(parsum_concat.get_scope(), convsum.get_scope())

        spn.generate_weights(conv_root, log=log_weights)
        spn.generate_weights(parsum_root, log=log_weights)

        convsum.set_weights(weights)
        for p in parsums:
            p.set_weights(weights)

        init_conv = spn.initialize_weights(conv_root)
        init_parsum = spn.initialize_weights(parsum_root)

        path_conv = spn.MPEPath(value_inference_type=inference_type)
        path_conv.get_mpe_path(conv_root)

        path_parsum = spn.MPEPath(value_inference_type=inference_type)
        path_parsum.get_mpe_path(parsum_root)

        indicator_leaf_count_parsum = path_parsum.counts[indicator_leaf]
        indicator_leaf_count_convsum = path_conv.counts[indicator_leaf]

        weight_counts_parsum = path_parsum.counts[weights]
        weight_counts_conv = path_conv.counts[weights]

        root_val_parsum = path_parsum.value.values[parsum_root]
        root_val_conv = path_conv.value.values[conv_root]

        parsum_counts = path_parsum.counts[parsum_concat]
        conv_counts = path_conv.counts[convsum]

        indicator_feed = np.random.randint(2, size=batch_size * num_vars)\
            .reshape((batch_size, num_vars))
        with tf.Session() as sess:
            sess.run([init_conv, init_parsum])
            indicator_counts_conv_out, indicator_count_parsum_out = sess.run(
                [indicator_leaf_count_convsum, indicator_leaf_count_parsum],
                feed_dict={indicator_leaf: indicator_feed})

            root_conv_value_out, root_parsum_value_out = sess.run(
                [root_val_conv, root_val_parsum],
                feed_dict={indicator_leaf: indicator_feed})

            weight_counts_conv_out, weight_counts_parsum_out = sess.run(
                [weight_counts_conv, weight_counts_parsum],
                feed_dict={indicator_leaf: indicator_feed})

            weight_value_conv_out, weight_value_parsum_out = sess.run([
                convsum.weights.node.variable, parsums[0].weights.node.variable
            ])

            parsum_counts_out, conv_counts_out = sess.run(
                [parsum_counts, conv_counts],
                feed_dict={indicator_leaf: indicator_feed})

            parsum_concat_val, convsum_val = sess.run(
                [
                    path_parsum.value.values[parsum_concat],
                    path_conv.value.values[convsum]
                ],
                feed_dict={indicator_leaf: indicator_feed})

        self.assertTrue(np.all(np.less_equal(convsum_val, 0.0)))
        self.assertTrue(np.all(np.less_equal(parsum_concat_val, 0.0)))
        self.assertAllClose(weight_value_conv_out, weight_value_parsum_out)
        self.assertAllClose(root_conv_value_out, root_parsum_value_out)
        self.assertAllClose(indicator_counts_conv_out,
                            indicator_count_parsum_out)
        self.assertAllClose(parsum_counts_out, conv_counts_out)
        self.assertAllClose(weight_counts_conv_out, weight_counts_parsum_out)
Example #7
    def test_compute_dense_gen_two_spatial_decomps_v2(self, node_type,
                                                      input_dist):
        input_channels = 2
        grid_dims = [32, 32]
        num_vars = grid_dims[0] * grid_dims[1]
        indicator_leaf = spn.IndicatorLeaf(num_vars=num_vars,
                                           num_vals=input_channels)

        convert_after = False
        if input_dist == spn.DenseSPNGenerator.InputDist.RAW and \
                node_type in [spn.DenseSPNGenerator.NodeType.BLOCK,
                              spn.DenseSPNGenerator.NodeType.LAYER]:
            node_type = spn.DenseSPNGenerator.NodeType.SINGLE
            convert_after = True

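        # Two spatial decompositions over the same leaf: the first stacks a
        # dilated ConvProducts (dilation_rate=2) on the grid, the second a
        # strided one (strides=2); both reduce the 32x32 grid to 8x8 before
        # being recombined at level 2.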
        # First decomposition
        convprod_dilate0 = spn.ConvProducts(indicator_leaf,
                                            spatial_dim_sizes=grid_dims,
                                            num_channels=16,
                                            padding='valid',
                                            dilation_rate=2,
                                            strides=1,
                                            kernel_size=2)
        convprod_dilate1 = spn.ConvProducts(convprod_dilate0,
                                            spatial_dim_sizes=[30, 30],
                                            num_channels=512,
                                            padding='valid',
                                            dilation_rate=1,
                                            strides=4,
                                            kernel_size=2)
        convsum_dilate = spn.ConvSums(convprod_dilate1,
                                      num_channels=2,
                                      spatial_dim_sizes=[8, 8])

        # Second decomposition
        convprod_stride0 = spn.ConvProducts(indicator_leaf,
                                            spatial_dim_sizes=grid_dims,
                                            num_channels=16,
                                            padding='valid',
                                            dilation_rate=1,
                                            strides=2,
                                            kernel_size=2)
        convprod_stride1 = spn.ConvProducts(convprod_stride0,
                                            spatial_dim_sizes=[16, 16],
                                            num_channels=512,
                                            padding='valid',
                                            dilation_rate=1,
                                            strides=2,
                                            kernel_size=2)
        convsum_stride = spn.ConvSums(convprod_stride1,
                                      num_channels=2,
                                      spatial_dim_sizes=[8, 8])

        # First decomposition level 2
        convprod_dilate0_l2 = spn.ConvProducts(convsum_stride,
                                               convsum_dilate,
                                               spatial_dim_sizes=[8, 8],
                                               num_channels=512,
                                               padding='valid',
                                               dilation_rate=2,
                                               strides=1,
                                               kernel_size=2)
        convprod_dilate1_l2 = spn.ConvProducts(convprod_dilate0_l2,
                                               spatial_dim_sizes=[6, 6],
                                               num_channels=512,
                                               padding='valid',
                                               dilation_rate=1,
                                               kernel_size=2,
                                               strides=4)
        convsum_dilate_l2 = spn.ConvSums(convprod_dilate1_l2,
                                         num_channels=2,
                                         spatial_dim_sizes=[4, 4])

        # Second decomposition level 2
        convprod_stride0_l2 = spn.ConvProducts(convsum_stride,
                                               convsum_dilate,
                                               spatial_dim_sizes=[8, 8],
                                               num_channels=512,
                                               padding='valid',
                                               dilation_rate=1,
                                               strides=2,
                                               kernel_size=2)
        convprod_stride1_l2 = spn.ConvProducts(convprod_stride0_l2,
                                               spatial_dim_sizes=[4, 4],
                                               num_channels=512,
                                               padding='valid',
                                               dilation_rate=1,
                                               strides=2,
                                               kernel_size=2)
        convsum_stride_l2 = spn.ConvSums(convprod_stride1_l2,
                                         num_channels=2,
                                         spatial_dim_sizes=[4, 4])

        dense_gen = spn.DenseSPNGenerator(num_mixtures=2,
                                          num_decomps=1,
                                          num_subsets=2,
                                          node_type=node_type,
                                          input_dist=input_dist)
        root = dense_gen.generate(convsum_stride_l2, convsum_dilate_l2)
        if convert_after:
            root = dense_gen.convert_to_layer_nodes(root)

        # Assert valid
        self.assertTrue(root.is_valid())

        # Setup the remaining Ops
        spn.generate_weights(root)
        init = spn.initialize_weights(root)
        value_op = tf.squeeze(root.get_log_value())

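        # Feeding -1 marginalizes every variable, so the root's log-value
        # should be log(1) = 0 for a valid SPN with normalized weights.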
        with self.test_session() as sess:
            sess.run(init)
            value_out = sess.run(
                value_op, {indicator_leaf: -np.ones((1, num_vars), dtype=np.int32)})

        self.assertAllClose(value_out, 0.0)