Example 1
    def test_group_initialization(self):
        """Group initialization of weights nodes"""
        v1 = spn.IndicatorLeaf(num_vars=1, num_vals=2)
        v2 = spn.IndicatorLeaf(num_vars=1, num_vals=4)
        v3 = spn.IndicatorLeaf(num_vars=1, num_vals=2)
        v4 = spn.IndicatorLeaf(num_vars=1, num_vals=2)
        # Sum
        s1 = spn.Sum(v1)
        s1.generate_weights(tf.initializers.constant([0.2, 0.3]))
        s2 = spn.Sum(v2)
        s2.generate_weights(tf.initializers.constant(5))
        # ParallelSums
        s3 = spn.ParallelSums(*[v3, v4], num_sums=2)
        s3.generate_weights(
            tf.initializers.constant([0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1]))
        s4 = spn.ParallelSums(*[v1, v2, v3, v4], num_sums=3)
        s4.generate_weights(tf.initializers.constant(2.0))
        # Product
        p = spn.Product(s1, s2, s3, s4)
        init = spn.initialize_weights(p)

        with self.test_session() as sess:
            sess.run([init])
            val1 = sess.run(s1.weights.node.get_value())
            val2 = sess.run(s2.weights.node.get_value())
            val3 = sess.run(s3.weights.node.get_value())
            val4 = sess.run(s4.weights.node.get_value())
            val1_log = sess.run(tf.exp(s1.weights.node.get_log_value()))
            val2_log = sess.run(tf.exp(s2.weights.node.get_log_value()))
            val3_log = sess.run(tf.exp(s3.weights.node.get_log_value()))
            val4_log = sess.run(tf.exp(s4.weights.node.get_log_value()))

        self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2, [[0.25, 0.25, 0.25, 0.25]])
        np.testing.assert_array_almost_equal(
            val3, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
        self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4_log.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1_log, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2_log,
                                             [[0.25, 0.25, 0.25, 0.25]])
        np.testing.assert_array_almost_equal(
            val3_log, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4_log, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
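
A note on the expected values: libspn normalizes sum weights, rescaling each row of a weight matrix to sum to one. That is why the constant initializer [0.2, 0.3] yields [0.4, 0.6], a constant 5 over four inputs yields 0.25 each, and a constant 2.0 over s4's ten inputs (2 + 4 + 2 + 2 indicator columns) yields 0.1 each. A minimal numpy sketch of that normalization (normalize_rows is a hypothetical helper, not part of libspn):

import numpy as np

def normalize_rows(w):
    # Rescale each row to sum to 1, matching the values asserted above.
    w = np.atleast_2d(np.asarray(w, dtype=np.float64))
    return w / w.sum(axis=1, keepdims=True)

print(normalize_rows([0.2, 0.3]))  # [[0.4 0.6]]
print(normalize_rows([5.0] * 4))   # [[0.25 0.25 0.25 0.25]]
print(normalize_rows([2.0] * 10))  # [[0.1 0.1 ... 0.1]], as for s4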
Example 2
    def expand(self):
        """
        Expand input to include likelihoods for semantics.

        Do nothing if already expanded.
        """
        if not self._expanded:
            print("Expanding...")

            self._likelihood_inputs = spn.RawInput(num_vars=self._num_nodes *
                                                   self._num_vals,
                                                   name=self.vn['LH_CONT'])
            self._semantic_inputs = spn.IVs(num_vars=self._num_nodes,
                                            num_vals=self._num_vals,
                                            name=self.vn['SEMAN_IVS'])
            prods = []
            for i in range(self._num_nodes):
                for j in range(self._num_vals):
                    prod = spn.Product(
                        (self._likelihood_inputs, [i * self._num_vals + j]),
                        (self._semantic_inputs, [i * self._num_vals + j]))
                    prods.append(prod)
            self._conc_inputs.set_inputs(*map(spn.Input.as_input, prods))
            self._expanded = True
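
The pairing loop above relies on a flat index layout: for node i and value j, the corresponding column in both input nodes is i * num_vals + j. A small plain-Python illustration (hypothetical sizes):

num_nodes, num_vals = 2, 3
for i in range(num_nodes):
    for j in range(num_vals):
        print("node %d, val %d -> flat index %d" % (i, j, i * num_vals + j))
# node 0 occupies flat indices 0..2, node 1 occupies 3..5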
Example 3
    def expand(self, use_cont_vars=False):
        """
        Custom method.

        Replaces the IVs inputs with a product node that has two children: a continuous
        input for likelihood, and a discrete input for semantics category.

        Do nothing if already expanded.
        """

        if not self._expanded:

            print("Expanding...")

            num_vars = len(self._graph.nodes)
            self._semantic_inputs = spn.IVs(num_vars=num_vars,
                                            num_vals=self._num_vals)
            # Note: use RawInput when doing cold-database experiments with DGSM
            # input; use ContVars for synthetic experiments.
            if use_cont_vars:
                self._likelihood_inputs = spn.ContVars(
                    num_vars=num_vars * self._num_vals)
            else:
                self._likelihood_inputs = spn.RawInput(
                    num_vars=num_vars * self._num_vals)

            prods = []
            for i in range(num_vars):
                for j in range(self._num_vals):
                    prod = spn.Product(
                        (self._likelihood_inputs, [i * self._num_vals + j]),
                        (self._semantic_inputs, [i * self._num_vals + j]))
                    prods.append(prod)
            self._conc_inputs.set_inputs(*map(spn.Input.as_input, prods))
            self._expanded = True
Example 4
import libspn as spn
import tensorflow as tf

indicator_leaves = spn.IndicatorLeaf(num_vars=2,
                                     num_vals=2,
                                     name="indicator_x")

# Connect first two sums to indicators of first variable
sum_11 = spn.Sum((indicator_leaves, [0, 1]), name="sum_11")
sum_12 = spn.Sum((indicator_leaves, [0, 1]), name="sum_12")

# Connect another two sums to indicators of the second variable
sum_21 = spn.Sum((indicator_leaves, [2, 3]), name="sum_21")
sum_22 = spn.Sum((indicator_leaves, [2, 3]), name="sum_22")

# Connect three product nodes
prod_1 = spn.Product(sum_11, sum_21, name="prod_1")
prod_2 = spn.Product(sum_11, sum_22, name="prod_2")
prod_3 = spn.Product(sum_12, sum_22, name="prod_3")

# Connect a root sum
root = spn.Sum(prod_1, prod_2, prod_3, name="root")

# Connect a latent indicator
indicator_y = root.generate_latent_indicators(
    name="indicator_y")  # Can be added manually

# Generate weights
spn.generate_weights(
    root,
    initializer=tf.initializers.random_uniform())  # Can be added manually
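
A possible follow-up, using only calls that already appear in Example 1: initialize all weights from the root, then inspect the root's weights, which should be row-normalized after initialization. How observed data is fed to the leaves is not shown in these examples, so it is omitted here.

init = spn.initialize_weights(root)
root_weights_op = root.weights.node.get_value()

with tf.Session() as sess:
    sess.run(init)
    # Each row of the root's weight matrix sums to 1 after initialization.
    print(sess.run(root_weights_op))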
Example 5
    curr_node_ls = []
    if node_type == 'Sum':
        node_type = 'Prod'
    else:
        node_type = 'Sum'

    # NOTE: this assignment overrides the toggle above, forcing every layer
    # to be a product layer (the Sum branch below becomes dead code).
    node_type = 'Prod'

    for i in range(CURR_CNT):
        # Children could instead be chosen randomly from last_node_ls;
        # here they are paired deterministically.
        ch_0 = last_node_ls[i * 2]
        ch_1 = last_node_ls[i * 2 + 1]
        if node_type == 'Prod':
            node_x = spn.Product(ch_0,
                                 ch_1,
                                 name="prod" + str(l) + '_' + str(i))
        elif node_type == 'Sum':
            # Note: iv_x is created here but never connected to the Sum below.
            iv_x = spn.IndicatorLeaf(num_vars=2,
                                     num_vals=2,
                                     name="iv_x" + str(l) + '_' + str(i))
            node_x = spn.Sum(ch_0, ch_1, name="sum" + str(l) + '_' + str(i))
            node_x.generate_weights(
                tf.initializers.constant([random.random(),
                                          random.random()]))
        else:
            assert 0
        curr_node_ls.append(node_x)

    last_node_ls = curr_node_ls
    LAST_CNT = CURR_CNT
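
The deterministic pairing (i * 2, i * 2 + 1) means each new layer has half as many nodes as the previous one, so CURR_CNT is presumably LAST_CNT // 2. A plain-Python sketch of just the pairing (hypothetical names, structure only):

last_node_ls = ["n%d" % i for i in range(8)]  # previous layer, LAST_CNT = 8
CURR_CNT = len(last_node_ls) // 2  # pairing halves the layer width
for i in range(CURR_CNT):
    print("new node %d <- children %s, %s"
          % (i, last_node_ls[i * 2], last_node_ls[i * 2 + 1]))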
Example 6
    def dup_fun_up(inpt,
                   *args,
                   conc=None,
                   tmpl_num_vars=[0],
                   tmpl_num_vals=[0],
                   graph_num_vars=[0],
                   labels=[[]],
                   tspn=None):
        """
        Purely for template spn copying only. Supports template with multiple types of IVs.
        Requires that the template SPN contains only one concat node where all inputs go through.

        labels: (2D list) variable's numerical label, used to locate the variable's position in the big IVs.
                If there are multiple types of IVs, then this should be a 2D list, where each inner
                list is the label (starting from 0) for one type of IVs, and each outer list represents
                one type of IVs.
        """
        # Know what range of indices each variable takes
        node, indices = inpt
        if node.is_op:
            if isinstance(node, spn.Sum):
                # [2:] is to skip the weights node and the explicit IVs node for this sum.
                return spn.Sum(*args[2:], weights=args[0])
            elif isinstance(node, spn.ParSums):
                return spn.ParSums(*args[2:],
                                   weights=args[0],
                                   num_sums=tspn._num_mixtures)
            elif isinstance(node, spn.Product):
                return spn.Product(*args)
            elif isinstance(node, spn.PermProducts):
                return spn.PermProducts(*args)
            elif isinstance(node, spn.Concat):
                # The goal is to map from index on the template SPN's concat node to the index on
                # the instance SPN's concat node.

                # First, be able to tell which type of iv the index has
                # ranges_tmpl / ranges_instance store the start (inclusive)
                # index of the range of indices taken by each IV type on the
                # template / instance SPN's Concat node.
                ranges_tmpl = [0]
                ranges_instance = [0]
                for i in range(len(tmpl_num_vars)):
                    ranges_tmpl.append(ranges_tmpl[-1] +
                                       tmpl_num_vars[i] * tmpl_num_vals[i])
                    ranges_instance.append(ranges_instance[-1] +
                                           graph_num_vars[i] *
                                           tmpl_num_vals[i])

                big_indices = []
                for indx in indices:
                    iv_type = -1
                    for i, start in enumerate(ranges_tmpl):
                        if indx < start + tmpl_num_vars[i] * tmpl_num_vals[i]:
                            iv_type = i
                            break
                    if iv_type == -1:
                        raise ValueError(
                            "Index %d out of range on the template Concat node."
                            % indx)

                    # Then, figure out variable index and offset (w.r.t. template Concat node)
                    varidx = (indx -
                              ranges_tmpl[iv_type]) // tmpl_num_vals[iv_type]
                    offset = (indx - ranges_tmpl[iv_type]
                              ) - varidx * tmpl_num_vals[iv_type]
                    # THIS IS the actual position of the variable's inputs in the big Concat.
                    varlabel = labels[iv_type][varidx]
                    big_indices.append(ranges_instance[iv_type] +
                                       varlabel * tmpl_num_vals[iv_type] +
                                       offset)
                return spn.Input(conc, big_indices)
        elif isinstance(node, spn.Weights):
            return node
        else:
            raise ValueError(
                "Unexpected node %s. IVs are not handled here; please remove "
                "them from the Concat." % node)
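
The Concat branch is easiest to check with small numbers. A worked plain-Python sketch of the same index arithmetic (hypothetical sizes and labels, mirroring the code above):

tmpl_num_vars = [2]    # template: 2 variables of a single IV type
tmpl_num_vals = [3]    # each variable has 3 values
graph_num_vars = [5]   # instance graph: 5 variables of that type
labels = [[4, 1]]      # template var 0 -> instance var 4; var 1 -> instance var 1

ranges_tmpl, ranges_instance = [0], [0]
for i in range(len(tmpl_num_vars)):
    ranges_tmpl.append(ranges_tmpl[-1] + tmpl_num_vars[i] * tmpl_num_vals[i])
    ranges_instance.append(ranges_instance[-1]
                           + graph_num_vars[i] * tmpl_num_vals[i])

for indx in range(6):  # every index on the template Concat node
    iv_type = 0        # only one IV type in this sketch
    varidx = (indx - ranges_tmpl[iv_type]) // tmpl_num_vals[iv_type]
    offset = (indx - ranges_tmpl[iv_type]) - varidx * tmpl_num_vals[iv_type]
    varlabel = labels[iv_type][varidx]
    big = (ranges_instance[iv_type]
           + varlabel * tmpl_num_vals[iv_type] + offset)
    print("template index %d -> instance index %d" % (indx, big))
# template var 0 (indices 0..2) maps to instance indices 12..14;
# template var 1 (indices 3..5) maps to instance indices 3..5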
Example 7
    def _init_struct(self,
                     sess,
                     divisions=-1,
                     num_partitions=1,
                     partitions=None,
                     extra_partition_multiplyer=1):
        """
        Initialize the structure for training. (private method)

        sess: (tf.Session): a session that contains all weights.

        **kwargs:
           num_partitions (int): number of partitions (children for root node)
           If template is EdgeTemplate, then:
             divisions (int) number of views per place
           extra_partition_multiplyer (int): Used to multiply num_partitions so that more partitions
                                             are tried and ones with higher coverage are picked.
        """

        for tspn, template in self._spns:
            # Remove inputs; this is necessary for the duplication to work. We
            # don't want the template SPNs to keep their indicator-variable
            # inputs, because the instance SPN has its own.
            tspn._conc_inputs.set_inputs()

        # Create vars and maps
        self._catg_inputs = spn.IVs(num_vars=len(self._graph.nodes),
                                    num_vals=self._num_vals)
        self._conc_inputs = spn.Concat(self._catg_inputs)
        self._template_nodes_map = {}  # map from template id to list of node ids
        self._node_label_map = {}  # key: node id; value: a number (0~num_nodes-1)
        self._label_node_map = {}  # key: a number (0~num_nodes-1); value: node id
        _i = 0
        for nid in self._graph.nodes:
            self._node_label_map[nid] = _i
            self._label_node_map[_i] = nid
            _i += 1

        if partitions is None:
            """Try partition the graph `extra_partition_multiplyer` times more than what is asked for. Then pick the top `num_partitions` with the highest
            coverage of the main template."""
            print(
                "Partitioning the graph... (Selecting %d from %d attempts)" %
                (num_partitions, extra_partition_multiplyer * num_partitions))
            partitioned_results = {}
            main_template = self._spns[0][1]
            for i in range(extra_partition_multiplyer * num_partitions):
                """Note: here, we only partition with the main template. The results (i.e. supergraph, unused graph) are stored
                and will be used later. """
                unused_graph, supergraph = self._graph.partition(
                    main_template,
                    get_unused=True,
                    super_node_class=self._super_node_class,
                    super_edge_class=self._super_edge_class)
                if self._template_mode == NodeTemplate.code():  ## NodeTemplate
                    coverage = len(
                        supergraph.nodes) * main_template.size() / len(
                            self._graph.nodes)
                    partitioned_results[(i, coverage)] = (supergraph,
                                                          unused_graph)
            used_coverages = set({})
            for i, coverage in sorted(partitioned_results,
                                      reverse=True,
                                      key=lambda x: x[1]):
                used_coverages.add((i, coverage))
                sys.stdout.write("%.3f  " % coverage)
                if len(used_coverages) >= num_partitions:
                    break
            sys.stdout.write("\n")
            """Keep partitioning the used partitions, and obtain a list of partitions in the same format as the `partitions` parameter"""
            partitions = []
            for key in used_coverages:
                supergraph, unused_graph = partitioned_results[key]
                partition = {main_template: supergraph}
                # Keep partitioning the unused_graph using smaller templates
                for _, template in self._spns[1:]:  # skip main template
                    unused_graph_2nd, supergraph_2nd = unused_graph.partition(
                        template,
                        get_unused=True,
                        super_node_class=self._super_node_class,
                        super_edge_class=self._super_edge_class)
                    partition[template] = supergraph_2nd
                    unused_graph = unused_graph_2nd
                partitions.append(partition)
        """Building instance spn"""
        print("Building instance spn...")
        pspns = []
        tspns = {}
        for template_spn, template in self._spns:
            tspns[template.__name__] = template_spn
        """Making an SPN"""
        """Now, partition the graph, copy structure, and connect self._catg_inputs appropriately to the network."""
        # Main template partition
        _k = 0

        self._partitions = partitions

        for _k, partition in enumerate(self._partitions):
            print("Partition %d" % (_k + 1))
            nodes_covered = set({})
            template_spn_roots = []
            for template_spn, template in self._spns:
                supergraph = partition[template]
                print("Will duplicate %s %d times." %
                      (template.__name__, len(supergraph.nodes)))
                template_spn_roots.extend(
                    NodeTemplateInstanceSpn._duplicate_template_spns(
                        self, tspns, template, supergraph, nodes_covered))

            assert nodes_covered == self._graph.nodes.keys()
            p = spn.Product(*template_spn_roots)
            assert p.is_valid()
            pspns.append(p)  # add spn for one partition
        ## End for loop ##

        # Sum up all
        self._root = spn.Sum(*pspns)
        assert self._root.is_valid()
        self._root.generate_weights(trainable=True)
        # initialize ONLY the weights node for the root
        sess.run(self._root.weights.node.initialize())
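
The partition-selection step above reduces to: attempt extra_partition_multiplyer * num_partitions partitions, score each by template coverage, and keep the top num_partitions. A stripped-down sketch of that selection (hypothetical data, not the class's API):

def select_top_partitions(attempts, num_partitions):
    # attempts: list of (coverage, partition) pairs.
    ranked = sorted(attempts, key=lambda t: t[0], reverse=True)
    return [p for _, p in ranked[:num_partitions]]

attempts = [(0.8, "p0"), (0.95, "p1"), (0.6, "p2"), (0.9, "p3")]
print(select_top_partitions(attempts, 2))  # ['p1', 'p3']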