Example #1
    def sums(inputs,
             indices,
             latent_indicators,
             num_sums,
             inf_type,
             log=True,
             output=None):
        if indices is None:
            inputs = [inputs for _ in range(num_sums)]
        else:
            inputs = [(inputs, ind) for ind in indices]

        # Generate a single Sums node, modeling 'num_sums' sum nodes within,
        # connecting it to inputs and latent_indicators
        s = spn.Sums(*inputs,
                     num_sums=num_sums,
                     latent_indicators=latent_indicators[0])
        # Generate weights of the Sums node
        s.generate_weights()

        # Connect the Sums nodes to a single root Sum node and generate its weights
        root = spn.Sum(s)
        root.generate_weights()

        if log:
            value_op = root.get_log_value(inference_type=inf_type)
        else:
            value_op = root.get_value(inference_type=inf_type)

        return spn.initialize_weights(root), value_op
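These helpers all follow the same pattern: they build a small SPN and return a weight-initialization op together with a value op to run in a TensorFlow session. A minimal usage sketch for the helper above, assuming `import libspn as spn` and illustrative leaf sizes (the `ContVars` input and `IVs` indicator node here are not from the source):

    import numpy as np
    import tensorflow as tf
    import libspn as spn

    x = spn.ContVars(num_vars=4)          # hypothetical continuous input leaf
    iv = spn.IVs(num_vars=3, num_vals=4)  # latent indicators, one var per sum
    init, value_op = sums(x, indices=None, latent_indicators=[iv],
                          num_sums=3, inf_type=spn.InferenceType.MARGINAL)

    with tf.Session() as sess:
        sess.run(init)  # initialize all weights in the graph
        out = sess.run(value_op, feed_dict={x: np.random.rand(2, 4),
                                            iv: -np.ones((2, 3), dtype=int)})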
Example #2
    def sum(inputs,
            indices,
            latent_indicators,
            num_sums,
            inf_type,
            log=False,
            output=None):
        if indices is None:
            inputs = [inputs for _ in range(num_sums)]
        else:
            inputs = [(inputs, ind) for ind in indices]

        # Generate 'num_sums' Sum nodes, connecting each to inputs and latent_indicators
        s = []
        for inp, iv in zip(inputs, latent_indicators):
            s.append(spn.Sum(inp, latent_indicators=iv))
            # Generate weights for each Sum node
            s[-1].generate_weights()

        # Connect all sum nodes to a single root Sum node and generate its weights
        root = spn.Sum(*s)
        root.generate_weights()

        if log:
            value_op = root.get_log_value(inference_type=inf_type)
        else:
            value_op = root.get_value(inference_type=inf_type)

        return spn.initialize_weights(root), value_op
Example #3
    def par_sums(inputs,
                 indices,
                 ivs,
                 num_sums,
                 inf_type,
                 log=True,
                 output=None):
        if indices is None:
            inputs = [inputs]
        else:
            inputs = [(inputs, indices)]

        # Generate a single ParSums node, modeling 'num_sums' sum nodes
        # within, connecting it to inputs and ivs
        s = spn.ParSums(*inputs, num_sums=num_sums, ivs=ivs[0])
        # Generate weights of the ParSums node
        s.generate_weights()

        # Connect the ParSums nodes to a single root Sum node and generate
        # its weights
        root = spn.Sum(s)
        root.generate_weights()

        if log:
            value_op = root.get_log_value(inference_type=inf_type)
        else:
            value_op = root.get_value(inference_type=inf_type)

        return spn.initialize_weights(root), value_op
Example #4
    def test_group_initialization(self):
        """Group initialization of weights nodes"""
        v1 = spn.IVs(num_vars=1, num_vals=2)
        v2 = spn.IVs(num_vars=1, num_vals=4)
        s1 = spn.Sum(v1)
        s1.generate_weights([0.2, 0.3])
        s2 = spn.Sum(v2)
        s2.generate_weights(5)
        p = spn.Product(s1, s2)
        init = spn.initialize_weights(p)

        with tf.Session() as sess:
            sess.run([init])
            val1 = sess.run(s1.weights.node.get_value())
            val2 = sess.run(s2.weights.node.get_value())
            val1_log = sess.run(tf.exp(s1.weights.node.get_log_value()))
            val2_log = sess.run(tf.exp(s2.weights.node.get_log_value()))

        self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1, [0.4, 0.6])
        np.testing.assert_array_almost_equal(val2, [0.25, 0.25, 0.25, 0.25])
        self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1_log, [0.4, 0.6])
        np.testing.assert_array_almost_equal(val2_log,
                                             [0.25, 0.25, 0.25, 0.25])
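The expected values follow from the normalization that `generate_weights` applies at initialization: an explicit list such as [0.2, 0.3] is rescaled to sum to one, and a scalar such as 5 is broadcast into a uniform vector. A plain numpy check of that arithmetic:

    import numpy as np

    w = np.array([0.2, 0.3])
    print(w / w.sum())       # [0.4 0.6], matching val1
    w = np.full(4, 5.0)      # the scalar 5 broadcast over 4 inputs
    print(w / w.sum())       # [0.25 0.25 0.25 0.25], matching val2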
Example #5
 def test_mpe_value(self):
     """Calculation of SPN MPE value"""
     # Generate SPN
     model = spn.Poon11NaiveMixtureModel()
     model.build()
     # Set default inference type for each node
     model.root.set_inference_types(spn.InferenceType.MPE)
     # Get values
     init = spn.initialize_weights(model.root)
     val_mpe = model.root.get_value(inference_type=spn.InferenceType.MPE)
     val_default = model.root.get_value()
     val_log_mpe = model.root.get_log_value(
         inference_type=spn.InferenceType.MPE)
     val_log_default = model.root.get_log_value()
     with self.test_session() as sess:
         init.run()
         out_default = sess.run(
             val_default, feed_dict={model.latent_indicators: model.feed})
         out_mpe = sess.run(val_mpe,
                            feed_dict={model.latent_indicators: model.feed})
         out_log_default = sess.run(
             tf.exp(val_log_default),
             feed_dict={model.latent_indicators: model.feed})
         out_log_mpe = sess.run(
             tf.exp(val_log_mpe),
             feed_dict={model.latent_indicators: model.feed})
     # Check MPE values
     np.testing.assert_array_almost_equal(out_default,
                                          model.true_mpe_values)
     np.testing.assert_array_almost_equal(out_mpe, model.true_mpe_values)
     np.testing.assert_array_almost_equal(out_log_default,
                                          model.true_mpe_values)
     np.testing.assert_array_almost_equal(out_log_mpe,
                                          model.true_mpe_values)
Example #6
    def test_mpe_state(self):
        # Generate SPN
        model = spn.Poon11NaiveMixtureModel()
        model.build()
        # Add ops
        init = spn.initialize_weights(model.root)
        mpe_state_gen = spn.MPEState(
            value_inference_type=spn.InferenceType.MPE, log=False)
        mpe_state_gen_log = spn.MPEState(
            value_inference_type=spn.InferenceType.MPE, log=True)
        latent_indicators_state, = mpe_state_gen.get_state(
            model.root, model.latent_indicators)
        latent_indicators_state_log, = mpe_state_gen_log.get_state(
            model.root, model.latent_indicators)
        # Run
        with self.test_session() as sess:
            init.run()
            out = sess.run(latent_indicators_state,
                           feed_dict={model.latent_indicators: [[-1, -1]]})
            out_log = sess.run(latent_indicators_state_log,
                               feed_dict={model.latent_indicators: [[-1, -1]]})

        # For now we only compare the actual MPE state for IndicatorLeaf inputs of -1 (no evidence)
        np.testing.assert_array_equal(out.ravel(), model.true_mpe_state)
        np.testing.assert_array_equal(out_log.ravel(), model.true_mpe_state)
Example #7
    def par_sums(inputs,
                 indices,
                 latent_indicators,
                 num_sums,
                 inf_type=None,
                 log=True,
                 output=None):
        if indices is None:
            inputs = [inputs]
        else:
            inputs = [(inputs, indices)]

        # Generate a single ParallelSums node, modeling 'num_sums' sum nodes
        # within, connecting it to inputs and latent_indicators
        s = spn.ParallelSums(*inputs,
                             num_sums=num_sums,
                             latent_indicators=latent_indicators[-1])
        # Generate weights of the ParallelSums node
        weights = s.generate_weights()

        # Connect the ParallelSums nodes to a single root Sum node and generate
        # its weights
        root = spn.Sum(s)
        root.generate_weights()

        if log:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
        else:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type,
                                       log=False)

        mpe_path_gen.get_mpe_path(root)
        path_op = [mpe_path_gen.counts[weights]]
        return spn.initialize_weights(root), path_op
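`MPEPath.counts` is a mapping from graph nodes to their count tensors, so indexing it with the node returned by `generate_weights()` yields the per-weight MPE counts. A sketch of consuming the returned ops, again with illustrative leaf sizes that are not from the source:

    import numpy as np
    import tensorflow as tf
    import libspn as spn

    x = spn.ContVars(num_vars=4)          # hypothetical continuous input leaf
    iv = spn.IVs(num_vars=3, num_vals=4)  # indicators for the 3 parallel sums
    init_op, path_op = par_sums(x, indices=None, latent_indicators=[iv],
                                num_sums=3, inf_type=spn.InferenceType.MPE)

    with tf.Session() as sess:
        sess.run(init_op)
        counts = sess.run(path_op, feed_dict={x: np.random.rand(2, 4),
                                              iv: -np.ones((2, 3), dtype=int)})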
Example #8
    def test_withparams_initrandom(self):
        # Build an SPN
        feed = np.array(list(itertools.product(range(2), repeat=6)))
        model = spn.DiscreteDenseModel(
            num_classes=1, num_decomps=1, num_subsets=3,
            num_mixtures=2, weight_init_value=spn.ValueType.RANDOM_UNIFORM(0, 1))
        root1 = model.build(num_vars=6, num_vals=2)
        init1 = spn.initialize_weights(root1)

        with tf.Session() as sess:
            # Initialize
            init1.run()

            # Save
            path = self.out_path(self.cid() + ".spn")
            saver = spn.JSONSaver(path, pretty=True)
            saver.save(root1, save_param_vals=True)

        # Reset graph
        tf.reset_default_graph()

        with tf.Session() as sess:
            # Load
            loader = spn.JSONLoader(path)
            root2 = loader.load(load_param_vals=True)
            ivs2 = loader.find_node('SampleIVs')
            val_marginal2 = root2.get_value(inference_type=spn.InferenceType.MARGINAL)

            # Check model after loading
            self.assertTrue(root2.is_valid())
            out_marginal2 = sess.run(val_marginal2, feed_dict={ivs2: feed})
            self.assertAlmostEqual(out_marginal2.sum(), 1.0, places=6)

            # Writing graph
            self.write_tf_graph(sess, self.sid(), self.cid())
Example #9
    def test_withoutparams_initfixed(self):
        # Build an SPN
        model = spn.Poon11NaiveMixtureModel()
        root1 = model.build()

        # Save
        path = self.out_path(self.cid() + ".spn")
        saver = spn.JSONSaver(path, pretty=True)
        saver.save(root1, save_param_vals=False)

        # Reset graph
        tf.reset_default_graph()

        # Load
        loader = spn.JSONLoader(path)
        root2 = loader.load()
        latent_indicators2 = loader.find_node('IndicatorLeaf')
        init2 = spn.initialize_weights(root2)
        val_mpe2 = root2.get_value(inference_type=spn.InferenceType.MPE)
        val_marginal2 = root2.get_value(inference_type=spn.InferenceType.MARGINAL)

        # Check model after loading
        self.assertTrue(root2.is_valid())
        with self.test_session() as sess:
            init2.run()
            out_marginal2 = sess.run(val_marginal2, feed_dict={latent_indicators2: model.feed})
            out_mpe2 = sess.run(val_mpe2, feed_dict={latent_indicators2: model.feed})
        self.assertAlmostEqual(out_marginal2[np.all(model.feed >= 0, axis=1), :].sum(),
                               1.0, places=6)
        np.testing.assert_array_almost_equal(out_marginal2, model.true_values)
        np.testing.assert_array_almost_equal(out_mpe2, model.true_mpe_values)
Example #10
    def poons_multi(inputs,
                    num_vals,
                    num_mixtures,
                    num_subsets,
                    inf_type,
                    log=False,
                    output=None):

        # Build a POON-like network with multi-op nodes
        subsets = [
            spn.ParSums((inputs, list(range(i * num_vals,
                                            (i + 1) * num_vals))),
                        num_sums=num_mixtures) for i in range(num_subsets)
        ]
        products = spn.PermProducts(*subsets)
        root = spn.Sum(products, name="root")

        # Generate dense SPN and all weights in the network
        spn.generate_weights(root)

        # Generate path ops based on inf_type and log
        if log:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
        else:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type,
                                       log=False)

        mpe_path_gen.get_mpe_path(root)
        path_ops = [
            mpe_path_gen.counts[inp]
            for inp in (inputs if isinstance(inputs, list) else [inputs])
        ]
        return root, spn.initialize_weights(root), path_ops
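The index ranges passed to the `ParSums` nodes partition the leaf vector into disjoint, equally sized slices, one per subset. For example, with num_vals = 2 and num_subsets = 3:

    num_vals, num_subsets = 2, 3
    for i in range(num_subsets):
        print(list(range(i * num_vals, (i + 1) * num_vals)))
    # [0, 1]
    # [2, 3]
    # [4, 5]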
Example #11
    def sum(inputs,
            indices,
            latent_indicators,
            num_sums,
            inf_type=None,
            log=False,
            output=None):
        if indices is None:
            inputs = [inputs]
        else:
            inputs = [(inputs, indices)]

        # Generate 'num_sums' Sum nodes, connecting each to inputs and latent_indicators
        s = []
        weights = []
        for i in range(num_sums):
            s.append(spn.Sum(*inputs, latent_indicators=latent_indicators[i]))
            weights.append(s[-1].generate_weights())

        # Connect all sum nodes to a single root Sum node and generate its weights
        root = spn.Sum(*s)
        root.generate_weights()

        if log:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
        else:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type,
                                       log=False)

        mpe_path_gen.get_mpe_path(root)
        path_ops = [mpe_path_gen.counts[w] for w in weights]
        return spn.initialize_weights(root), path_ops
Example #12
    def test_withoutparams_initrandom(self):
        # Build an SPN
        feed = np.array(list(itertools.product(range(2), repeat=6)))
        model = spn.DiscreteDenseModel(
            num_classes=1, num_decomps=1, num_subsets=3,
            num_mixtures=2, weight_initializer=tf.initializers.random_uniform(0.0, 1.0))
        root1 = model.build(num_vars=6, num_vals=2)

        # Save
        path = self.out_path(self.cid() + ".spn")
        saver = spn.JSONSaver(path, pretty=True)
        saver.save(root1, save_param_vals=False)

        # Reset graph
        tf.reset_default_graph()

        # Load
        loader = spn.JSONLoader(path)
        root2 = loader.load()
        latent_indicators2 = loader.find_node('SampleIndicatorLeaf')
        init2 = spn.initialize_weights(root2)
        val_marginal2 = root2.get_value(inference_type=spn.InferenceType.MARGINAL)

        # Check model after loading
        self.assertTrue(root2.is_valid())
        with self.test_session() as sess:
            init2.run()
            out_marginal2 = sess.run(val_marginal2, feed_dict={latent_indicators2: feed})
        self.assertAlmostEqual(out_marginal2.sum(), 1.0, places=6)
Example #13
    def sums(inputs,
             indices,
             ivs,
             num_sums,
             inf_type=None,
             log=True,
             output=None):
        if indices is None:
            inputs = [inputs for _ in range(num_sums)]
        else:
            inputs = [(inputs, indices) for _ in range(num_sums)]

        # Generate a single Sums node, modeling 'num_sums' sum nodes within,
        # connecting it to inputs and ivs
        s = spn.Sums(*inputs, num_sums=num_sums, ivs=ivs[-1])
        # Generate weights of the Sums node
        weights = s.generate_weights()

        # Connect the Sums nodes to a single root Sum node and generate its weights
        root = spn.Sum(s)
        root.generate_weights()

        if log:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
        else:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type,
                                       log=False)

        mpe_path_gen.get_mpe_path(root)
        path_op = [mpe_path_gen.counts[weights]]
        return spn.initialize_weights(root), path_op
Example #14
    def generic_model_test(self, name, root, sample_ivs, class_ivs):
        # Generating weight initializers
        init = spn.initialize_weights(root)

        # Testing validity
        self.assertTrue(root.is_valid())

        # Generating value ops
        v = root.get_value()
        v_log = root.get_log_value()

        # Creating session
        with tf.Session() as sess:
            self.write_tf_graph(sess, self.sid(), self.cid())
            # Initializing weights
            init.run()
            # Computing all values
            feed_samples = list(itertools.product(range(2), repeat=6))
            if class_ivs is not None:
                feed_class = np.array([
                    i for i in range(class_ivs.num_vals)
                    for _ in range(len(feed_samples))
                ]).reshape(-1, 1)
                feed_samples = np.array(feed_samples * class_ivs.num_vals)
                feed_dict = {sample_ivs: feed_samples, class_ivs: feed_class}
            else:
                feed_samples = np.array(feed_samples)
                feed_dict = {sample_ivs: feed_samples}
            out = sess.run(v, feed_dict=feed_dict)
            out_log = sess.run(tf.exp(v_log), feed_dict=feed_dict)

            # Test if partition function is 1.0
            self.assertAlmostEqual(out.sum(), 1.0, places=6)
            self.assertAlmostEqual(out_log.sum(), 1.0, places=6)
Example #15
    def test_mpe_path(self):
        # Generate SPN
        model = spn.Poon11NaiveMixtureModel()
        model.build()
        # Add ops
        init = spn.initialize_weights(model.root)
        mpe_path_gen = spn.MPEPath(value_inference_type=spn.InferenceType.MPE,
                                   log=False)
        mpe_path_gen_log = spn.MPEPath(
            value_inference_type=spn.InferenceType.MPE, log=True)
        mpe_path_gen.get_mpe_path(model.root)
        mpe_path_gen_log.get_mpe_path(model.root)
        # Run
        with self.test_session() as sess:
            init.run()
            out = sess.run(mpe_path_gen.counts[model.latent_indicators],
                           feed_dict={model.latent_indicators: model.feed})
            out_log = sess.run(
                mpe_path_gen_log.counts[model.latent_indicators],
                feed_dict={model.latent_indicators: model.feed})

        true_latent_indicators_counts = np.array(
            [[0., 1., 1., 0.], [0., 1., 1., 0.], [0., 1., 0., 1.],
             [1., 0., 1., 0.], [1., 0., 1., 0.], [1., 0., 0., 1.],
             [0., 1., 1., 0.], [0., 1., 1., 0.], [0., 1., 0., 1.]],
            dtype=spn.conf.dtype.as_numpy_dtype)

        np.testing.assert_array_equal(out, true_latent_indicators_counts)
        np.testing.assert_array_equal(out_log, true_latent_indicators_counts)
Example #16
    def test_gradient_on_dense_spn(self, num_decomps, num_subsets,
                                   num_mixtures, input_dist, num_vars,
                                   num_components, softplus):
        batch_size = 9

        mean_init = np.arange(num_vars * num_components).reshape(
            num_vars, num_components)
        gl = spn.GaussianLeaf(num_vars=num_vars,
                              num_components=num_components,
                              loc_init=mean_init,
                              softplus_scale=softplus)

        gen = spn.DenseSPNGenerator(
            num_decomps=num_decomps,
            num_subsets=num_subsets,
            num_mixtures=num_mixtures,
            node_type=spn.DenseSPNGenerator.NodeType.LAYER,
            input_dist=input_dist)

        root = gen.generate(gl, root_name="root")

        with tf.name_scope("Weights"):
            spn.generate_weights(root,
                                 tf.initializers.random_uniform(0.0, 1.0),
                                 log=True)

        init = spn.initialize_weights(root)

        self.assertTrue(root.is_valid())

        log_val = root.get_log_value()

        spn_grad = spn.Gradient(log=True)

        spn_grad.get_gradients(root)

        mean_grad_custom, var_grad_custom = gl._compute_gradient(
            spn_grad.gradients[gl])

        mean_grad_tf, var_grad_tf = tf.gradients(
            log_val, [gl.loc_variable, gl.scale_variable])

        fd = {gl: np.random.rand(batch_size, num_vars)}

        with self.test_session() as sess:
            sess.run(init)
            mu_grad_custom_val, var_grad_custom_val = sess.run(
                [mean_grad_custom, var_grad_custom], fd)

            mu_grad_tf_val, var_grad_tf_val = sess.run(
                [mean_grad_tf, var_grad_tf], fd)

        self.assertAllClose(mu_grad_custom_val,
                            mu_grad_tf_val,
                            atol=1e-4,
                            rtol=1e-4)
        self.assertAllClose(var_grad_custom_val,
                            var_grad_tf_val,
                            atol=1e-4,
                            rtol=1e-4)
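The test above validates the hand-derived leaf gradients against TensorFlow's autodiff. The general shape of such a check, reduced to a toy objective (this sketch is generic TF1, not libspn):

    import numpy as np
    import tensorflow as tf

    x = tf.Variable(3.0)
    loss = tf.log(x * x)          # d/dx log(x^2) = 2 / x
    analytic = 2.0 / x            # hand-derived gradient
    (autodiff,) = tf.gradients(loss, [x])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        a, b = sess.run([analytic, autodiff])
        np.testing.assert_allclose(a, b, rtol=1e-6)  # both 2/3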
Example #17
 def test(values, ivs, weights, feed, output):
     with self.subTest(values=values, ivs=ivs, weights=weights,
                       feed=feed):
         n = spn.Sum(*values, ivs=ivs)
         n.generate_weights(weights)
         op = n.get_value(spn.InferenceType.MARGINAL)
         op_log = n.get_log_value(spn.InferenceType.MARGINAL)
         with tf.Session() as sess:
             spn.initialize_weights(n).run()
             out = sess.run(op, feed_dict=feed)
             out_log = sess.run(tf.exp(op_log), feed_dict=feed)
         np.testing.assert_array_almost_equal(
             out,
             np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
         np.testing.assert_array_almost_equal(
             out_log,
             np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
Example #18
    def test_discretedense_saving_3class_externallatent_indicators(self):
        model1 = spn.DiscreteDenseModel(
            num_classes=3,
            num_decomps=2,
            num_subsets=3,
            num_mixtures=2,
            input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
            num_input_mixtures=None,
            weight_initializer=tf.initializers.random_uniform(0.0, 1.0))
        sample_latent_indicators1 = spn.IndicatorLeaf(num_vars=6, num_vals=2)
        class_latent_indicators1 = spn.IndicatorLeaf(num_vars=1, num_vals=3)
        model1.build(sample_latent_indicators1,
                     class_input=class_latent_indicators1)
        init1 = spn.initialize_weights(model1.root)

        feed_samples = list(itertools.product(range(2), repeat=6))
        feed_class = np.array([
            i for i in range(3) for _ in range(len(feed_samples))
        ]).reshape(-1, 1)
        feed_samples = np.array(feed_samples * 3)

        with self.test_session() as sess:
            # Initialize
            init1.run()

            # Save
            path = self.out_path(self.cid() + ".spn")
            model1.save_to_json(path,
                                pretty=True,
                                save_param_vals=True,
                                sess=sess)

        # Reset graph
        tf.reset_default_graph()

        with self.test_session() as sess:
            # Load
            model2 = spn.Model.load_from_json(path,
                                              load_param_vals=True,
                                              sess=sess)
            self.assertIs(type(model2), spn.DiscreteDenseModel)

            val_marginal2 = model2.root.get_value(
                inference_type=spn.InferenceType.MARGINAL)

            # Check model after loading
            self.assertTrue(model2.root.is_valid())
            out_marginal2 = sess.run(val_marginal2,
                                     feed_dict={
                                         model2.sample_inputs[0].node:
                                         feed_samples,
                                         model2.class_input.node: feed_class
                                     })
            self.assertAlmostEqual(out_marginal2.sum(), 1.0, places=6)

            # Writing graph
            self.write_tf_graph(sess, self.sid(), self.cid())
Example #19
 def test(values, latent_indicators, weights, feed, output):
     with self.subTest(values=values,
                       latent_indicators=latent_indicators,
                       weights=weights,
                       feed=feed):
         n = spn.Sum(*values, latent_indicators=latent_indicators)
         n.generate_weights(tf.initializers.constant(weights))
         op = n.get_value(spn.InferenceType.MPE)
         op_log = n.get_log_value(spn.InferenceType.MPE)
         with self.test_session() as sess:
             spn.initialize_weights(n).run()
             out = sess.run(op, feed_dict=feed)
             out_log = sess.run(tf.exp(op_log), feed_dict=feed)
         np.testing.assert_array_almost_equal(
             out, np.array(output,
                           dtype=spn.conf.dtype.as_numpy_dtype()))
         np.testing.assert_array_almost_equal(
             out_log,
             np.array(output, dtype=spn.conf.dtype.as_numpy_dtype()))
Example #20
    def test_group_initialization(self):
        """Group initialization of weights nodes"""
        v1 = spn.IVs(num_vars=1, num_vals=2)
        v2 = spn.IVs(num_vars=1, num_vals=4)
        v3 = spn.IVs(num_vars=1, num_vals=2)
        v4 = spn.IVs(num_vars=1, num_vals=2)
        # Sum
        s1 = spn.Sum(v1)
        s1.generate_weights(tf.initializers.constant([0.2, 0.3]))
        s2 = spn.Sum(v2)
        s2.generate_weights(tf.initializers.constant(5))
        # ParSums
        s3 = spn.ParSums(*[v3, v4], num_sums=2)
        s3.generate_weights(
            tf.initializers.constant([0.1, 0.2, 0.3, 0.4, 0.4, 0.3, 0.2, 0.1]))
        s4 = spn.ParSums(*[v1, v2, v3, v4], num_sums=3)
        s4.generate_weights(tf.initializers.constant(2.0))
        # Product
        p = spn.Product(s1, s2, s3, s4)
        init = spn.initialize_weights(p)

        with self.test_session() as sess:
            sess.run([init])
            val1 = sess.run(s1.weights.node.get_value())
            val2 = sess.run(s2.weights.node.get_value())
            val3 = sess.run(s3.weights.node.get_value())
            val4 = sess.run(s4.weights.node.get_value())
            val1_log = sess.run(tf.exp(s1.weights.node.get_log_value()))
            val2_log = sess.run(tf.exp(s2.weights.node.get_log_value()))
            val3_log = sess.run(tf.exp(s3.weights.node.get_log_value()))
            val4_log = sess.run(tf.exp(s4.weights.node.get_log_value()))

        self.assertEqual(val1.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2, [[0.25, 0.25, 0.25, 0.25]])
        np.testing.assert_array_almost_equal(
            val3, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
        self.assertEqual(val1_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val2_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val3_log.dtype, spn.conf.dtype.as_numpy_dtype())
        self.assertEqual(val4_log.dtype, spn.conf.dtype.as_numpy_dtype())
        np.testing.assert_array_almost_equal(val1_log, [[0.4, 0.6]])
        np.testing.assert_array_almost_equal(val2_log,
                                             [[0.25, 0.25, 0.25, 0.25]])
        np.testing.assert_array_almost_equal(
            val3_log, [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        np.testing.assert_array_almost_equal(
            val4_log, [[0.1] * 10, [0.1] * 10, [0.1] * 10])
Example #21
 def test_marginal_value(self):
     """Calculation of SPN marginal value"""
     # Generate SPN
     model = spn.Poon11NaiveMixtureModel()
     model.build()
     # Set default inference type for each node
     model.root.set_inference_types(spn.InferenceType.MARGINAL)
     # Get values
     init = spn.initialize_weights(model.root)
     val_marginal = model.root.get_value(
         inference_type=spn.InferenceType.MARGINAL)
     val_default = model.root.get_value()
     val_log_marginal = model.root.get_log_value(
         inference_type=spn.InferenceType.MARGINAL)
     val_log_default = model.root.get_log_value()
     with self.test_session() as sess:
         init.run()
         out_default = sess.run(
             val_default, feed_dict={model.latent_indicators: model.feed})
         out_marginal = sess.run(
             val_marginal, feed_dict={model.latent_indicators: model.feed})
         out_log_default = sess.run(
             tf.exp(val_log_default),
             feed_dict={model.latent_indicators: model.feed})
         out_log_marginal = sess.run(
             tf.exp(val_log_marginal),
             feed_dict={model.latent_indicators: model.feed})
     # Check if values sum to 1
     # WARNING: Below does not pass test for places=7 with float32 dtype
     self.assertAlmostEqual(
         out_default[np.all(model.feed >= 0, axis=1), :].sum(),
         1.0,
         places=6)
     self.assertAlmostEqual(
         out_marginal[np.all(model.feed >= 0, axis=1), :].sum(),
         1.0,
         places=6)
     self.assertAlmostEqual(
         out_log_default[np.all(model.feed >= 0, axis=1), :].sum(),
         1.0,
         places=6)
     self.assertAlmostEqual(
         out_log_marginal[np.all(model.feed >= 0, axis=1), :].sum(),
         1.0,
         places=6)
     # Check joint probabilities
     np.testing.assert_array_almost_equal(out_default, model.true_values)
     np.testing.assert_array_almost_equal(out_marginal, model.true_values)
     np.testing.assert_array_almost_equal(out_log_default,
                                          model.true_values)
     np.testing.assert_array_almost_equal(out_log_marginal,
                                          model.true_values)
Example #22
    def perm_products(inputs,
                      num_inputs,
                      num_input_cols,
                      num_prods,
                      inf_type,
                      indices=None,
                      log=False,
                      output=None):
        if indices is not None:
            # Create inputs list with indices
            inputs = [[(inp, ind) for inp, ind in zip(inps, inds)]
                      for inps, inds in zip(inputs, indices)]

        if isinstance(inputs, list):  # A list of ContVars inputs - multiple inputs
            # Generate 'len(inputs)' PermProducts nodes, modeling 'n_prods' products
            # within each
            p = [spn.PermProducts(*inps) for inps in inputs]
        else:  # A single input of type ContVars
            num_inputs_array = np.array(num_inputs)
            num_input_cols_array = np.array(num_input_cols)
            num_cols = num_input_cols[0]
            num_vars = int(np.sum(num_inputs_array * num_input_cols_array))

            indices_list = [
                list(range(i, i + num_cols))
                for i in range(0, num_vars, num_cols)
            ]
            num_inputs_cumsum = np.cumsum(num_inputs_array).tolist()
            num_inputs_cumsum.insert(0, 0)

            inputs_list = [[(inputs, inds)
                            for inds in indices_list[start:stop]]
                           for start, stop in zip(num_inputs_cumsum[:-1],
                                                  num_inputs_cumsum[1:])]

            # Generate 'len(inputs)' PermProducts nodes, modeling 'n_prods'
            # products within each, with the inputs of every node emanating
            # from a common input source
            p = [spn.PermProducts(*inps) for inps in inputs_list]

        # Connect all PermProducts nodes to a single root Sum node and generate
        # its weights
        root = spn.Sum(*p)
        root.generate_weights()

        if log:
            value_op = root.get_log_value(inference_type=inf_type)
        else:
            value_op = root.get_value(inference_type=inf_type)

        return spn.initialize_weights(root), value_op
Example #23
    def sum(inputs, sum_indices, repetitions, inf_type, log=False, ivs=None):
        """ Creates the graph using only Sum nodes """
        sum_nodes = []
        for ind in sum_indices:
            sum_nodes.extend(
                [spn.Sum((inputs, ind)) for _ in range(repetitions)])
        for s in sum_nodes:
            s.generate_weights()
        if ivs:
            for s, iv in zip(sum_nodes, ivs):
                s.set_ivs(iv)

        root, value_op = Ops._build_root_and_value(inf_type, log, sum_nodes)

        return spn.initialize_weights(root), value_op
Example #24
    def products(inputs,
                 num_inputs,
                 num_input_cols,
                 num_prods,
                 inf_type,
                 indices=None,
                 log=False,
                 output=None):
        p = []
        # Generate 'len(inputs)' Products nodes, modeling 'n_prods' ∈ 'num_prods'
        # products within each
        for inps, n_inp_cols, n_prods in zip(inputs, num_input_cols,
                                             num_prods):
            num_inputs = len(inps)
            # Create permuted indices based on number and size of inps
            inds = map(int, np.arange(n_inp_cols))
            permuted_inds = list(product(inds, repeat=num_inputs))
            permuted_inds_list = [list(elem) for elem in permuted_inds]
            permuted_inds_list_of_list = []
            for elem in permuted_inds_list:
                permuted_inds_list_of_list.append([[e] for e in elem])

            # Create inputs-list by combining inps and indices
            permuted_inputs = []
            for indices in permuted_inds_list_of_list:
                permuted_inputs.append([tuple(i) for i in zip(inps, indices)])
            permuted_inputs = list(chain.from_iterable(permuted_inputs))

            # Generate a single Products node, modeling 'n_prods' product nodes
            # within, connecting it to inputs
            p = p + [spn.Products(*permuted_inputs, num_prods=n_prods)]

        # Connect all product nodes to a single root Sum node and generate its
        # weights
        root = spn.Sum(*p)
        root.generate_weights()

        if log:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
        else:
            mpe_path_gen = spn.MPEPath(value_inference_type=inf_type,
                                       log=False)

        mpe_path_gen.get_mpe_path(root)
        path_ops = [
            mpe_path_gen.counts[inp]
            for inp in list(chain.from_iterable(inputs))
        ]
        return spn.initialize_weights(root), path_ops
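The index bookkeeping above is easiest to see on a small case. For two inputs with two columns each, the permutation step yields one column index per input for every cross-combination, each wrapped as a singleton list so it can later be paired with its input node:

    from itertools import product

    permuted_inds = list(product(range(2), repeat=2))
    # [(0, 0), (0, 1), (1, 0), (1, 1)]
    wrapped = [[[e] for e in elem] for elem in permuted_inds]
    # [[[0], [0]], [[0], [1]], [[1], [0]], [[1], [1]]]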
Example #25
    def test_discretedense_saving_1class_externalivs(self):
        model1 = spn.DiscreteDenseModel(
            num_classes=1,
            num_decomps=2,
            num_subsets=3,
            num_mixtures=2,
            input_dist=spn.DenseSPNGenerator.InputDist.MIXTURE,
            num_input_mixtures=None,
            weight_init_value=spn.ValueType.RANDOM_UNIFORM(0, 1))
        sample_ivs1 = spn.IVs(num_vars=6, num_vals=2)
        model1.build(sample_ivs1)
        init1 = spn.initialize_weights(model1.root)

        feed_samples = np.array(list(itertools.product(range(2), repeat=6)))

        with tf.Session() as sess:
            # Initialize
            init1.run()

            # Save
            path = self.out_path(self.cid() + ".spn")
            model1.save_to_json(path,
                                pretty=True,
                                save_param_vals=True,
                                sess=sess)

        # Reset graph
        tf.reset_default_graph()

        with tf.Session() as sess:
            # Load
            model2 = spn.Model.load_from_json(path,
                                              load_param_vals=True,
                                              sess=sess)
            self.assertIs(type(model2), spn.DiscreteDenseModel)

            val_marginal2 = model2.root.get_value(
                inference_type=spn.InferenceType.MARGINAL)

            # Check model after loading
            self.assertTrue(model2.root.is_valid())
            out_marginal2 = sess.run(
                val_marginal2,
                feed_dict={model2.sample_inputs[0].node: feed_samples})
            self.assertAlmostEqual(out_marginal2.sum(), 1.0, places=6)

            # Writing graph
            self.write_tf_graph(sess, self.sid(), self.cid())
Example #26
    def generic_dense_test(self, name, num_decomps, num_subsets, num_mixtures,
                           input_dist, num_input_mixtures):
        """A generic test for DenseSPNGenerator."""
        v1 = spn.IVs(num_vars=3, num_vals=2, name="IVs1")
        v2 = spn.IVs(num_vars=3, num_vals=2, name="IVs2")

        gen = spn.DenseSPNGenerator(num_decomps=num_decomps,
                                    num_subsets=num_subsets,
                                    num_mixtures=num_mixtures,
                                    input_dist=input_dist,
                                    num_input_mixtures=num_input_mixtures)

        # Generating SPN
        root = gen.generate(v1, v2)

        # Generating random weights
        with tf.name_scope("Weights"):
            spn.generate_weights(root,
                                 tf.initializers.random_uniform(0.0, 1.0))

        # Generating weight initializers
        init = spn.initialize_weights(root)

        # Testing validity
        self.assertTrue(root.is_valid())

        # Generating value ops
        v = root.get_value()
        v_log = root.get_log_value()

        # Creating session
        with self.test_session() as sess:
            # Initializing weights
            init.run()
            # Computing all values
            feed = np.array(list(itertools.product(range(2), repeat=6)))
            feed_v1 = feed[:, :3]
            feed_v2 = feed[:, 3:]
            out = sess.run(v, feed_dict={v1: feed_v1, v2: feed_v2})
            out_log = sess.run(tf.exp(v_log),
                               feed_dict={
                                   v1: feed_v1,
                                   v2: feed_v2
                               })
            # Test if partition function is 1.0
            self.assertAlmostEqual(out.sum(), 1.0, places=6)
            self.assertAlmostEqual(out_log.sum(), 1.0, places=6)
            self.write_tf_graph(sess, self.sid(), self.cid())
Example #27
 def test_mixed_value(self):
     """Calculation of a mixed MPE/marginal value"""
     # Generate SPN
     model = spn.Poon11NaiveMixtureModel()
     model.build()
     # Set default inference type for each node
     model.root.set_inference_types(spn.InferenceType.MARGINAL)
     model.root.inference_type = spn.InferenceType.MPE
     # Get values
     init = spn.initialize_weights(model.root)
     val_marginal = model.root.get_value(
         inference_type=spn.InferenceType.MARGINAL)
     val_mpe = model.root.get_value(inference_type=spn.InferenceType.MPE)
     val_default = model.root.get_value()
     val_log_marginal = model.root.get_log_value(
         inference_type=spn.InferenceType.MARGINAL)
     val_log_mpe = model.root.get_log_value(
         inference_type=spn.InferenceType.MPE)
     val_log_default = model.root.get_log_value()
     with self.test_session() as sess:
         init.run()
         out_default = sess.run(
             val_default, feed_dict={model.latent_indicators: model.feed})
         out_marginal = sess.run(
             val_marginal, feed_dict={model.latent_indicators: model.feed})
         out_mpe = sess.run(val_mpe,
                            feed_dict={model.latent_indicators: model.feed})
         out_log_default = sess.run(
             tf.exp(val_log_default),
             feed_dict={model.latent_indicators: model.feed})
         out_log_marginal = sess.run(
             tf.exp(val_log_marginal),
             feed_dict={model.latent_indicators: model.feed})
         out_log_mpe = sess.run(
             tf.exp(val_log_mpe),
             feed_dict={model.latent_indicators: model.feed})
      # Check values against ground truth
     true_default = [[0.5], [0.35], [0.15], [0.2], [0.14], [0.06], [0.3],
                     [0.216], [0.09]]
     np.testing.assert_array_almost_equal(out_default, true_default)
     np.testing.assert_array_almost_equal(out_marginal, model.true_values)
     np.testing.assert_array_almost_equal(out_mpe, model.true_mpe_values)
     np.testing.assert_array_almost_equal(out_log_default, true_default)
     np.testing.assert_array_almost_equal(out_log_marginal,
                                          model.true_values)
     np.testing.assert_array_almost_equal(out_log_mpe,
                                          model.true_mpe_values)
Example #28
 def build_sumslayer_common(self, feed_dict, input_tuples, ivs, sum_sizes, weights,
                            root_weights):
     sumslayer = spn.SumsLayer(*input_tuples, num_or_size_sums=sum_sizes)
     if ivs:
         ivs_nodes = [sumslayer.generate_ivs()]
         feed_dict[ivs_nodes[0]] = np.stack(ivs, axis=1)
     else:
         ivs_nodes = []
     mask = sumslayer._build_mask()
     weights_padded = np.zeros(mask.size)
     weights_padded[mask.ravel()] = weights
     weight_node = sumslayer.generate_weights(
         initializer=tf.initializers.constant(weights_padded))
     # Connect a single sum to group outcomes
     root = spn.SumsLayer(sumslayer, num_or_size_sums=1)
     root.generate_weights(initializer=tf.initializers.constant(root_weights))
     init = spn.initialize_weights(root)
     return init, ivs_nodes, root, weight_node
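The padding step scatters the concatenated per-sum weights into a dense matrix whose shape follows the mask (rows are sums, columns run up to the largest sum size). A plain numpy illustration with a hand-made mask (`_build_mask` itself is internal to libspn):

    import numpy as np

    # Two sums of sizes 3 and 2, padded to the maximum size 3.
    mask = np.array([[True, True, True],
                     [True, True, False]])
    weights = [0.1, 0.2, 0.7, 0.4, 0.6]  # concatenated ragged weights
    padded = np.zeros(mask.size)
    padded[mask.ravel()] = weights
    print(padded.reshape(mask.shape))
    # [[0.1 0.2 0.7]
    #  [0.4 0.6 0. ]]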
Example #29
    def test_sum_update_1(self):
        child1 = spn.GaussianLeaf(num_vars=1,
                                  num_components=1,
                                  total_counts_init=3,
                                  loc_init=0.0,
                                  scale_init=1.0,
                                  learn_dist_params=True)
        child2 = spn.GaussianLeaf(num_vars=1,
                                  num_components=1,
                                  total_counts_init=7,
                                  loc_init=1.0,
                                  scale_init=4.0,
                                  learn_dist_params=True)
        root = spn.Sum(child1, child2)
        root.generate_weights()

        value_inference_type = spn.InferenceType.MARGINAL
        init_weights = spn.initialize_weights(root)
        learning = spn.EMLearning(root,
                                  log=True,
                                  value_inference_type=value_inference_type,
                                  use_unweighted=True)
        reset_accumulators = learning.reset_accumulators()
        accumulate_updates = learning.accumulate_updates()
        update_spn = learning.update_spn()
        train_likelihood = learning.value.values[root]

        with self.test_session() as sess:
            sess.run(init_weights)
            sess.run(reset_accumulators)
            sess.run(accumulate_updates, {child1: [[0.0]], child2: [[0.0]]})
            sess.run(update_spn)

            child1_n = sess.run(child1._total_count_variable)
            child2_n = sess.run(child2._total_count_variable)

        # use_unweighted is True, so the update passes the data point to the
        # component with the highest likelihood, ignoring the component weights.
        # In this case, N(0|0,1) > N(0|1,4), so child1 is picked.
        # If component weights are taken into account, then child2 will be picked
        # since 0.3*N(0|0,1) < 0.7*N(0|1,4).
        # self.assertEqual(root.n, 11)
        self.assertEqual(child1_n, 4)
        self.assertEqual(child2_n, 7)
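The inequality cited in the comment can be verified directly, reading N(x|mu, sigma^2) with the scale interpreted as a variance (a standalone check, not part of the test):

    from math import exp, pi, sqrt

    def normal_pdf(x, mean, var):
        return exp(-(x - mean) ** 2 / (2 * var)) / sqrt(2 * pi * var)

    # Unweighted: child1 wins, so its count grows from 3 to 4.
    print(normal_pdf(0, 0, 1))        # ~0.399
    print(normal_pdf(0, 1, 4))        # ~0.176
    # Weighted: child2 would win instead.
    print(0.3 * normal_pdf(0, 0, 1))  # ~0.120
    print(0.7 * normal_pdf(0, 1, 4))  # ~0.123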
Example #30
    def poons_multi(inputs, num_vals, num_mixtures, num_subsets, inf_type,
                    log=False, output=None):

        # Build a POON-like network with multi-op nodes
        subsets = [spn.ParSums((inputs, list(range(i*num_vals, (i+1)*num_vals))),
                               num_sums=num_mixtures) for i in range(num_subsets)]
        products = spn.PermProducts(*subsets)
        root = spn.Sum(products, name="root")

        # Generate dense SPN and all weights in the network
        spn.generate_weights(root)

        # Generate value ops based on inf_type and log
        if log:
            value_op = root.get_log_value(inference_type=inf_type)
        else:
            value_op = root.get_value(inference_type=inf_type)

        return root, spn.initialize_weights(root), value_op