def test_eval_parametric(self):
    """TF evaluation must match the Python log-likelihood for a product of
    parametric leaves, and converting the TF graph back to an SPN must
    preserve the equation."""
    # One observation covering all 7 scopes (one leaf type per scope).
    obs = np.ones((1, 7), dtype=np.float32)

    spn = (Gaussian(mean=1.0, stdev=1.0, scope=[0])
           * Exponential(l=1.0, scope=[1])
           * Gamma(alpha=1.0, beta=1.0, scope=[2])
           * LogNormal(mean=1.0, stdev=1.0, scope=[3])
           * Poisson(mean=1.0, scope=[4])
           * Bernoulli(p=0.6, scope=[5])
           * Categorical(p=[0.1, 0.2, 0.7], scope=[6]))

    # Python reference vs. TensorFlow evaluation of the same network.
    py_ll = log_likelihood(spn, obs)
    tf_ll = eval_tf(spn, obs)
    self.assertTrue(np.all(np.isclose(py_ll, tf_ll)))

    # Round-trip: copy -> TF graph -> back to SPN; equations must be equal.
    spn_copy = Copy(spn)
    tf_graph, data_placeholder, variable_dict = spn_to_tf_graph(spn_copy, obs, 1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Read the (initialized) TF variables back into spn_copy's leaves.
        tf_graph_to_spn(variable_dict)
    self.assertEqual(spn_to_str_equation(spn), spn_to_str_equation(spn_copy))
def tensorflow():
    """Demo: evaluate an SPN on the TensorFlow backend, then optimize its
    parameters with TF and report the improved log-likelihood."""
    import numpy as np

    spn = create_SPN()
    sample = np.array([1.0, 0.0, 1.0]).reshape(-1, 3)

    from spn.gpu.TensorFlow import eval_tf
    ll_tf = eval_tf(spn, sample)
    print("tensorflow ll", ll_tf, np.exp(ll_tf))

    from spn.algorithms.Inference import log_likelihood
    from spn.gpu.TensorFlow import optimize_tf

    # Gradient-based parameter tuning on the TF graph, then score in Python.
    tuned_spn = optimize_tf(spn, sample)
    ll_tuned = log_likelihood(tuned_spn, sample)
    print("tensorflow optimized ll", ll_tuned, np.exp(ll_tuned))
def test_eval_gaussian(self):
    """TF evaluation must match the Python log-likelihood for a Gaussian
    SPN learned from synthetic mixture data."""
    np.random.seed(17)
    # Two-component normal mixture, folded into 10 columns of 400 rows.
    samples = (np.random.normal(10, 0.01, size=2000).tolist()
               + np.random.normal(30, 10, size=2000).tolist())
    samples = np.array(samples).reshape((-1, 10))
    samples = samples.astype(np.float32)

    ncols = samples.shape[1]
    ctx = Context(meta_types=[MetaType.REAL] * ncols,
                  parametric_types=[Gaussian] * ncols)
    spn = learn_parametric(samples, ctx)

    py_ll = log_likelihood(spn, samples)
    tf_ll = eval_tf(spn, samples)
    self.assertTrue(np.all(np.isclose(py_ll, tf_ll)))
def test_eval_histogram(self):
    """TF evaluation must match the Python log-likelihood for an MSPN with
    histogram leaves learned from discretized mixture data."""
    np.random.seed(17)
    # Two-component normal mixture, clipped at zero and discretized.
    samples = (np.random.normal(10, 0.01, size=2000).tolist()
               + np.random.normal(30, 10, size=2000).tolist())
    samples = np.array(samples).reshape((-1, 10))
    samples[samples < 0] = 0
    samples = samples.astype(int)

    ctx = Context(meta_types=[MetaType.DISCRETE] * samples.shape[1])
    ctx.add_domains(samples)
    spn = learn_mspn(samples, ctx)

    py_ll = log_likelihood(spn, samples)
    tf_ll = eval_tf(spn, samples)
    self.assertTrue(np.all(np.isclose(py_ll, tf_ll)))
# Disk cache so repeated runs skip the expensive structure learning.
memory = Memory(cachedir="cache", verbose=0, compress=9)


@memory.cache
def learn(data, ds_context):
    """Learn an SPN structure (KMeans row splits, RDC column splits,
    histogram leaves); results are memoized on disk by joblib."""
    return learn_structure(data, ds_context, get_split_rows_KMeans(),
                           get_split_cols_RDC(), create_histogram_leaf)


if __name__ == '__main__':
    np.random.seed(17)
    # Two-component normal mixture folded into 10 columns.
    raw = (np.random.normal(10, 0.01, size=2000).tolist()
           + np.random.normal(30, 10, size=2000).tolist())
    data = np.array(raw).reshape((-1, 10))

    ds_context = Context(meta_types=[MetaType.REAL] * data.shape[1])
    ds_context.add_domains(data)
    spn = learn(data, ds_context)

    py_ll = likelihood(spn, data, histogram_likelihood)

    # Evaluate twice on purpose: the first call pays graph-construction
    # cost, so the second timing reflects steady-state evaluation.
    tf_out, elapsed_ns = eval_tf(spn, data, log_space=False,
                                 save_graph_path='tfgraph', trace=True)
    tf_out, elapsed_ns = eval_tf(spn, data, log_space=False,
                                 save_graph_path='tfgraph', trace=True)
    tf_out_log, _elapsed_log = eval_tf(spn, data, log_space=True,
                                       save_graph_path='tfgraph')

    print("results are similar for TF and Python?",
          np.all(np.isclose(py_ll - np.log(tf_out), 0)))
    print("time in ns", elapsed_ns)
    print("results are similar for Log TF and Python?",
          np.all(np.isclose(py_ll - tf_out_log, 0)))
if __name__ == '__main__':
    add_histogram_inference_support()
    np.random.seed(17)
    # Two-component normal mixture, clipped at zero and discretized.
    raw = (np.random.normal(10, 0.01, size=2000).tolist()
           + np.random.normal(30, 10, size=2000).tolist())
    data = np.array(raw).reshape((-1, 10))
    data[data < 0] = 0
    data = (data * 1).astype(int)

    ds_context = Context(meta_types=[MetaType.DISCRETE] * data.shape[1])
    ds_context.add_domains(data)
    # Force constant columns so the KDE-histogram leaves are degenerate.
    data[:, 0] = 0
    data[:, 1] = 1

    # NOTE(review): `spn` is rebound three times below; only the final
    # mixture is evaluated. The earlier assignments are kept as in the
    # original (they still run their leaf/learning side effects).
    spn = learn(data, ds_context)
    spn = create_histogram_leaf(data[:, 0].reshape((-1, 1)), ds_context, [0],
                                alpha=False, hist_source="kde") * \
        create_histogram_leaf(data[:, 1].reshape((-1, 1)), ds_context, [1],
                              alpha=False, hist_source="kde")
    spn = 0.3 * create_histogram_leaf(data[:, 0].reshape((-1, 1)), ds_context, [0],
                                      alpha=False, hist_source="kde") + \
        0.7 * create_histogram_leaf(data[:, 0].reshape((-1, 1)), ds_context, [0],
                                    alpha=False, hist_source="kde")

    py_ll = log_likelihood(spn, data)
    tf_graph, placeholder = spn_to_tf_graph(spn, data)
    log_tf_out = eval_tf(tf_graph, placeholder, data)
    print("results are similar for Log TF and Python?",
          np.all(np.isclose(py_ll, log_tf_out)))