def testWeisfeilerLehman(self):
    """Run WL relabeling on the dummy graph until stable and compare the state.

    NOTE(review): an identical ``testWeisfeilerLehman`` definition appears
    again later in this file; the later definition shadows this one —
    confirm which is intended and remove the duplicate.
    """
    # Expected label-compression table after WL converges on the dummy graph.
    wl_state_exp = {
        "labels": {
            "0": "wl_0.0",
            "1": "wl_0.1",
            "a": "wl_0.2",
            "b": "wl_0.3",
            "wl_0.0;in(wl_0.3)": "wl_1.0",
            "wl_0.0;any(wl_0.2),in(wl_0.2)": "wl_1.1",
            "wl_0.1;any(wl_0.2),out(wl_0.2,wl_0.3)": "wl_1.2",
            "wl_0.1;any(wl_0.2),out(wl_0.2)": "wl_1.3",
            "wl_0.2;in(wl_0.1),out(wl_0.0)": "wl_1.4",
            "wl_0.2;any(wl_0.0,wl_0.1)": "wl_1.5",
            "wl_0.3;in(wl_0.1),out(wl_0.0)": "wl_1.6",
            "wl_1.0;in(wl_1.6)": "wl_2.0",
            "wl_1.1;any(wl_1.5),in(wl_1.4)": "wl_2.1",
            "wl_1.2;any(wl_1.5),out(wl_1.4,wl_1.6)": "wl_2.2",
            "wl_1.3;any(wl_1.5),out(wl_1.4)": "wl_2.3",
            "wl_1.4;in(wl_1.2),out(wl_1.1)": "wl_2.4",
            "wl_1.4;in(wl_1.3),out(wl_1.1)": "wl_2.5",
            "wl_1.5;any(wl_1.1,wl_1.2)": "wl_2.6",
            "wl_1.5;any(wl_1.1,wl_1.3)": "wl_2.7",
            "wl_1.6;in(wl_1.2),out(wl_1.0)": "wl_2.8",
            "wl_2.0;in(wl_2.8)": "wl_3.0",
            "wl_2.1;any(wl_2.7),in(wl_2.4)": "wl_3.1",
            "wl_2.1;any(wl_2.6),in(wl_2.5)": "wl_3.2",
            "wl_2.2;any(wl_2.6),out(wl_2.4,wl_2.8)": "wl_3.3",
            "wl_2.3;any(wl_2.7),out(wl_2.5)": "wl_3.4",
            "wl_2.4;in(wl_2.2),out(wl_2.1)": "wl_3.5",
            "wl_2.5;in(wl_2.3),out(wl_2.1)": "wl_3.6",
            "wl_2.6;any(wl_2.1,wl_2.2)": "wl_3.7",
            "wl_2.7;any(wl_2.1,wl_2.3)": "wl_3.8",
            "wl_2.8;in(wl_2.2),out(wl_2.0)": "wl_3.9",
        },
        "next_labels": {0: 4, 1: 7, 2: 9, 3: 10},
    }

    graph = Hypergraph(example_graphs.gt_dummy_wl)
    graph, wl_state = weisfeiler_lehman.init(graph, test_mode=True)

    # Iterate WL relabeling until two consecutive colorings coincide.
    iteration = 1
    stable = False
    while not stable:
        relabeled, wl_state = weisfeiler_lehman.iterate(
            graph, wl_state, iteration, test_mode=True)
        stable = weisfeiler_lehman.is_stable(graph, relabeled, iteration)
        if not stable:
            graph = relabeled
            iteration += 1

    self.assertEqual(
        wl_state_exp,
        wl_state,
        "The multi-sets of labels computed by Weisfeiler-Lehman are not correct."
    )
def testWeisfeilerLehman(self):
    # Runs Weisfeiler-Lehman relabeling on the dummy graph until the coloring
    # stabilizes, then checks the accumulated WL state against the expected
    # label-compression table.
    # NOTE(review): this is a byte-identical duplicate of the
    # testWeisfeilerLehman definition earlier in the file; this later
    # definition shadows the earlier one — confirm and deduplicate.
    #
    # Expected state: "labels" maps each raw label / multiset signature to its
    # compressed wl_<iter>.<idx> name; "next_labels" records, per iteration,
    # the next free label index.
    wl_state_exp = {
        "labels": {
            "0": "wl_0.0",
            "1": "wl_0.1",
            "a": "wl_0.2",
            "b": "wl_0.3",
            "wl_0.0;in(wl_0.3)": "wl_1.0",
            "wl_0.0;any(wl_0.2),in(wl_0.2)": "wl_1.1",
            "wl_0.1;any(wl_0.2),out(wl_0.2,wl_0.3)": "wl_1.2",
            "wl_0.1;any(wl_0.2),out(wl_0.2)": "wl_1.3",
            "wl_0.2;in(wl_0.1),out(wl_0.0)": "wl_1.4",
            "wl_0.2;any(wl_0.0,wl_0.1)": "wl_1.5",
            "wl_0.3;in(wl_0.1),out(wl_0.0)": "wl_1.6",
            "wl_1.0;in(wl_1.6)": "wl_2.0",
            "wl_1.1;any(wl_1.5),in(wl_1.4)": "wl_2.1",
            "wl_1.2;any(wl_1.5),out(wl_1.4,wl_1.6)": "wl_2.2",
            "wl_1.3;any(wl_1.5),out(wl_1.4)": "wl_2.3",
            "wl_1.4;in(wl_1.2),out(wl_1.1)": "wl_2.4",
            "wl_1.4;in(wl_1.3),out(wl_1.1)": "wl_2.5",
            "wl_1.5;any(wl_1.1,wl_1.2)": "wl_2.6",
            "wl_1.5;any(wl_1.1,wl_1.3)": "wl_2.7",
            "wl_1.6;in(wl_1.2),out(wl_1.0)": "wl_2.8",
            "wl_2.0;in(wl_2.8)": "wl_3.0",
            "wl_2.1;any(wl_2.7),in(wl_2.4)": "wl_3.1",
            "wl_2.1;any(wl_2.6),in(wl_2.5)": "wl_3.2",
            "wl_2.2;any(wl_2.6),out(wl_2.4,wl_2.8)": "wl_3.3",
            "wl_2.3;any(wl_2.7),out(wl_2.5)": "wl_3.4",
            "wl_2.4;in(wl_2.2),out(wl_2.1)": "wl_3.5",
            "wl_2.5;in(wl_2.3),out(wl_2.1)": "wl_3.6",
            "wl_2.6;any(wl_2.1,wl_2.2)": "wl_3.7",
            "wl_2.7;any(wl_2.1,wl_2.3)": "wl_3.8",
            "wl_2.8;in(wl_2.2),out(wl_2.0)": "wl_3.9"
        },
        "next_labels": {
            0: 4,
            1: 7,
            2: 9,
            3: 10
        }
    }
    hyper_dummy_wl = Hypergraph(example_graphs.gt_dummy_wl)
    hyper_dummy_wl, wl_state = weisfeiler_lehman.init(hyper_dummy_wl, test_mode=True)
    i = 1
    # Refine labels iteration by iteration; stop as soon as the new coloring
    # induces the same partition as the previous one.
    while True:
        new_hyper_dummy_wl, wl_state = weisfeiler_lehman.iterate(hyper_dummy_wl, wl_state, i, test_mode=True)
        if weisfeiler_lehman.is_stable(hyper_dummy_wl, new_hyper_dummy_wl, i):
            break
        hyper_dummy_wl = new_hyper_dummy_wl
        i += 1
    self.assertEqual(wl_state_exp, wl_state, "The multi-sets of labels computed by Weisfeiler-Lehman are not correct.")
def extract_canon_repr_for_each_wl_iter(hypergraph, wl_iterations=0,
                                        wl_state=None,
                                        accumulate_wl_results=True):
    """Yield the canonical representation of *hypergraph* per WL iteration.

    Generator producing ``(i, canon_str, wl_state)`` tuples, where ``i`` is
    the WL iteration number (0 = the unrelabeled graph), ``canon_str`` is the
    Arnborg-Proskurowski canonical string of the (relabeled) hypergraph, and
    ``wl_state`` is the running Weisfeiler-Lehman label state.

    :param hypergraph: the input hypergraph (relabeled in place across
        iterations).
    :param wl_iterations: number of WL refinement iterations to run.
    :param wl_state: optional WL state to continue from (shared label table).
    :param accumulate_wl_results: if True, yield a result for every
        iteration; if False, yield only for the final iteration.
    """
    for i in range(wl_iterations + 1):
        if i == 1:
            # WL initialization happens lazily, just before the first
            # refinement iteration; iteration 0 uses the raw labels.
            hypergraph, wl_state = weisfeiler_lehman.init(hypergraph, wl_state)
        if i >= 1:
            hypergraph, wl_state = weisfeiler_lehman.iterate(hypergraph,
                                                             wl_state, i)
        if i == wl_iterations or accumulate_wl_results:
            canon_str = arnborg_proskurowski.get_canonical_representation(hypergraph)
            if canon_str == u"Tree-width > 3":
                # TODO: How to handle graphs with larger tree-width?
                # For now, skip the rest of this graph.
                # BUGFIX (PEP 479): `raise StopIteration` inside a generator
                # escapes as RuntimeError on Python 3.7+; `return` is the
                # correct way to end the generator early.
                return
            yield i, canon_str, wl_state
def extract_features_for_each_wl_iter(hypergraph, wl_iterations=0,
                                      wl_state=None,
                                      accumulate_wl_shingles=True):
    """Yield processed features of *hypergraph* for each WL iteration.

    Generator producing ``(i, features, wl_state)`` tuples, where ``i`` is
    the WL iteration number (0 = the unrelabeled graph), ``features`` is an
    iterator over the processed features of the (relabeled) hypergraph, and
    ``wl_state`` is the running Weisfeiler-Lehman label state.

    :param hypergraph: the input hypergraph (relabeled in place across
        iterations).
    :param wl_iterations: number of WL refinement iterations to run.
    :param wl_state: optional WL state to continue from (shared label table).
    :param accumulate_wl_shingles: if True, yield features for every
        iteration; if False, yield only for the final iteration.
    """
    # The reduced features are extracted once, up front; only their
    # processing depends on the current WL relabeling.
    # TODO: How to handle graphs with tree-width > 3? For now all possible
    # features are collected regardless.
    raw_features = arnborg_proskurowski.get_reduced_features(hypergraph)

    for iteration in range(wl_iterations + 1):
        if iteration == 1:
            # WL is initialized lazily, right before the first refinement.
            hypergraph, wl_state = weisfeiler_lehman.init(hypergraph, wl_state)
        if iteration >= 1:
            hypergraph, wl_state = weisfeiler_lehman.iterate(hypergraph,
                                                             wl_state,
                                                             iteration)
        if iteration == wl_iterations or accumulate_wl_shingles:
            # Process eagerly so each feature sees the hypergraph as it is
            # at this iteration, then flatten into a single iterator.
            processed = [process_raw_feature(feature, hypergraph)
                         for feature in raw_features]
            yield iteration, itertools.chain.from_iterable(processed), wl_state