def test_make_clique_bipartite(self):
    G = self.G
    B = nx.make_clique_bipartite(G)
    assert_equal(sorted(B.nodes()),
                 [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    H = nx.project_down(B)
    assert_equal(H.adj, G.adj)
    H1 = nx.project_up(B)
    assert_equal(H1.nodes(), [1, 2, 3, 4, 5])
    H2 = nx.make_max_clique_graph(G)
    assert_equal(H1.adj, H2.adj)
def test_make_clique_bipartite(self):
    G = self.G
    B = nx.make_clique_bipartite(G)
    assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    # Project onto the nodes of the original graph.
    H = nx.project(B, range(1, 12))
    assert H.adj == G.adj
    # Project onto the nodes representing the cliques.
    H1 = nx.project(B, range(-5, 0))
    # Relabel the negative numbers as positive ones.
    H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)})
    assert sorted(H1) == [1, 2, 3, 4, 5]
def test_make_clique_bipartite(self):
    G = self.G
    B = nx.make_clique_bipartite(G)
    assert_equal(sorted(B),
                 [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    # Project onto the nodes of the original graph.
    H = nx.project(B, range(1, 12))
    assert_equal(H.adj, G.adj)
    # Project onto the nodes representing the cliques.
    H1 = nx.project(B, range(-5, 0))
    # Relabel the negative numbers as positive ones.
    H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)})
    assert_equal(sorted(H1), [1, 2, 3, 4, 5])
def test_make_clique_bipartite(self):
    G = self.G
    B = nx.make_clique_bipartite(G)
    assert_equal(sorted(B),
                 [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    # Project onto the nodes of the original graph.
    H = nx.project(B, range(1, 12))
    assert_equal(H.adj, G.adj)
    # Project onto the nodes representing the cliques.
    H1 = nx.project(B, range(-5, 0))
    # Relabel the negative numbers as positive ones.
    H1 = nx.relabel_nodes(H1, dict((-v, v) for v in range(1, 6)))
    assert_equal(sorted(H1), [1, 2, 3, 4, 5])
def test_make_max_clique_graph(self):
    """Tests that the maximal clique graph is the same as the bipartite
    clique graph after being projected onto the nodes representing the
    cliques.

    """
    G = self.G
    B = nx.make_clique_bipartite(G)
    # Project onto the nodes representing the cliques.
    H1 = nx.project(B, range(-5, 0))
    # Relabel the negative numbers as nonnegative ones, starting at 0.
    H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)})
    H2 = nx.make_max_clique_graph(G)
    assert H1.adj == H2.adj
def test_make_max_clique_graph(self):
    """Tests that the maximal clique graph is the same as the bipartite
    clique graph after being projected onto the nodes representing the
    cliques.

    """
    G = self.G
    B = nx.make_clique_bipartite(G)
    # Project onto the nodes representing the cliques.
    H1 = nx.project(B, range(-5, 0))
    # Relabel the negative numbers as nonnegative ones, starting at 0.
    H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)})
    H2 = nx.make_max_clique_graph(G)
    assert_equal(H1.adj, H2.adj)
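# The tests above rely on make_clique_bipartite's labeling convention: the
# original nodes keep their (positive) labels, while the maximal cliques
# appear as negative nodes -1, -2, ...  A minimal, self-contained sketch of
# that convention on a current NetworkX release, where the old nx.project /
# project_up / project_down helpers have been replaced by
# bipartite.projected_graph:

import networkx as nx
from networkx.algorithms import bipartite

# A path 0-1-2 has two maximal cliques: {0, 1} and {1, 2}.
G = nx.path_graph(3)
B = nx.make_clique_bipartite(G)
print(sorted(B))  # [-2, -1, 0, 1, 2]

# Projecting onto the original nodes recovers G's adjacency.
H = bipartite.projected_graph(B, [n for n in B if n >= 0])
print(set(map(frozenset, H.edges())) == set(map(frozenset, G.edges())))  # True

# Projecting onto the clique nodes yields the max clique graph: the two
# cliques share node 1, so they are adjacent.
C = bipartite.projected_graph(B, [n for n in B if n < 0])
print(C.number_of_edges())  # 1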
import pickle
import sys

import networkx as nx

# Module-level state shared by main() (referenced via ``global`` below).
gay_set = set()
straight_set = set()
gay_cliques = set()
straight_cliques = set()


def main():
    global u
    global gay_set
    global straight_set
    global gay_cliques
    global straight_cliques
    global B
    missing = open("missing.txt", "w")
    with open(sys.argv[1], "rb") as graph_file:
        u = pickle.load(graph_file)
    # Partition the labeled ego nodes by orientation.
    for ego in u:
        if "orientation" in u.nodes[ego]:
            if u.nodes[ego]["orientation"] == 1:
                gay_set.add(ego)
            else:
                straight_set.add(ego)
    B = nx.make_clique_bipartite(u)
    # In B, the neighbors of an original node are exactly the maximal
    # cliques that contain it.
    for node in gay_set:
        gay_cliques = gay_cliques | set(B.neighbors(node))
    for node in straight_set:
        straight_cliques = straight_cliques | set(B.neighbors(node))
    print("Gay max cliques: %d" % len(gay_cliques))
    print("Straight max cliques: %d" % len(straight_cliques))
    print("Intersection: %d" % len(straight_cliques & gay_cliques))
    # Copy the orientation labels onto the bipartite graph.
    for ego in u:
        if "orientation" in u.nodes[ego]:
            B.nodes[ego]["orientation"] = u.nodes[ego]["orientation"]
    for ego, alter in B.edges():
        B[ego][alter]["embeddedness"] = 1.0
    with open(sys.argv[2], "wb") as bipartite_output_file:
        pickle.dump(B, bipartite_output_file)
    missing.close()
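# The script above leans on a single property of the bipartite clique graph:
# the neighbors of an original node in B are precisely the maximal cliques
# containing it.  A minimal sketch of that lookup on a toy graph (the graph
# and expected values here are illustrative, not from the original script):

import networkx as nx

# Two triangles sharing node 2: maximal cliques {0, 1, 2} and {2, 3, 4}.
G = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4)])
B = nx.make_clique_bipartite(G)

print(sorted(B.neighbors(2)))     # [-2, -1]: node 2 lies in both cliques
print(len(list(B.neighbors(0))))  # 1: node 0 lies in a single clique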
lvl2.append(graphs_len[i])
# lvl2.append(len(ClG_relabeled.nodes()[i]))  # eval
# lvl2.append(len(eval(ClG_relabeled.nodes()[i])))  # eval
# print 'lvl2'
# print lvl2
# print str(" ")

# plt.figure()
# nx.draw(ClG_relabeled, pos=posC, font_size=16, with_labels=False,
#         node_size=[v * 100 for v in lvl2], node_color='g')
# for p in posC:
#     posC[p][1] += 0.04
# nx.draw_networkx_labels(ClG_relabeled, posC)

BcG = nx.make_clique_bipartite(G, fpos=True)
bottom_nodes, top_nodes = bipartite.sets(BcG)
# Label each clique node with the list of graph nodes it contains.
BcG_labels = dict()
Bcg_labels = dict()
for nd in top_nodes:
    tem = '['
    for cc in nx.all_neighbors(BcG, nd):
        tem += str(cc) + ', '
    tem = tem[:-2] + ']'
    BcG_labels[nd] = tem
    Bcg_labels[nd] = tem
for nd in bottom_nodes:
    BcG_labels[nd] = str(nd)
# print bottom_nodes
# print top_nodes
#!/usr/bin/env python
# Function: demonstrate bipartite projection and make_clique_bipartite.
# Filename:

import networkx as nx
from networkx.algorithms import bipartite
import matplotlib.pyplot as plt

B = nx.Graph()
# Add a project 101 with three participants: 201, 202, 203.
B.add_edge(101, 201)
B.add_edge(101, 202)
B.add_edge(101, 203)
# Add a project 102 with two participants: 203, 204.
B.add_edge(102, 203)
B.add_edge(102, 204)

NSet = bipartite.sets(B)  # Split the bipartite graph into its two node sets.
print('NSet', NSet)
Act = nx.project(B, NSet[0])    # Project onto the project nodes.
Actor = nx.project(B, NSet[1])  # Project onto the participant nodes.
print(Act.edges())    # Prints [(101, 102)]
print(Actor.edges())  # Prints [(201, 202), (201, 203), (202, 203), (203, 204)]

G = nx.make_clique_bipartite(Actor)
print(G.edges())  # Prints [(201, -1), (202, -1), (203, -2), (203, -1), (204, -2)]
nx.draw(G)
plt.show()
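# Note: nx.project was removed from newer NetworkX releases.  The same two
# projections can be written with bipartite.projected_graph; a minimal
# sketch assuming the same graph B as above:

from networkx.algorithms import bipartite

# bipartite.sets makes no ordering guarantee, so check membership rather
# than relying on which set comes first.
left, right = bipartite.sets(B)
project_nodes = left if 101 in left else right
participant_nodes = right if 101 in left else left
Act = bipartite.projected_graph(B, project_nodes)
Actor = bipartite.projected_graph(B, participant_nodes)
print(Act.number_of_edges())    # 1: projects 101 and 102 share participant 203
print(Actor.number_of_edges())  # 4 participant-participant edges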
def optimize(self):
    # observation_set = ["weight", "nnet_outputs"]
    factors = self.factors
    all_variables = sorted(set(v for factor in factors for v in factor.names))
    factors = [
        new_factor
        for factor in factors
        for new_factor in factor.factorize()
    ]
    # Build the variable co-occurrence matrix: two variables are adjacent
    # iff they appear together in some factor.
    adjacency_matrix = np.eye(len(all_variables), dtype=bool)
    indices_to_factor = []
    for factor in factors:
        for v1 in factor.names:
            for v2 in factor.names:
                adjacency_matrix[all_variables.index(v1),
                                 all_variables.index(v2)] = 1
        indices_to_factor.append({
            "indices": [all_variables.index(var) for var in factor.names],
            "factor": factor,
            "assigned": False,
        })
    # Lift the co-occurrence graph to its clique bipartite graph; clique
    # nodes carry bipartite == 0.  Projecting onto them gives the graph of
    # maximal cliques.
    g = nx.from_numpy_matrix(adjacency_matrix)
    G = nx.make_clique_bipartite(g)
    cliques = [v for v in G.nodes() if G.nodes[v]["bipartite"] == 0]
    clique_graph = nx.project(G, cliques)

    mapping = []
    new_cliques = []
    new_factors = []
    not_yet_mapped_names = set(self.names)
    changed_variables = set()

    ###################################
    # Merge ConstraintFactor together #
    # and compute new variables       #
    ###################################
    # Visit cliques by decreasing number of neighbor cliques.
    for clique_idx in sorted(clique_graph,
                             key=lambda node_idx: len(clique_graph[node_idx]),
                             reverse=True):
        # Merge this clique's factors together.
        clique_var_indices = list(G[clique_idx].keys())
        clique_factors = []
        for ind_fac in indices_to_factor:
            if (set(ind_fac["indices"]) <= set(clique_var_indices)
                    and not ind_fac["assigned"]):
                ind_fac["assigned"] = True
                clique_factors.append(ind_fac["factor"])
        constraint_factors = [
            fac for fac in clique_factors if isinstance(fac, ConstraintFactor)
        ]
        non_constraint_factors = [
            fac for fac in clique_factors if fac not in constraint_factors
        ]
        clique_factors = (
            [ConstraintFactor(And(*(fac.expr for fac in constraint_factors))),
             *non_constraint_factors]
            if len(constraint_factors)
            else non_constraint_factors
        )
        new_cliques.append(
            list(range(len(new_factors),
                       len(new_factors) + len(clique_factors))))
        new_factors.extend(clique_factors)
        for factor in clique_factors:
            if isinstance(factor, ConstraintFactor):
                variables_to_group = [
                    v for v in factor.names if v not in changed_variables
                ]
                valid_assignements = torch.unique(
                    factor.get_states(variables_to_group).long(),
                    dim=0).bool()
                super_variable_name = "/".join(map(str, variables_to_group))
                indices_in_input = pd.factorize(
                    [*self.names, *variables_to_group])[0][len(self.names):]
                mapping.append((super_variable_name, variables_to_group,
                                valid_assignements, indices_in_input))
                not_yet_mapped_names -= set(variables_to_group)
                changed_variables |= set(variables_to_group)
    for name in sorted(not_yet_mapped_names):
        indice_in_input = self.names.index(name)
        mapping.append((name, [name], None, [indice_in_input]))
    # new_variables.extend(set(all_variables) - changed_variables)
    factors = [factor.change_variables(mapping) for factor in new_factors]
    cliques = new_cliques
    new_cliques = []
    new_factors = []
    cluster_hints = []

    ##############################
    # Merge HintFactors together #
    ##############################
    for clique in cliques:
        clique_factors = [factors[i] for i in clique]
        clique_hint_factors = [
            fac for fac in clique_factors
            if isinstance(fac, (HintFactor, ObservationFactor))
            and isinstance(fac.fn, Indexer)
        ]
        cluster_hints = []
        for fac in clique_hint_factors:
            matching_cluster_hint = next(
                (cluster_hint for cluster_hint in cluster_hints
                 if can_merge(cluster_hint, fac)),
                None)
            if matching_cluster_hint is None:
                matching_cluster_hint = fac.clone()
                matching_cluster_hint.mask = matching_cluster_hint.mask.long()
                cluster_hints.append(matching_cluster_hint)
            else:
                last_indexers_1 = matching_cluster_hint.fn.indexers[-1]
                last_indexers_1 = (list(last_indexers_1)
                                   if isinstance(last_indexers_1, (list, tuple))
                                   else [last_indexers_1])
                last_indexers_2 = fac.fn.indexers[-1]
                last_indexers_2 = (list(last_indexers_2)
                                   if isinstance(last_indexers_2, (list, tuple))
                                   else [last_indexers_2])
                new_last_indexer = last_indexers_1 + last_indexers_2
                matching_cluster_hint.fn = Indexer[
                    (*fac.fn.indexers[:-1], new_last_indexer)]
                offseted_mask = fac.mask.long() + len(last_indexers_1)
                offseted_mask[~fac.mask] = 0
                matching_cluster_hint.mask = (matching_cluster_hint.mask
                                              + offseted_mask)
        for cluster_hint in cluster_hints:
            cluster_hint.mask -= 1
        new_factors.extend(fac for fac in clique_factors
                           if fac not in clique_hint_factors)
        new_factors.extend(cluster_hints)
    factors_input_indices = factorize(
        values=[np.asarray(factor.names) for factor in new_factors],
        reference_values=[entry[0] for entry in mapping],
        freeze_reference=True)[0]
    return CRF(new_factors, mapping, names=self.names, shape=self.shape,
               factors_input_indices=factors_input_indices)
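# The graph-theoretic core of optimize() above is a four-step pipeline:
# build a variable co-occurrence matrix, turn it into a graph, lift it to
# the clique bipartite graph, and project onto the clique side.  A
# standalone sketch of just that pipeline, using from_numpy_array and
# bipartite.projected_graph (the current NetworkX replacements for
# from_numpy_matrix and nx.project); the toy matrix is illustrative:

import numpy as np
import networkx as nx
from networkx.algorithms import bipartite

# Toy co-occurrence over 4 variables: {0, 1, 2} interact, and so do {2, 3}.
adj = np.zeros((4, 4), dtype=int)
for i, j in [(0, 1), (0, 2), (1, 2), (2, 3)]:
    adj[i, j] = adj[j, i] = 1

g = nx.from_numpy_array(adj)
B = nx.make_clique_bipartite(g)

# Clique nodes carry bipartite == 0; original variables carry bipartite == 1.
cliques = [v for v in B if B.nodes[v]["bipartite"] == 0]
clique_graph = bipartite.projected_graph(B, cliques)
print(clique_graph.number_of_edges())  # 1: the two cliques share variable 2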
def main():
    global di
    global u
    global v
    global gays
    global straights
    global gay_cliques
    global straight_cliques
    global B
    global labeled
    missing = open("missing.txt", "w")
    with open(sys.argv[3], "rb") as graph_file:
        u = pickle.load(graph_file)
    # Try with the clique graph.
    u = nx.make_clique_bipartite(u)
    for i, j in u.edges():
        u[i][j]["embeddedness"] = 1
    u = set_orientation_by_file(sys.argv[1], "orientation", u)
    u = set_orientation_by_file(sys.argv[2], "test_orientation", u)
    all_labeled = u.subgraph(
        [x for x in u.nodes if "orientation" in u.nodes[x]])
    gay_graph = u.subgraph([
        y for y in [x for x in u.nodes if "orientation" in u.nodes[x]]
        if u.nodes[y]["orientation"] == 1
    ])
    straight_graph = u.subgraph([
        y for y in [x for x in u.nodes if "orientation" in u.nodes[x]]
        if u.nodes[y]["orientation"] == -1
    ])
    # B = nx.make_clique_bipartite(u)
    # In the bipartite clique graph, a node's neighbors are the maximal
    # cliques containing it.
    for node in gay_graph:
        gay_cliques = gay_cliques | set(u.neighbors(node))
    for node in straight_graph:
        straight_cliques = straight_cliques | set(u.neighbors(node))
    # for node in straight_cliques & gay_cliques:
    #     print("Share clique: %s" % node)
    #     print("Neighbors: %s" % u.neighbors(node))
    #     u.remove_node(node)

    ##################################
    #
    # Invent new node labels that snap will understand,
    # i.e., from 0 to n-1.
    # Put the labeled training nodes first, followed by test nodes,
    # followed by the rest.
    #
    # Note that u is not actually relabeled.  The labels are applied
    # when the output file is written.
    #
    count = 0
    not_labeled = list()
    labeled = list()
    test_labeled = list()
    test_labels = list()
    labeled_data_for_snap = open("snap_labels.txt", "w")
    node_trans = dict()
    node_untrans = dict()
    for ego in u:
        if "orientation" in u.nodes[ego]:
            node_trans[ego] = count
            node_untrans[count] = ego
            labeled_data_for_snap.write("%d\n" % u.nodes[ego]["orientation"])
            labeled.append(u.nodes[ego]["orientation"])
            count += 1
        elif "test_orientation" in u.nodes[ego]:
            test_labeled.append(ego)
            test_labels.append(u.nodes[ego]["test_orientation"])
        else:
            not_labeled.append(ego)
    labeled_data_for_snap.close()
    for ego in test_labeled:
        node_trans[ego] = count
        node_untrans[count] = ego
        count += 1
    for ego in not_labeled:
        node_trans[ego] = count
        node_untrans[count] = ego
        count += 1

    ######################
    #
    # Output the graph with labels and the node translations.
    # These are intermediate outputs that may later be
    # helpful in interpreting the data.
    #
    with open(sys.argv[6], "wb") as pkl_file:
        pickle.dump(u, pkl_file)
    # labeled = u.copy()
    with open(sys.argv[7], "wb") as pkl_file:
        pickle.dump(node_trans, pkl_file)
    # u = label_by_voting(u)
    # u = label_by_weighted_voting(u, float(sys.argv[5]))
    # u = label_by_weighted_voting2(u, float(sys.argv[5]), test_labeled)
    u = label_by_weighted_voting3(u, float(sys.argv[5]), test_labeled)
    # dump_tests(u, test_labeled)
    # u = label_by_revoting(u, float(sys.argv[5]), test_labeled)
    # dump_tests(u, test_labeled)
    write_to_snap(sys.argv[4], u, node_trans, node_untrans, labeled,
                  test_labels, float(sys.argv[5]))