def time_align_visualize(alignments, time, y, namespace='time_align'):
    plt.figure()
    heat = np.flip(alignments + alignments.T + np.eye(alignments.shape[0]),
                   axis=0)
    sns.heatmap(heat, cmap="YlGnBu")
    plt.savefig(namespace + '_heatmap.svg')

    G = nx.from_numpy_matrix(alignments)
    G = nx.maximum_spanning_tree(G)

    pos = {}
    for i in range(len(G.nodes)):
        pos[i] = np.array([time[i], y[i]])

    mst_edges = set(nx.maximum_spanning_tree(G).edges())
    weights = [
        G[u][v]['weight'] if (not (u, v) in mst_edges) else 8
        for u, v in G.edges()
    ]

    plt.figure()
    nx.draw(G, pos, edges=G.edges(), width=10)
    plt.ylim([-1, 1])
    plt.savefig(namespace + '.svg')
def getFullGraphs(greenw, edges, wnodes, unitWeights=False, nocolor=False):  #oWordMap
    G = nx.Graph()
    for w in wnodes:
        G.add_node(w)
    for e in edges:  #add all edges, use sum of both weights
        weight = edges[e]
        if weight == 0:
            continue
        nodes = e.split(esep)
        eid2 = nodes[1] + esep + nodes[0]
        if eid2 in edges:
            weight += edges[eid2]
            edges[eid2] = 0
        edges[e] = 0
        #G.add_edge(oWordMap.get(nodes[0],nodes[0]), oWordMap.get(nodes[1],nodes[1]), weight=weight)
        G.add_edge(nodes[0], nodes[1], weight=1 if unitWeights else weight)

    #keep large graph components
    #Gcomp= sorted(, key=len,reverse=True)
    # NOTE: nx.connected_component_subgraphs was removed in NetworkX 2.4;
    # newer releases use (G.subgraph(c).copy() for c in nx.connected_components(G)).
    Gcomp = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)
    if not len(Gcomp):
        return None
    Gc = Gcomp[0]
    Gc = nx.maximum_spanning_tree(Gc)
    if len(Gcomp) > 1:
        for g in Gcomp[1:4]:
            #keep component if at least 10% of the largest component
            if len(g) > 0.1 * len(Gcomp[0]) and len(g) > 10:
                Gc = nx.compose(Gc, nx.maximum_spanning_tree(g))
    return getStyledGraph(Gc, greenw, wnodes, nocolor=nocolor, tree=True)  #oWordMap
def capture_results(A, X, name):
    store = dict(
        # X=X,
    )

    ### Un-Supervised Metric (knee-finding heuristic) ###
    plt.figure()
    knee, A_thres = all_thres(X, pct_thres=None, plot=True)  # find knee
    plt.savefig(tmp / f'{name}_knee.png')
    ex.add_artifact(tmp / f'{name}_knee.png')
    graph_compare(A, G, A_thres, name, 'knee')

    f_knee = metrics.fbeta_score(A.flatten(), A_thres.flatten(), 1.)
    print(f'{name} - Knee F_1 = {f_knee:.3f}')  # knee-based f-score
    unsup = dict(
        knee=knee,
        # X_knee = A_thres,
        fscore_knee=f_knee)
    store['unsupervised'] = unsup

    ### Supervised Optimum (Best F1-Score) ###
    p_, r_, t_ = precision_recall_curve(A.flatten(), X.flatten())
    aps_ = average_precision_score(A.flatten(), X.flatten())
    f_ = 2 * p_[:-1] * r_[:-1] / (p_[:-1] + r_[:-1])
    ts_ = t_[np.nanargmax(f_)]  # best threshold for f1-score
    print(f'{name} - Opt. F_1 = {np.nanmax(f_):.3f}')  # best achievable f-score

    B_thres = np.where(X >= ts_, 1., 0.)
    graph_compare(A, G, B_thres, name, 'f-score')
    sup = dict(
        # X_opt=B_thres,
        precision=p_,
        recall=r_,
        thres=t_,
        fscores=np.where(np.isnan(f_), 0, f_),
        thres_opt=ts_,
        fscore_opt=np.nanmax(f_),
        aps=aps_,
    )
    store['supervised'] = sup

    ### Pathfinder (maximum spanning tree) ###
    T = nx.maximum_spanning_tree(G)
    pathfind = nx.to_numpy_array(nx.maximum_spanning_tree(nx.Graph(X)))
    C_thres = np.where(pathfind > 0, 1, 0)
    f_pf = metrics.fbeta_score(
        nx.to_numpy_array(T).flatten(), C_thres.flatten(), 1.)
    graph_compare(A, T, C_thres, name, 'pathfinder')
    pf = dict(
        # X_pf = C_thres,
        fscore_pf=f_pf,
    )
    store['pathfinder'] = pf

    # pprint.pprint(store)
    return store
def test_prim_maximum_spanning_tree_edges_specify_weight(self):
    G = nx.Graph()
    G.add_edge(1, 2, weight=1, color="red", distance=7)
    G.add_edge(1, 3, weight=30, color="blue", distance=1)
    G.add_edge(2, 3, weight=1, color="green", distance=1)
    G.add_node(13, color="purple")
    G.graph["foo"] = "bar"
    T = nx.maximum_spanning_tree(G, algorithm="prim")
    assert_equal(sorted(T.nodes()), [1, 2, 3, 13])
    assert_equal(sorted(T.edges()), [(1, 2), (1, 3)])
    T = nx.maximum_spanning_tree(G, weight="distance", algorithm="prim")
    assert_equal(sorted(T.edges()), [(1, 2), (1, 3)])
    assert_equal(sorted(T.nodes()), [1, 2, 3, 13])
def test_prim_maximum_spanning_tree_edges_specify_weight(self):
    G = nx.Graph()
    G.add_edge(1, 2, weight=1, color='red', distance=7)
    G.add_edge(1, 3, weight=30, color='blue', distance=1)
    G.add_edge(2, 3, weight=1, color='green', distance=1)
    G.add_node(13, color='purple')
    G.graph['foo'] = 'bar'
    T = nx.maximum_spanning_tree(G, algorithm='prim')
    assert_equal(sorted(T.nodes()), [1, 2, 3, 13])
    assert_equal(sorted(T.edges()), [(1, 2), (1, 3)])
    T = nx.maximum_spanning_tree(G, weight='distance', algorithm='prim')
    assert_equal(sorted(T.edges()), [(1, 2), (1, 3)])
    assert_equal(sorted(T.nodes()), [1, 2, 3, 13])
def mstree_plot(A_thres, title=None, ax=None):
    """plot maximum spanning tree with layered edges for viz"""
    if ax is None:
        ax = plt.gca()
    G = nx.from_pandas_adjacency(A_thres, create_using=nx.Graph)
    G = nx.convert_node_labels_to_integers(G, label_attribute='item')
    D = nx.maximum_spanning_tree(G)
    nontree_edges = nx.from_numpy_array(
        nx.to_numpy_array(G) - nx.to_numpy_array(D) > 0,
        create_using=nx.Graph)
    pos = nx.layout.kamada_kawai_layout(D)
    if title is not None:
        ax.set_title(title)
    draw_G(D,
           pos,
           fp=nontree_edges,
           withlabels=True,
           font_size=8.,
           font_family='serif',
           legend=False,
           ax=ax,
           node_size=20.)
    # print(f'C_β = {nx.average_clustering(G):.2f}')
    ax.axis('off')
    ax.set_clip_on(False)
def test_kruskal_maximum_spanning_tree_disconnected(self):
    G = nx.Graph()
    G.add_path([1, 2])
    G.add_path([10, 20])
    T = nx.maximum_spanning_tree(G, algorithm="kruskal")
    assert_equal(sorted(map(sorted, T.edges())), [[1, 2], [10, 20]])
    assert_equal(sorted(T.nodes()), [1, 2, 10, 20])
def test_prim_maximum_spanning_tree_disconnected(self):
    G = nx.Graph()
    G.add_edge(1, 2)
    G.add_edge(10, 20)
    T = nx.maximum_spanning_tree(G, algorithm='prim')
    assert_equal(sorted(map(sorted, T.edges())), [[1, 2], [10, 20]])
    assert_equal(sorted(T.nodes()), [1, 2, 10, 20])
def naive_dij(c):
    all_students = list(range(1, c.students + 1))
    non_home = list(range(1, c.home)) + list(range(c.home + 1, c.v + 1))
    # c.scout(random.choice(non_home), all_students)
    G = c.G
    # for _ in range(100):
    #     u, v = random.choice(list(c.G.edges()))
    #     c.remote(u, v)
    MST = nx.maximum_spanning_tree(G)
    # find all vertices of the MST
    # query_student = all_students[:len(all_students)//2]
    query_student = all_students
    query_total = [c.scout(vertex, query_student) for vertex in non_home]
    query_result = []
    # print(non_home)
    # print(c.home)
    for q in query_total:
        witness = 0
        for s in query_student:
            if q[s]:
                witness += 1
        query_result.append(witness)
    # print(query_result)

    # construct the expected bots
    number_of_bots = c.bots
    max_num_index_list = []
    for _ in range(len(query_result)):
        i = query_result.index(max(query_result))
        if i < c.home - 1:
            max_num_index_list.append(i + 1)
            # print("#:", i + 1, " value: ", query_result[i])
        else:
            max_num_index_list.append(i + 2)
        query_result[i] = 0

    # Begin Dijkstra
    robots_remain = c.bots
    # pred, dist = nx.dijkstra_predecessor_and_distance(G, c.home, cutoff=None, weight='weight')
    # print("Home: ", c.home)
    # print(max_num_index_list)
    for bot_num in max_num_index_list:
        if robots_remain > 0:
            path = nx.dijkstra_path(G, bot_num, c.home)
            # print(path)
            # print("bots remain", robots_remain)
            judge = c.remote(bot_num, path[1])
            if judge > 0:
                for i in range(2, len(path)):
                    c.remote(path[i - 1], path[i])
                robots_remain -= 1
def backbone_inf(X, resolution=0.5):
    import networkx as nx
    conn, G = nearest_neighbor(X, k=15)
    groups, n_clust = leiden(conn, resolution=resolution)
    mean_cluster = [[] for x in range(n_clust)]
    for i, cat in enumerate(np.unique(groups)):
        idx = np.where(groups == cat)[0]
        mean_cluster[int(cat)] = np.mean(X[idx, :], axis=0)

    mst = np.zeros((n_clust, n_clust))
    for i in range(n_clust):
        for j in range(n_clust):
            mst[i, j] = np.linalg.norm(np.array(mean_cluster[i]) -
                                       np.array(mean_cluster[j]),
                                       ord=2)

    G = nx.from_numpy_matrix(-mst)
    T = nx.maximum_spanning_tree(G, weight='weight', algorithm='kruskal')
    T = nx.to_numpy_matrix(T)
    # T is the adjacency matrix of the MST.
    return groups, mean_cluster, T
def DMST(G, dummy_wt=0.1):
    G_copy = G.copy()
    edge_map = dict()
    for e in G_copy.edges(data="weight"):
        edge_map[(e[0], e[1])] = e[2]
    G_copy.add_node("-1")
    for node in G_copy.nodes():
        if str(node) == "-1":
            root = node
    for node in G_copy.nodes():
        if str(node) != "-1":
            G_copy.add_edge(root, node, weight=dummy_wt)
    T = nx.maximum_spanning_tree(G_copy.to_undirected()).to_directed()
    rem = list()
    attr = dict()
    for e in T.edges(data="weight"):
        p1 = (e[0], e[1])
        p2 = (e[1], e[0])
        if p1 not in edge_map:
            rem.append(e)
        elif (p2 not in edge_map) or (edge_map[p1] > edge_map[p2]):
            attr[p1] = {"weight": edge_map[p1]}
        else:
            rem.append(e)
    nx.set_edge_attributes(T, attr)
    for r in rem:
        T.remove_edge(r[0], r[1])
    T.remove_node(root)
    return T
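# Hedged usage sketch (not part of the original source): a minimal call to the
# DMST function above on a small, made-up directed weighted graph. The node
# labels and weights here are assumptions for illustration only. DMST attaches
# a dummy root "-1", takes a maximum spanning tree of the undirected view, and
# then keeps only the better-weighted direction of each surviving edge.
import networkx as nx

def _demo_dmst():
    G = nx.DiGraph()
    G.add_edge("a", "b", weight=3.0)
    G.add_edge("b", "a", weight=1.0)  # weaker reverse edge, expected to be dropped
    G.add_edge("b", "c", weight=2.0)
    T = DMST(G, dummy_wt=0.1)
    # expect the directed edges a->b (3.0) and b->c (2.0) to survive
    print(sorted(T.edges(data="weight")))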
def _generate_graphs(self):
    '''
    Creates the complete graph from the proximity matrix and finds its
    maximum spanning tree.
    '''
    self._complete_graph = nx.from_pandas_adjacency(self._proximity)
    self._maxst = nx.maximum_spanning_tree(self._complete_graph)
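# Hedged standalone sketch (illustrative, not from the original class): the same
# two NetworkX calls used by _generate_graphs above, applied to a made-up
# symmetric proximity DataFrame. The labels and values are assumptions.
import networkx as nx
import pandas as pd

proximity = pd.DataFrame(
    [[0.0, 0.8, 0.3],
     [0.8, 0.0, 0.6],
     [0.3, 0.6, 0.0]],
    index=['a', 'b', 'c'], columns=['a', 'b', 'c'])
complete_graph = nx.from_pandas_adjacency(proximity)  # weighted graph over all nonzero proximities
maxst = nx.maximum_spanning_tree(complete_graph)      # keeps only the strongest ties
# expect the two strongest edges to remain: a-b (0.8) and b-c (0.6)
print(sorted(maxst.edges(data='weight')))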
def tree_decomp(self):
    clusters = self.clusters
    graph = nx.empty_graph(len(clusters))
    for atom, nei_cls in enumerate(self.atom_cls):
        if len(nei_cls) <= 1:
            continue
        bonds = [c for c in nei_cls if len(clusters[c]) == 2]
        rings = [c for c in nei_cls if len(clusters[c]) > 4]  #need to change to 2

        if len(nei_cls) > 2 and len(bonds) >= 2:
            clusters.append([atom])
            c2 = len(clusters) - 1
            graph.add_node(c2)
            for c1 in nei_cls:
                graph.add_edge(c1, c2, weight=100)
        elif len(rings) > 2:  #Bee Hives, len(nei_cls) > 2
            clusters.append([atom])  #temporary value, need to change
            c2 = len(clusters) - 1
            graph.add_node(c2)
            for c1 in nei_cls:
                graph.add_edge(c1, c2, weight=100)
        else:
            for i, c1 in enumerate(nei_cls):
                for c2 in nei_cls[i + 1:]:
                    inter = set(clusters[c1]) & set(clusters[c2])
                    graph.add_edge(c1, c2, weight=len(inter))

    n, m = len(graph.nodes), len(graph.edges)
    assert n - m <= 1  #must be connected
    return graph if n - m == 1 else nx.maximum_spanning_tree(graph)
def tree_decomp(self):
    clusters = self.clusters
    graph = nx.empty_graph(len(clusters))
    for atom, nei_cls in enumerate(self.atom_cls):
        if len(nei_cls) <= 1:
            continue
        inter = set(self.clusters[nei_cls[0]])
        for cid in nei_cls:
            inter = inter & set(self.clusters[cid])
        assert len(inter) >= 1

        if len(nei_cls) > 2 and len(inter) == 1:  # two rings + one bond has problem!
            clusters.append([atom])
            c2 = len(clusters) - 1
            graph.add_node(c2)
            for c1 in nei_cls:
                graph.add_edge(c1, c2, weight=100)
        else:
            for i, c1 in enumerate(nei_cls):
                for c2 in nei_cls[i + 1:]:
                    union = set(clusters[c1]) | set(clusters[c2])
                    graph.add_edge(c1, c2, weight=len(union))

    n, m = len(graph.nodes), len(graph.edges)
    assert n - m <= 1  #must be connected
    return graph if n - m == 1 else nx.maximum_spanning_tree(graph)
def test_kruskal_maximum_spanning_tree_disconnected(self):
    G = nx.Graph()
    G.add_path([1, 2])
    G.add_path([10, 20])
    T = nx.maximum_spanning_tree(G, algorithm='kruskal')
    assert_equal(sorted(map(sorted, T.edges())), [[1, 2], [10, 20]])
    assert_equal(sorted(T.nodes()), [1, 2, 10, 20])
def resolve_diamond(self):
    """Resolve relationships that form diamond graphs.

    This method is a pipeline for resolving diamond graphs: it relies on a
    maximum spanning tree to break the detected cycles and preserves
    information by copying data across the cut ties.
    """
    self.relationships['weight'] = self.merge_cost()
    G = nx.from_pandas_edgelist(self.relationships,
                                source='parent_entity',
                                target='child_entity',
                                edge_attr=['weight'])
    if len(list(nx.cycle_basis(G))) > 0:
        self.resolve_reference()
        G = nx.from_pandas_edgelist(self.relationships,
                                    source='parent_entity',
                                    target='child_entity',
                                    edge_attr=['weight'])
        X = nx.maximum_spanning_tree(G)
        edges = [x for x in G.edges() if x not in X.edges()]
        for edge in edges:
            if edge[0] != edge[1]:
                self.merge(edge, remove=True)
def maxtree(G, conn):
    if sufficient(G):
        J = nx.maximum_spanning_tree(G)
        st.write("#### Maximum tree")
        viz.draw(J, conn, cmap=cmap)
        st.pyplot()
def gerani_paper_arrg_to_aht(
    graph: nx.MultiDiGraph,
    max_number_of_nodes: int = 100,
    weight: str = "moi",
    alpha_coefficient: float = 0.5,
) -> nx.Graph:
    logger.info("Generate Aspect Hierarchical Tree based on ARRG")
    aspects_weighted_page_rank = calculate_weighted_page_rank(graph, "weight")
    graph = calculate_moi_by_gerani(
        graph=graph,
        weighted_page_rank=aspects_weighted_page_rank,
        alpha_coefficient=alpha_coefficient,
    )
    graph_flatten = merge_multiedges(graph,
                                     node_attrib_name=weight,
                                     default_node_weight=0)
    sorted_nodes = sorted(
        list(aspects_weighted_page_rank.items()),
        key=lambda node_degree_pair: node_degree_pair[1],
        reverse=True,
    )
    csv_name = "/tmp/gerani_page_ranks.csv"
    pd.DataFrame(sorted_nodes, columns=["aspect", weight]).to_csv(csv_name)
    mlflow.log_artifact(csv_name)
    top_nodes = list(pluck(0, sorted_nodes[:max_number_of_nodes]))
    sub_graph = graph_flatten.subgraph(top_nodes)
    maximum_spanning_tree = nx.maximum_spanning_tree(sub_graph, weight=weight)
    nx.set_node_attributes(maximum_spanning_tree, dict(sub_graph.nodes.items()))
    return maximum_spanning_tree
def determine_junctions(g, junction_threshold=100):
    """
    Determine the backbones of the graph with ambiguous nodes removed.
    """
    gmst = nx.maximum_spanning_tree(g, weight="n")
    junctions = determine_junctions_of_trees(gmst, junction_threshold)
    return junctions
def show_network(
    ax,
    net,
    topic_category_map,
    label,
    loc,
    size_lookup,
    color_lookup,
    norm=2000,
    norm_2=1.2,
    layout=nx.kamada_kawai_layout,
    ec="white",
    alpha=0.6,
):
    """
    Plots a network visualisation of a topic network
    """
    new_net = net.copy()
    new_net_2 = nx.maximum_spanning_tree(new_net)

    # Calculate the layout
    pos = layout(new_net_2, center=(0.5, 0.5))

    # Node size
    node_s = list(
        [size_lookup[x]**norm_2 for x in dict(new_net_2.degree).keys()])

    # Node colour
    node_c = []
    for x in new_net_2.nodes:
        if x not in topic_category_map.keys():
            node_c.append("white")
        else:
            if topic_category_map[x] not in color_lookup.keys():
                node_c.append("white")
            else:
                c = color_lookup[topic_category_map[x]]
                node_c.append(c)

    # Draw the network. There is quite a lot of complexity here
    nx.draw_networkx_nodes(
        new_net_2,
        pos,
        node_size=node_s,
        node_color=node_c,
        cmap="tab20c",
        alpha=alpha,
        edgecolors="darkgrey",
        ax=ax,
    )
    edge_w = [e[2]["weight"] / norm for e in new_net_2.edges(data=True)]
    nx.draw_networkx_edges(new_net_2,
                           pos,
                           width=edge_w,
                           edge_color=ec,
                           ax=ax,
                           alpha=alpha)
def post_processing_tree(adj):
    import networkx as nx
    # deg = np.sum(adj, axis=1)
    # norm_adj = 1 / np.sqrt(deg)[:, None] * adj * 1 / np.sqrt(deg)[None, :]
    G = nx.from_numpy_matrix(adj, create_using=nx.Graph)
    T = nx.maximum_spanning_tree(G, weight='weight', algorithm='kruskal')
    T = nx.to_numpy_matrix(T)
    T = np.where(T != 0, 1, 0)
    return T
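# Hedged usage sketch (illustrative, not from the original module): calling the
# post-processing step above on a small made-up affinity matrix, assuming the
# function and numpy are in scope. The values are assumptions; the point is that
# dense affinities collapse to the binary adjacency matrix of the maximum
# spanning tree. Note that from_numpy_matrix / to_numpy_matrix were removed in
# NetworkX 3.0; from_numpy_array / to_numpy_array are the modern equivalents if
# this needs to run on a recent release.
import numpy as np

adj = np.array([[0.0, 0.9, 0.2],
                [0.9, 0.0, 0.7],
                [0.2, 0.7, 0.0]])
tree_adj = post_processing_tree(adj)
print(tree_adj)  # expect 1s only on the two strongest links (0-1 and 1-2)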
def test_prim_maximum_spanning_tree_attributes(self):
    G = nx.Graph()
    G.add_edge(1, 2, weight=1, color='red', distance=7)
    G.add_edge(2, 3, weight=1, color='green', distance=2)
    G.add_edge(1, 3, weight=10, color='blue', distance=1)
    G.add_node(13, color='purple')
    G.graph['foo'] = 'bar'
    T = nx.maximum_spanning_tree(G, algorithm='prim')
    assert_equal(T.graph, G.graph)
    assert_equal(T.node[13], G.node[13])
    assert_equal(T.edge[1][2], G.edge[1][2])
def test_prim_maximum_spanning_tree_attributes(self):
    G = nx.Graph()
    G.add_edge(1, 2, weight=1, color="red", distance=7)
    G.add_edge(2, 3, weight=1, color="green", distance=2)
    G.add_edge(1, 3, weight=10, color="blue", distance=1)
    G.add_node(13, color="purple")
    G.graph["foo"] = "bar"
    T = nx.maximum_spanning_tree(G, algorithm="prim")
    assert_equal(T.graph, G.graph)
    assert_equal(T.node[13], G.node[13])
    assert_equal(T.edge[1][2], G.edge[1][2])
def sample_random_view_tree(views, view0, landmarks):
    G = build_view_graph(views, landmarks)
    for s, t, data in G.edges(data=True):
        data['weight'] = random.random()
    T = nx.maximum_spanning_tree(G)
    minimum_tree = list(nx.dfs_edges(T, source=view0))
    return minimum_tree
def get_keywords(G, top, D=None):
    G_tmp = nx.maximum_spanning_tree(G)
    k = min(G.number_of_nodes(), top)
    B = nx.betweenness_centrality(G_tmp, weight='weight')
    D = nx.degree(G, weight='weight') if not D else D
    P = nx.pagerank(G_tmp)
    val_tmp = {i: B[i] * D[i] * (P[i] + 0.0001) for i in G.nodes()}
    return ' '.join([
        i[0] for i in sorted(
            val_tmp.items(), key=lambda x: x[1], reverse=True)[:k]
    ])
def compare_result(gama):
    import glob
    import numpy as np
    import pandas as pd
    files = glob.glob('./financial_institution2/*')
    mdate = []
    mG1 = []
    mG2 = []
    mG3 = []
    mG4 = []
    for k in range(len(files)):
        mdate.append(files[k][-15:-4])
        G = network_from_sinduja_data(k)
        G1 = planar_maximally_filter(G)
        G2 = simplification_minimum_lost_connectivity(G, gama)
        minT = nx.minimum_spanning_tree(G, weight='weight')
        maxT = nx.maximum_spanning_tree(G, weight='weight')
        w1 = [weight1 for u1, v1, weight1 in G1.edges(data='weight', default=1)]
        w2 = [weight2 for u2, v2, weight2 in G2.edges(data='weight', default=1)]
        w3 = [weight3 for u3, v3, weight3 in minT.edges(data='weight', default=1)]
        w4 = [weight4 for u4, v4, weight4 in maxT.edges(data='weight', default=1)]
        print(len(w1))
        print(len(w2))
        mG1.append(np.mean(w1))
        mG2.append(np.mean(w2))
        mG3.append(np.mean(w3))
        mG4.append(np.mean(w4))

    df = pd.DataFrame(
        {
            'date': mdate,
            'PMFG': mG1,
            'minimum_lost': mG2,
            'min_tree': mG3,
            'max_tree': mG4
        },
        columns=['date', 'PMFG', 'minimum_lost', 'min_tree', 'max_tree'])
    df['date'] = pd.to_datetime(df.date)
    df = df.sort_values(by='date')
    #print df.groupby(df.date.dt.year)['PMFG', 'minimum_lost'].transform('mean')
    #df = df.groupby([df.date.dt.strftime('%Y')])['PMFG','minimum_lost','min_tree','max_tree'].mean()
    #df.index.name = 'date'
    #df.reset_index(level=0, inplace=True)
    return df
def ML_spanning_tree(data):
    n = data.shape[1]
    triu = list(zip(*np.triu_indices(n, k=1)))
    scores = score_edges(data)
    G = nx.empty_graph(n)
    for i in range(n * (n - 1) // 2):
        G.add_edge(triu[i][0], triu[i][1], weight=scores[i])
    T = nx.maximum_spanning_tree(G, algorithm="kruskal")
    return Graph(n, dol=nx.to_dict_of_lists(T)), scores
def makeG(df_size, df_attr, df_state, price, path):
    # df_size relates to size of node, for example it could be power price
    # df_attr relates to the power output
    import numpy as np
    #G=GfromAEMO(df_size) #23/07
    G = GfromAEMO(df_size)
    G = nx.maximum_spanning_tree(G)
    G = attributes_one(G, df_size.replace([np.inf, -np.inf], np.nan), df_attr,
                       df_state, price)
    G = attributes(G)
    G = attributes_color(G)
    graphout = graph_build_lga(G, path, False)
    return G
def fit(
    self, X: pd.DataFrame, y: pd.Series, *, weights: pd.Series = None
) -> "TreeBayesianNetworkClassifier":
    if len(X) <= 0:
        raise ValueError(f"len(X) must be positive, but is {len(X)}")
    if len(y) != len(X):
        raise ValueError(f"len(y) must equal len(X), but is {len(y)}")
    if weights is None:
        weights = pd.Series(np.ones(len(X)))
    if len(weights) != len(X):
        raise ValueError(f"len(weights) must equal len(X), but is {len(weights)}")

    self.classes_ = unique_labels(y)
    self.features_ = np.array(X.columns)
    data = __class__._compose_data(X, y)
    G = nx.Graph()
    N = len(data)

    # add nodes
    for col in data.columns:
        sr = data[col]
        probs = weights.groupby(sr).sum() / weights.sum()
        labels = unique_labels(sr)
        G.add_node(col, probs=probs, labels=labels)

    # add edges
    for i_f1 in range(len(data.columns) - 1):
        for i_f2 in range(i_f1 + 1, len(data.columns)):
            cols = sorted([data.columns[i_f1], data.columns[i_f2]])
            contingency = weighted_contingency_matrix(*data[cols], weights)
            mutual_info = weighted_mutual_info_score(contingency)

            # compute joint probability distribution
            nd = reduce(mul, contingency.shape)  # arity of the ``*cols`` domain
            pseudocount = contingency.sum() / nd  # for Laplace smoothing
            probs = (contingency + pseudocount) / (
                N + (pseudocount * nd)
            )  # uses Laplace smoothing
            df = pd.DataFrame(probs)
            df.index = G.nodes[cols[0]]["labels"]
            df.columns = G.nodes[cols[-1]]["labels"]
            sr = df.stack()
            sr.index.names = cols

            G.add_edge(*cols, joint_probs=sr, mutual_info=mutual_info)

    # extract maximum spanning tree by mutual information
    T = nx.maximum_spanning_tree(G, weight="mutual_info")
    arborescence = root_tree(T, root=__class__._Y_COL_PREFIX)
    self.network_ = arborescence
    return self
def test_weight_attribute(self):
    G = nx.Graph()
    G.add_edge(0, 1, weight=1, distance=7)
    G.add_edge(0, 2, weight=30, distance=1)
    G.add_edge(1, 2, weight=1, distance=1)
    G.add_node(3)
    T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight='distance')
    assert_nodes_equal(sorted(T), list(range(4)))
    assert_edges_equal(sorted(T.edges()), [(0, 2), (1, 2)])
    T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight='distance')
    assert_nodes_equal(sorted(T), list(range(4)))
    assert_edges_equal(sorted(T.edges()), [(0, 1), (0, 2)])
def build_chow_liu_tree(X, n):
    """
    Build a Chow-Liu tree from the data, X. n is the number of features.
    The weight on each edge is the mutual information between those features,
    and the tree is the maximum spanning tree of that weighted graph.
    The tree is returned as a networkx object.
    """
    G = nx.Graph()
    for v in range(n):
        G.add_node(v)
        for u in range(v):
            G.add_edge(u, v, weight=calculate_mutual_information(X, u, v))
    T = nx.maximum_spanning_tree(G)
    return T
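# Hedged usage sketch (not from the original module): build_chow_liu_tree above
# relies on a calculate_mutual_information(X, u, v) helper that is not shown
# here. As a stand-in assumption, this sketch estimates pairwise mutual
# information with sklearn and feeds a small random binary dataset through the
# same maximum-spanning-tree construction.
import numpy as np
import networkx as nx
from sklearn.metrics import mutual_info_score

def calculate_mutual_information(X, u, v):
    # assumed behaviour: empirical MI between discrete columns u and v
    return mutual_info_score(X[:, u], X[:, v])

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(200, 4))  # 200 samples, 4 binary features
T = build_chow_liu_tree(X, n=4)
print(sorted(T.edges(data='weight')))  # 3 edges spanning the 4 features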
def main():
    data = get_dataset()
    G = nx.Graph()
    nodes = data.columns
    for i in range(len(nodes)):
        for j in range(i + 1, len(nodes)):
            G.add_edge(nodes[i], nodes[j], weight=mi(nodes[i], nodes[j], data))
    T = nx.maximum_spanning_tree(G)

    terminal_nodes = [x for x in T.nodes() if T.degree(x) == 1]
    root = random.choice(terminal_nodes)
    print(root)
    T = nx.bfs_tree(T, root)
def main():
    # 1st row: Number of agents; Number of meetings; Number of variables
    # Open file
    inputFilename = 'constraint_graphs/dcop_constraint_graph'
    # inputFilename = 'constraint_graphs/dcop_simple'
    # inputFilename = 'constraint_graphs/DCOP_Problem_10'
    input = open(inputFilename, 'r')

    # Read first line
    [nrAgents, nrMeetings, nrVars] = u.readLine(input)

    # Read agents/variables/constraints
    agents = u.readMeetings(input, nrVars)

    # Read preferences per agent
    agents = u.readPreferences(input, agents, nrAgents)

    # Create graph
    G = nx.Graph()

    # Add agents/nodes
    G = ptree.addNodes(G, agents)

    # Add edges and keep track of back-edges
    [G, back_edges_candidates] = ptree.addEdges(G, agents, nrMeetings)

    # Convert to spanning tree
    T = nx.maximum_spanning_tree(G)
    # T = nx.dfs_successors(G, 0)

    # Create back edges
    T = ptree.addBackEdges(T, back_edges_candidates)

    # Print nodes
    print('----------------ALL NODES----------------')
    # u.printNodes(T)

    layout = graphviz_layout(T, prog="dot")
    edges = T.edges()
    colors = [T[u][v]['color'] for u, v in edges]
    #print(list(nx.bfs_edges(T,3)))
    compute_utils(T, ptree.getLeafNodes(T))
    nx.draw(T, layout, edge_color=colors, with_labels=True)
    plt.show()

    # Print nodes
    print('----------------ALL NODES----------------')
    u.printNodes(T)
def test_kruskal_maximum_spanning_tree_isolate(self):
    G = nx.Graph()
    G.add_nodes_from([1, 2])
    T = nx.maximum_spanning_tree(G, algorithm="kruskal")
    assert_equal(sorted(T.nodes()), [1, 2])
    assert_equal(sorted(T.edges()), [])
def test_prim_maximum_spanning_tree(self):
    T = nx.maximum_spanning_tree(self.G, algorithm='prim')
    assert_equal(T.edges(data=True), self.maximum_spanning_edgelist)
def test_multigraph_keys_tree_max(self):
    G = nx.MultiGraph()
    G.add_edge(0, 1, key="a", weight=2)
    G.add_edge(0, 1, key="b", weight=1)
    T = nx.maximum_spanning_tree(G)
    assert_equal([(0, 1, 2)], list(T.edges(data="weight")))
def test_maximum_spanning_tree(self):
    T = nx.maximum_spanning_tree(self.G, algorithm="kruskal")
    assert_equal(sorted(T.edges(data=True)), self.maximum_spanning_edgelist)
def test_maximum_tree(self):
    T = nx.maximum_spanning_tree(self.G, algorithm=self.algo)
    actual = sorted(T.edges(data=True))
    assert_edges_equal(actual, self.maximum_spanning_edgelist)