def print_graph_parameters(G, pathways):  # pragma: no cover
    """Print summary parameters for the graph and its generated pathways.

    Reports pathway count, shortest/longest pathway lengths,
    semiconnectedness (with a hint about multiple sources), and the
    flow hierarchy of ``G``.
    """
    print('\nGRAPH PARAMETERS')
    print("A total of " + str(len(pathways)) + " pathways were generated")

    shortest = get_shortest_path(pathways)
    longest = get_longest_path(pathways)
    print("\nThe shortest pathway is length " + str(len(next(iter(shortest)))))
    print("pathways with this length are " + str(shortest))
    print("\nGraph depth is " + str(len(next(iter(longest)))))
    print("pathways with this length are " + str(longest))

    semiconnected = nx.is_semiconnected(G)
    print('\nIs the graph semiconnected? ' + str(semiconnected))
    if not semiconnected:
        # More than one zero-in-degree node means multiple sources.
        sources = [n for n, in_deg in G.in_degree() if in_deg == 0]
        if len(sources) > 1:
            print("You have multiple source facilities")

    hierarchy = nx.flow_hierarchy(G)
    print("\nGraph hierarchy is " + "{:.3f}".format(hierarchy))
    return
def test_hierarchy_weight():
    """Weighted flow hierarchy: the 1->2->3->1 cycle carries 0.3 of the
    total edge weight 1.2, so hierarchy is 1 - 0.3/1.2 = 0.75."""
    edges = [
        (0, 1, {'weight': .3}),
        (1, 2, {'weight': .1}),
        (2, 3, {'weight': .1}),
        (3, 1, {'weight': .1}),
        (3, 4, {'weight': .3}),
        (0, 4, {'weight': .3}),
    ]
    G = nx.DiGraph()
    G.add_edges_from(edges)
    assert nx.flow_hierarchy(G, weight='weight') == .75
def test_hierarchy_weight():
    """Weighted flow hierarchy: only the 1->2->3->1 cycle edges (total
    weight 0.3 of 1.2) are cyclic, so hierarchy is 1 - 0.3/1.2 = 0.75."""
    G = nx.DiGraph()
    G.add_edges_from([(0, 1, {'weight': .3}),
                      (1, 2, {'weight': .1}),
                      (2, 3, {'weight': .1}),
                      (3, 1, {'weight': .1}),
                      (3, 4, {'weight': .3}),
                      (0, 4, {'weight': .3})])
    # nose's assert_equal is deprecated/removed; use a bare assert, which
    # pytest rewrites with detailed failure output.
    assert nx.flow_hierarchy(G, weight='weight') == .75
def core_network_statistics(G, labels=None, name="example"):
    """Summarize graph ``G`` as a one-row pandas DataFrame.

    Parameters
    ----------
    G : networkx graph (directed or undirected)
    labels : optional 2-D array-like; its column count is reported as
        "classes" (assumed one-hot label matrix — TODO confirm with callers)
    name : value stored in the "Name" column

    Returns
    -------
    pd.DataFrame with a single row and the columns declared below.
    """
    columns = [
        "Name", "classes", "nodes", "edges", "degree", "diameter",
        "connected components", "clustering coefficient", "density",
        "flow_hierarchy"
    ]

    undirected = G.to_undirected()

    # Bug fix: the original computed the component count into `cc` and then
    # immediately overwrote `cc` with the clustering coefficient, so the
    # "clustering coefficient" column was always None and "connected
    # components" silently held the clustering value.  Keep them separate.
    try:
        components = nx.number_connected_components(undirected)
    except Exception:
        components = None
    try:
        clustering = nx.average_clustering(undirected)
    except Exception:
        clustering = None
    try:
        density = nx.density(G)
    except Exception:
        density = None

    number_of_classes = labels.shape[1] if labels is not None else None

    degrees = list(dict(nx.degree(G)).values())
    # Guard the empty graph: np.mean([]) would emit a warning and yield NaN.
    mean_degree = np.mean(degrees) if degrees else None

    try:
        diameter = nx.diameter(G)
    except Exception:
        diameter = "intractable"  # e.g. disconnected graph
    try:
        flow_hierarchy = nx.flow_hierarchy(G)
    except Exception:
        flow_hierarchy = "intractable"  # e.g. undirected/empty graph

    point = {
        "Name": name,
        "classes": number_of_classes,
        "nodes": G.number_of_nodes(),
        "edges": G.number_of_edges(),
        "diameter": diameter,
        "degree": mean_degree,
        # Bug fix: key must match the declared column name "flow_hierarchy";
        # the original used "flow hierarchy", creating a stray NaN column.
        "flow_hierarchy": flow_hierarchy,
        "connected components": components,
        "clustering coefficient": clustering,
        "density": density
    }
    # DataFrame.append was deprecated and removed in pandas 2.0; build the
    # single-row frame directly with the declared column order.
    return pd.DataFrame([point], columns=columns)
def hierarchy(self):
    """Compute hierarchy metrics for ``self.graph`` and dump them to
    ``<self.DIR>/hierarchy.json`` (serialized via ``SetEncoder``).

    Writes ``flow_hierarchy`` only for directed graphs; ``isolates`` is
    always included.  Prints the output file name when done.
    """
    rslt = {}
    # flow_hierarchy is only defined for directed graphs.
    if self.directed == 'directed':
        rslt['flow_hierarchy'] = nx.flow_hierarchy(self.graph)
    # NOTE(review): nx.isolates returns a generator in networkx >= 2.0;
    # presumably SetEncoder materializes it for JSON — verify.
    rslt['isolates'] = nx.isolates(self.graph)
    fname_hierarchy = self.DIR + '/' + 'hierarchy.json'
    with open(fname_hierarchy, "w") as f:
        json.dump(rslt, f, cls=SetEncoder, indent=2)
    print(fname_hierarchy)
def main(): path = 'C:/Users/97899/Desktop/N/N_year/' # 各物种环的数量 write = pd.ExcelWriter("C:/Users/97899/Desktop/N/Network/circle21.xls") for year in range(2008, 2009): D = {} path1 = path + "N_" + str(year) + '/Assemb/' + str(year) + '-' + str( 0) + '.txt' Specise_set = LoadDict(path1) path3 = path + "N_" + str(year) + '/Spearman/' + str(year) + '-' + str( 0) + '.txt' Spear_set = LoadDict(path3) for ex in range(1, 2): D[ex] = {} ex = float(ex) path2 = path + "N_" + str(year) + '/CPmat/' + str( year) + '-' + str(ex) + '.txt' CP_mat = LoadDict(path2) if year < 2016: C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[str(ex)], Spear_set[str(ex)]) else: C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[ex], Spear_set[ex]) if np.all(C_mat == 0): D[ex] = {3: -0.15} else: print(C_mat) node_list = Ass # print(node_list) plt.rcParams['axes.unicode_minus'] = False plt.rcParams['font.sans-serif'] = ['SimHei'] G = nx.DiGraph() G.add_nodes_from(node_list) # 添加点a edge_list = get_edge(C_mat, node_list) print(edge_list) G.add_edges_from(edge_list) # 添加边,起点为x,终点为y cyc_sys = list(nx.simple_cycles(G)) print(year, ex) print(cyc_sys) print(nx.flow_hierarchy(G)) D[ex] = sta_sys_three(cyc_sys) if not bool(D[ex]): D[ex] = {3: 0} '''显示图形''' nx.draw(G, pos=nx.circular_layout(G), node_color='lightgreen', edge_color='black', with_labels=True, font_size=10, node_size=3000) plt.show()
def test_hierarchy_weight():
    """Weighted flow hierarchy: the 1->2->3->1 cycle carries weight 0.3 of
    the total 1.2, so hierarchy is 1 - 0.3/1.2 = 0.75."""
    G = nx.DiGraph()
    G.add_edges_from(
        [
            (0, 1, {"weight": 0.3}),
            (1, 2, {"weight": 0.1}),
            (2, 3, {"weight": 0.1}),
            (3, 1, {"weight": 0.1}),
            (3, 4, {"weight": 0.3}),
            (0, 4, {"weight": 0.3}),
        ]
    )
    # nose's assert_equal is deprecated/removed; bare assert is the
    # pytest-idiomatic replacement.
    assert nx.flow_hierarchy(G, weight="weight") == 0.75
def write_graph_info_to_file(G, graph_name=GRAPH_NAME):
    """Write basic metrics for graph ``G`` to the file '<graph_name>_info'.

    Metrics that divide by node/edge counts can raise ZeroDivisionError on
    degenerate graphs; in that case the error is recorded in the report
    instead of propagating.
    """
    report_path = "{}_info".format(graph_name)
    with open(report_path, "w") as report_file:
        out = report_file.write
        try:
            out("Number of nodes: {}\n".format(G.number_of_nodes()))
            out("Number of edges: {}\n".format(G.number_of_edges()))
            # Component / distance / DAG sections come from helpers.
            write_components_info(G, report_file)
            write_distance_info(G, report_file)
            write_dag_info(G, report_file)
            out("Is Eulerian: {}\n".format(nx.is_eulerian(G)))
            out("Density: {}\n".format(nx.density(G)))
            out("Flow hierarchy: {}\n".format(nx.flow_hierarchy(G)))
            out("----INFO FROM NETWORKX----\n")
            out("{}\n".format(nx.info(G)))
        except ZeroDivisionError as err:
            out("Zero Division: {}".format(err))
def hnx(A, output_exec_time=False): #networkx has a fast built-in func try: import networkx as nx time_start = default_timer() g = nx.DiGraph(A) hdegree = nx.flow_hierarchy(g, weight='weight') time_end = default_timer() time = time_end - time_start except ImportError: print('networkx not found') hdegree = 0 time = 0 pass if output_exec_time: return hdegree, time else: return hdegree
def test_hierarchy_weight():
    """Weighted digraph whose only cycle (1->2->3->1) holds 0.3 of the
    total weight 1.2: flow hierarchy must be 0.75."""
    weighted_edges = [
        (0, 1, 0.3),
        (1, 2, 0.1),
        (2, 3, 0.1),
        (3, 1, 0.1),
        (3, 4, 0.3),
        (0, 4, 0.3),
    ]
    G = nx.DiGraph()
    # (u, v, w) triples populate the default 'weight' attribute.
    G.add_weighted_edges_from(weighted_edges)
    assert nx.flow_hierarchy(G, weight="weight") == 0.75
def main():
    """For each year/experiment, build the species digraph, record its flow
    hierarchy (+0.01 offset), and export the per-year series to Excel.

    ``D`` maps year -> list of one value per experiment; -0.15 marks an
    all-zero interaction matrix.
    """
    path = 'C:/Users/97899/Desktop/N/'
    D = {}
    for year in range(2008, 2021):
        D[year] = []
        # print(year)
        path1 = path + "N_year/N_" + str(year) + '/Assemb/' + str(
            year) + '-' + str(0) + '.txt'
        Specise_set = LoadDict(path1)
        path3 = path + "N_year/N_" + str(year) + '/Spearman/' + str(
            year) + '-' + str(0) + '.txt'
        Spear_set = LoadDict(path3)
        for ex in range(1, 39):
            ex = float(ex)
            path2 = path + "N_year/N_" + str(year) + '/CPmat/' + str(
                year) + '-' + str(ex) + '.txt'
            CP_mat = LoadDict(path2)
            # Pre-2016 files key their dictionaries by the string form of ex.
            if year < 2016:
                C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[str(ex)],
                                         Spear_set[str(ex)])
            else:
                C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[ex],
                                         Spear_set[ex])
            if np.all(C_mat == 0):
                # Sentinel value for an all-zero interaction matrix.
                D[year].append(-0.15)
            else:
                G_mat, Tra_D = LoadDataSet(C_mat)  # C matrix, digraph matrix
                # Find the cycles in the directed graph.
                '''寻找有向图中的环'''
                dfs(Tra_D, [], 0)
                # Count the cycles in the network.
                '''统计网络中的环数'''
                # Stasitccircle(ans)[0]
                # print(ex,Stasitccircle(ans))
                # Matplotlib setup: show minus signs and CJK labels correctly.
                plt.rcParams['axes.unicode_minus'] = False
                plt.rcParams['font.sans-serif'] = ['SimHei']
                node_list = Ass
                G = nx.DiGraph()
                G.add_nodes_from(node_list)  # add nodes
                edge_list = get_edge(C_mat, node_list)
                G.add_edges_from(edge_list)  # add edges (from x to y)
                strong = nx.flow_hierarchy(G)
                # Small offset so fully hierarchical years stay visible in plots
                # — presumably; TODO confirm intent of the +0.01.
                D[year].append(strong + 0.01)
                # strong = (strong/len(Ass))*10
                print(year, ex)
                # print(list(nx.simple_cycles(G)))
                # if strong > 1:
                #     D[year].append(0)
                # else:
                #     D[year].append(1-strong)
                # print(nx.flow_hierarchy(G),node_list,Stasitccircle(ans))
                # nx.closeness_centrality(G): closeness centrality — inverse of a
                #   node's mean distance to all others; larger = more central.
                # nx.flow_hierarchy(G): fraction of directed edges not on any
                #   cycle (computed via strongly connected components).
                # list(nx.simple_cycles(G)): enumerate cycles in the digraph.
                # pagerank: a node's influence on the network.
                # add_path: add a path to the graph.
                # Display the graph.
                '''显示图形'''
                # nx.draw(G, pos=nx.circular_layout(G), node_color='lightgreen', edge_color='black', with_labels=True,
                #         font_size=10, node_size=3000)
                # plt.show()
    # print(D)
    pd.DataFrame(D).to_excel(path + "Network/flow_hierarchy.xls")
# Python 2 reporting fragment: prints summary statistics for a browsing-
# session graph, then dumps raw series to CSV and a histogram to PNG.
# Uses weights/times/degrees/pages/etc. defined earlier in the file
# (out of view here) and the digraph D built from the session data.
print "min path time: %d s" % min(weights)
print "max path time: %d s" % max(weights)
print "average browse time: %.2f s" % (float(sum(times)) / len(times))
print "max browse time: %d s" % max(times)
print "min browse time: %d s" % min(times)
print "average out degree: %.2f" % (float(sum(degrees)) / len(degrees))
print "min out degree: %d" % min(degrees)
print "max out degree: %d" % max(degrees)
# pages maps a key to nested records; pages[x][0][0] is presumably the
# page count for that session — TODO confirm against the builder code.
print "average pages browsed: %.2f " % (
    float(sum([int(pages[x][0][0]) for x in pages])) / len(pages))
# Pearson correlations between degree / first-path-length / page-count series.
print "co-relateness(degree, first path): %.2f" % pearsonr(
    degrees, first_paths)[0]
print "co-relateness(degree, pages): %.2f" % pearsonr(degrees2, num_pages)[0]
print "co-relateness(first path, pages): %.2f" % pearsonr(
    first_paths2, num_pages)[0]
# Fraction of edges of D not participating in any cycle.
print "flow hierarchy: %.2f" % nx.flow_hierarchy(D)
with open("weights.csv", "w") as fout:
    fout.write("\n".join([str(x) for x in weights]))
with open("out_degrees.csv", "w") as fout:
    fout.write("\n".join([str(x) for x in degrees]))
with open("pages.csv", "w") as fout:
    fout.write("\n".join([pages[x][0][0] for x in pages]))
sns.set_style("whitegrid")
# Convert seconds to hours for the histogram's x axis.
weights = [w / 3600.0 for w in weights]
plt.hist(weights, bins=50, log=True, edgecolor="grey", lw=1, alpha=0.8)
plt.xlabel("path time (hour)")
plt.ylabel("count")
plt.savefig("weights.png", dpi=300)
def sf_complement_flow_hierarchy(g, sg):
    """Score subgraph ``sg`` by how non-hierarchical it is.

    Returns 1 minus the flow hierarchy of ``sg`` (higher = more cyclic).
    ``g`` is accepted for scoring-function interface uniformity but unused.
    """
    return 1 - nx.flow_hierarchy(sg)
def test_hierarchy_1():
    """Half the edges lie on the 1->2->3->1 cycle, so hierarchy is 0.5."""
    edges = [(0, 1), (1, 2), (2, 3), (3, 1), (3, 4), (0, 4)]
    G = nx.DiGraph(edges)
    assert nx.flow_hierarchy(G) == 0.5
def test_hierarchy_tree():
    """A directed tree has no cycles, so every edge is hierarchical."""
    tree = nx.full_rary_tree(2, 16, create_using=nx.DiGraph())
    assert nx.flow_hierarchy(tree) == 1.0
def test_hierarchy_cycle():
    """In a pure directed cycle every edge is cyclic: hierarchy is 0."""
    cycle = nx.cycle_graph(5, create_using=nx.DiGraph())
    assert nx.flow_hierarchy(cycle) == 0.0
def main(): path = 'C:/Users/97899/Desktop/N/N_year/' # 各物种环的数量 write = pd.ExcelWriter("C:/Users/97899/Desktop/N/Network/circle20.xls") for year in range(2008, 2021): D = {} path1 = path + "N_" + str(year) + '/Assemb/' + str(year) + '-' + str( 0) + '.txt' Specise_set = LoadDict(path1) path3 = path + "N_" + str(year) + '/Spearman/' + str(year) + '-' + str( 0) + '.txt' Spear_set = LoadDict(path3) for ex in range(1, 39): D[ex] = {} ex = float(ex) path2 = path + "N_" + str(year) + '/CPmat/' + str( year) + '-' + str(ex) + '.txt' CP_mat = LoadDict(path2) if year < 2016: C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[str(ex)], Spear_set[str(ex)]) else: C_mat, Ass = Select_Zuhe(CP_mat, Specise_set[ex], Spear_set[ex]) if np.all(C_mat == 0): D[ex] = {3: -0.15} else: G_mat, Tra_D = LoadDataSet(C_mat) # C矩阵,有向图矩阵 '''寻找有向图中的环''' dfs(Tra_D, [], 0) '''统计网络中的环数''' D[ex] = Stasitccircle(ans)[0] print(Stasitccircle(ans)) # 判断字典是否为空 if not bool(D[ex]): D[ex] = {3: 0} # 返回的两个值以元组的形式存储 ans.clear() # print(str(year) + '年', '第' + str(ex) + '个实验组合', D[year][ex]) node_list = Ass G = nx.DiGraph() G.add_nodes_from(node_list) # 添加点a edge_list = get_edge(C_mat, node_list) G.add_edges_from(edge_list) # 添加边,起点为x,终点为y print(nx.flow_hierarchy(G)) # nx.closeness_centrality(G),节点距离中心系数,节点到其他节点的平均路径的倒数,值越大中心性越高 # nx.flow_hierarchy(G),有向图中不参与循环的边的分数,实现了查找强链接组件的替代方法 # list(nx.simple_cycles(G)) 寻找有向图中的环 # pagerank 结点对网络的影响力大小 # add_path 添加一条路径 '''显示图形''' nx.draw(G, pos=nx.circular_layout(G), node_color='lightgreen', edge_color='black', with_labels=True, font_size=10, node_size=3000) # plt.show() F = pd.DataFrame.from_dict(D, orient='index') F_s = F.sort_index(axis=1).sort_index(axis=0) # 对行列索引排名 F_s.fillna(0, inplace=True) # 对列排名 F_s.to_excel(write, sheet_name=str(year)) write.close() write.save()
def check_cyclic(g, result):
    """Report and render the cyclic weakly connected components of digraph g.

    A component with flow hierarchy below 1 contains at least one cycle;
    its simple cycles are printed and the component is written to
    <result>/<n>.png.  (Python 2 code.)
    """
    # NOTE(review): nx.weakly_connected_component_subgraphs was removed in
    # networkx >= 2.4 — this code presumably targets an older networkx.
    for n, subgraph in enumerate(nx.weakly_connected_component_subgraphs(g)):
        # flow_hierarchy == 1 iff the component is acyclic.
        h = nx.flow_hierarchy(subgraph)
        if h != 1:
            print nx.simple_cycles(subgraph)
            write_png(subgraph, os.path.join(result, '%s.png' % n))
def test_hierarchy_1():
    """Half the edges lie on the 1->2->3->1 cycle, so hierarchy is 0.5."""
    G = nx.DiGraph()
    G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 1), (3, 4), (0, 4)])
    # nose's assert_equal is deprecated/removed; bare assert is the
    # pytest-idiomatic replacement.
    assert nx.flow_hierarchy(G) == 0.5
def test_hierarchy_tree():
    """A directed tree has no cycles, so every edge is hierarchical."""
    G = nx.full_rary_tree(2, 16, create_using=nx.DiGraph())
    # nose's assert_equal is deprecated/removed; bare assert is the
    # pytest-idiomatic replacement.
    assert nx.flow_hierarchy(G) == 1.0
def test_hierarchy_cycle():
    """In a pure directed cycle every edge is cyclic: hierarchy is 0."""
    G = nx.cycle_graph(5, create_using=nx.DiGraph())
    # nose's assert_equal is deprecated/removed; bare assert is the
    # pytest-idiomatic replacement.
    assert nx.flow_hierarchy(G) == 0.0