def compare():
    """Cross-check networkx shortest-path lengths against NetDistance.

    Builds the graph, computes single-source Dijkstra distances from
    node 2 with networkx, then recomputes the same distances with the
    project's NetDistance implementation and prints both sets of
    statistics side by side for manual comparison.
    """
    graph = create_graph()
    node_count = graph.number_of_nodes()
    # Distances from source node 2 via networkx; index 0 is unused padding
    # because node ids start at 1.
    nx_dist = np.zeros(node_count + 1)
    for node in range(1, node_count + 1):
        nx_dist[node] = nx.dijkstra_path_length(graph, 2, node)
    print(np.max(nx_dist))
    print(np.min(nx_dist))
    # Recompute the same distances with the project's own implementation.
    adjacency = DataReader().data_reader()
    my_dist = NetDistance(adjacency).node_distance(2)
    # Unreachable nodes come back as inf; zero them out before comparing.
    my_dist[np.where(my_dist == float("inf"))] = 0
    print(np.max(my_dist))
    print(nx_dist.size, my_dist.size)
    print(np.where(nx_dist == my_dist))
def test_delete_node_distance():
    """Write shortest-path length distributions for degraded networks.

    For each removal fraction, deletes that share of nodes from the
    network, recomputes all-pair shortest-path lengths, and dumps the
    resulting length distribution to a per-fraction JSON file under
    ../data/distance/.
    """
    adjacency = DataReader().data_reader()
    fractions = [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    for ratio in fractions:
        # Remove `ratio` of the nodes, then measure what remains.
        reduced = delete_nodes(adjacency, ratio)
        dist = NetDistance(reduced)
        pair_lengths = dist.all_pair_node_distance()
        distribution = dist.all_pair_lenth_distribution(pair_lengths)
        dist.write_distance_distribution_to_file(
            distribution,
            file_path=f"../data/distance/distance_distribution_test_{ratio}.json",
        )
''' :param clustering: 每个节点的聚类系数 :param rootPath: 保存文件的根目录 :return: ''' # filename = input("输入保存的文件名:") # file_path = os.path.join(rootPath, filename) file_path = rootPath with open(file_path, 'w') as fp: json.dump(clustering, fp) if __name__ == "__main__": filename = "../data/clustering coefficient.json" data = DataReader() adj_matrix = data.data_reader() # 计算每个节点的聚类系数 my_cal = cal_clustring(adj_matrix) # 计算clustering的最大和最小值 # max_num, ave_num = clu_pro(filename) # print("节点聚类系数最大值{}, 平均值{}".format(max_num, ave_num)) # 保存文件到data/cluster文件夹 # rootPath = '../data/cluster/' rootPath = '../data/cluster/cluster.json' write_cluster_tofile(my_cal, rootPath) # 与nexworkx进行对比
        # NOTE(review): the enclosing def and loop headers fall outside this
        # chunk — indentation below is a reconstruction; verify against the
        # full file. BFS expansion step: mark v as visited and queue every
        # unvisited neighbor (columns where the adjacency row holds a 1).
        if v not in seen:
            seen.add(v)
            for i in np.where(net_array[v] == 1)[0]:
                if i not in next_level:
                    next_level.append(i)
    return seen


def write_submap_to_file(net_array, file_path="../data/submap/submap_counts.json"):
    """Run repeated node-deletion experiments and save the results as JSON.

    For 10 trials, deletes each fraction in x_list of the network's nodes
    and records the value of calculate_unicom_submap on the remainder
    (presumably the connected-subgraph count — confirm against that
    helper). Writes {"x": fractions, "y": {trial_index: counts}} to
    file_path.
    """
    x_list = [0, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    y_dict = dict()
    for t in range(10):
        y_list = []
        for i in x_list:
            # Delete fraction i of the nodes, then measure the remainder.
            nodes_deleted = delete_nodes(net_array, i)
            y_list.append(calculate_unicom_submap(nodes_deleted))
        y_dict.update({str(t): y_list})
    submap_dict = {"x": x_list, "y": y_dict}
    with open(file_path, 'w') as f:
        f.write(json.dumps(submap_dict))


if __name__ == "__main__":
    reader = DataReader()
    net_array = reader.data_reader()
    write_submap_to_file(net_array)