def _resize_edge_image(sess, matrix):
    # Convert one raw per-edge matrix into a 128x128 float32 numpy image.
    # method=3 corresponds to ResizeMethod.AREA in TF1 — confirm against the
    # tf.image.resize_images docs for the pinned TF version.
    # NOTE: every call adds new ops to the TF1 graph; acceptable for this
    # one-shot data preparation (mirrors the original behavior), but do not
    # reuse inside a long-lived training loop.
    image = tf.convert_to_tensor(matrix)
    image = tf.image.convert_image_dtype(image, tf.float32)
    resized = tf.image.resize_images(image, [128, 128], method=3)
    return resized.eval(session=sess)


def get_train_data():
    """Build (jump1_image, jump2_image, label) training triples.

    Walks datasets ./0814data/1 .. ./0814data/9: loads the ground-truth
    community partition, builds the graph, flags each edge via
    add_flag_graph, then converts every edge into two 128x128 images
    (the 1-hop and 2-hop neighbourhood matrices).

    Returns:
        (train_x_y, flag0_count, flag1_count) — train_x_y is a list of
        (img1, img2, label) with label [1, 0] when the edge flag is 1 and
        [0, 1] otherwise; the two counters tally flag==0 / flag!=0 edges
        and are also published as module-level globals.
    """
    train_x_y = []
    global flag0_count
    global flag1_count
    flag0_count = 0
    flag1_count = 0
    sess = tf.InteractiveSession()
    try:
        for i in range(1, 10):
            file_path_community = './0814data/%d/community-standard.txt' % i
            file_path_network = './0814data/%d/network.txt' % i
            social_list = cnn_socialNet_read_data.get_standard_network(
                file_path_community)
            my_graph = cnn_socialNet_read_data.get_graph(file_path_network)
            cnn_socialNet_read_data.add_flag_graph(my_graph, social_list)
            edges = []
            for (u, v, flag) in my_graph.edges.data('flag'):
                if int(flag) == 0:
                    flag0_count = flag0_count + 1
                else:
                    flag1_count = flag1_count + 1
                edges.append(((u, v), flag))
            for pair, edge_flag in edges:
                # 1-hop neighbourhood image.
                matrix1, row1, clown1 = cnn_socialNet_deal_data.get_jump1_3dimension_different_size_matrix(
                    my_graph, pair)
                img_numpy1 = _resize_edge_image(sess, matrix1)
                # 2-hop neighbourhood image.
                matrix2, row2, clown2 = cnn_socialNet_deal_data.get_jump2_3dimension_different_size_matrix(
                    my_graph, pair)
                img_numpy2 = _resize_edge_image(sess, matrix2)
                label = [1, 0] if int(edge_flag) == 1 else [0, 1]
                train_x_y.append((img_numpy1, img_numpy2, label))
    finally:
        # The original leaked the session when an eval raised; always close.
        sess.close()
    return train_x_y, flag0_count, flag1_count
def get_train_data():
    """Build (image, label) training pairs from the karate-club network.

    Loads the ground-truth partition and the edge list, flags every edge via
    add_flag_graph, renders each edge's 1-hop neighbourhood matrix as a
    128x128 float32 image, and returns the samples plus the two flag tallies
    (also published as module-level globals).
    """
    global flag0_count
    global flag1_count
    flag0_count = 0
    flag1_count = 0
    train_x_y = []
    sess = tf.InteractiveSession()
    file_path_community = './small_data/karate-standard.txt'
    file_path_network = './small_data/karate-edges.txt'
    social_list = cnn_socialNet_read_data.get_standard_network(
        file_path_community)
    print(social_list)
    my_graph = cnn_socialNet_read_data.get_graph(file_path_network, split=',')
    print(my_graph.edges)
    cnn_socialNet_read_data.add_flag_graph(my_graph, social_list)
    edges = []
    for (u, v, flag) in my_graph.edges.data('flag'):
        print(u, v, flag)
        if int(flag) == 0:
            flag0_count += 1
        else:
            flag1_count += 1
        edges.append(((u, v), flag))
    for edge_pair, edge_flag in edges:
        matrix, row, clown = cnn_socialNet_deal_data.get_jump1_3dimension_different_size_matrix(
            my_graph, edge_pair)
        tensor = tf.convert_to_tensor(matrix)
        tensor = tf.image.convert_image_dtype(tensor, tf.float32)
        resized = tf.image.resize_images(tensor, [128, 128], method=3)
        img_numpy = resized.eval(session=sess)
        # One-hot label: flag==1 -> [1, 0], otherwise [0, 1].
        label = [1, 0] if int(edge_flag) == 1 else [0, 1]
        train_x_y.append((img_numpy, label))
    sess.close()
    return train_x_y, flag0_count, flag1_count
import networkx as nx
import cnn_socialNet_read_data

file_path = './0814data/1/community-standard.txt'
test_file_path = './0814data/test_community.txt'
# Load the ground-truth community partition from the small test fixture.
social_list = cnn_socialNet_read_data.get_standard_network(test_file_path)
print(social_list)

# Test building the network from an edge-list file.
test_network = './0814data/test_nodes.txt'
network_file_path = '0814data/1/network.txt'  # unused below; kept for reference
test_G = cnn_socialNet_read_data.get_graph(test_network)
print(test_G.edges())

# Test attaching a 'flag' attribute to every edge of the network.
# (Presumably 0/1 for intra- vs inter-community — confirm in add_flag_graph.)
edges = []
cnn_socialNet_read_data.add_flag_graph(test_G, social_list)
for (u, v, flag) in test_G.edges.data('flag'):
    print(u, v, flag)
    edges.append(((u, v), int(flag)))

# Community-partition algorithm (BFS-based).
visited = {}


def breadth_first_search(root=None):
    # BFS-driven community extraction over the module-level test_G.
    queue = []
    social = []
    nodes = test_G.nodes

    # NOTE(review): bfs() is truncated in this view — only its first
    # statement is visible; the rest of its body lies outside this chunk.
    def bfs(first_node):
        order = [first_node]
# Smoke tests for the clustering-evaluation metrics.  Relies on np, NMI,
# precision, recall, f1, Q1, R and cnn_socialNet_read_data being imported
# earlier in the file (outside this view).
A = np.array([12,15,19,21,26,33,38,41,42,45,48,55,58,59,60,61,71,76,80,82,83,89,102,104,109,110,117])
B = np.array([1,193,121,48,58,41,104,82,55,26,109,117,60,102,42,83,110,71,89,33,38,80,21,76,59,61,15,45])
# A = np.array([3,5,6,7],[1,2])
# B = np.array([3,5,7],[1,2,6])
# print('NMI:', NMI(A, B))
print('P:', precision(A, B))
print('R:', recall(A, B))
print('F1:', f1(A, B))

# Three hand-built partitions of the 18-node test graph, from exact to
# increasingly perturbed, to sanity-check that modularity Q decreases.
community1 = [['1','2','3','4','5','6','7'], ['8','9','10','11','12','13'], ['14','15','16','17','18']]
community2 = [['1', '2', '3', '4', '5', '7'], ['8', '9', '10', '11', '12', '13'], ['14', '15', '16', '17', '18','6']]
community3 = [['1', '2', '3', '4', '5', '7'], ['8', '9', '10', '11', '13'], ['14', '15', '16', '17', '18', '6', '12']]
test_network = './0814data/test_nodes.txt'
# network_file_path = '0814data/1/network.txt'
test_G = cnn_socialNet_read_data.get_graph(test_network)
print('Q:', Q1(community1, test_G))
print('Q:', Q1(community2, test_G))
print('Q:', Q1(community3, test_G))

# Known 2-community split of Zachary's karate club, loaded from a
# comma-separated edge list.
karate_comm = [['1', '2', '3', '4', '5', '6', '7', '8', '11', '12', '13', '14', '17', '18', '20', '22'],
               ['9', '10', '15', '16', '19', '21', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34']]
karate_network = './small_data/karate-edges.txt'
karate_G = cnn_socialNet_read_data.get_graph(karate_network, split=',')
print('karate_Q:', Q1(karate_comm, karate_G))

# Local modularity R for each individual community of the test graph;
# the hand-computed expected values are noted per line.
comm1 = ['1', '2', '3', '4', '5', '6', '7']  # 13/(13+2) = 0.86
comm2 = ['8', '9', '10', '11', '12', '13']   # 9/(9+2) = 0.81
comm3 = ['14', '15', '16', '17', '18']       # 8/(8+2) = 0.8
print('局部模块度:', R(comm1, test_G))
print('局部模块度:', R(comm2, test_G))
# NOTE(review): this chunk starts mid-function — the beginning of
# merge_community (its signature, the loop classifying communities into
# real_social / virtual_social, and the definition of `comm`) is outside
# this view.  Indentation below is reconstructed; verify against the full file.
        else:
            # Communities that fail the earlier test are treated as
            # "virtual" and become merge candidates.
            virtual_social.append(comm)
    # matrix_r[i][j] = gain in local modularity R from merging virtual
    # community i into real community j.
    matrix_r = [[0 for i in range(len(real_social))] for i in range(len(virtual_social))]
    for i in range(len(virtual_social)):
        for j in range(len(real_social)):
            merge_com = virtual_social[i] + real_social[j]
            virtual_comm = virtual_social[i]
            virtual_r = evaluation.R(virtual_comm, G)
            merge_r = evaluation.R(merge_com, G)
            data_r = merge_r - virtual_r
            matrix_r[i][j] = data_r
    print(matrix_r)
    print(len(matrix_r))
    # For each virtual community, pick the real community with the largest
    # R-gain and merge into it.
    for m in range(len(matrix_r)):
        row_list = matrix_r[m]
        row = row_list.index(max(row_list))
        print(row_list)
        print(row)
        # NOTE(review): this expression is a no-op — its result is discarded.
        # It looks like it was meant to extend real_social[row] in place
        # (e.g. `real_social[row] += virtual_social[m]`); confirm intent
        # before fixing, since the commented line below suggests the same.
        real_social[row] + virtual_social[m]
        #real_social[matrix_r[m].index(max(matrix_r[m]))]+(virtual_social[m])
        #print(matrix_r[m])
    return real_social


# Driver: merge communities on the football network (comma-separated edges).
# `social_list` comes from earlier in the file, outside this view.
football_network = './small_data/football-edges.txt'
G = cnn_socialNet_read_data.get_graph(football_network, split=',')
print(merge_community(social_list, G))
#print(len(merge_community(social_list, G)))
#print('nmi:',nmi_test.nmi())
bout = weight_variable([classnum]) # out = tf.matmul(dropf, Wout) + bout out = tf.add(tf.matmul(dropf, Wout), bout) return out ''' 1、构造图 2、将图的每条边转化为图片,根据训练好的模型识别每条边的标签 3、简化图:去掉识别为社区间的边,构成新的简化图 4、重复上面的1、2步骤,直到Q参数稳定 ''' # 1:构造图 file_network = './0814data/test_nodes.txt' G = cnn_socialNet_read_data.get_graph(file_network) # print(G.edges()) def get_data(g): sess = tf.InteractiveSession() edges = [] data = [] for u, v in g.edges(): edges.append((u, v)) for j in range(len(edges)): matrix, row, clown = cnn_socialNet_deal_data.get_jump1_3dimension_different_size_matrix(g, edges[j]) image = tf.convert_to_tensor(matrix) image = tf.image.convert_image_dtype(image, tf.float32) resize_image = tf.image.resize_images(image, [128, 128], method=3) img_numpy = resize_image.eval(session=sess)
# Smoke-test script; G and the imports (tf, cnn_socialNet_read_data,
# cnn_socialNet_deal_data) are defined earlier in the file, outside this view.
print(G.edges())
a = tf.Variable(3)
print(a)

# Test loading the ground-truth community data.
file_path = './0814data/1/community-standard.txt'
test_file_path = './0814data/test_community.txt'
social_list = cnn_socialNet_read_data.get_standard_network(file_path)
print(social_list)

# Test building the network from a file.
test_network = './0814data/test_nodes.txt'
network_file_path = '0814data/1/network.txt'
test_G = cnn_socialNet_read_data.get_graph(network_file_path)
print(test_G.edges())

# Test attaching a 'flag' attribute to every edge of the network.
edges = []
cnn_socialNet_read_data.add_flag_graph(test_G, social_list)
for (u, v, flag) in test_G.edges.data('flag'):
    # print(u, v, flag)
    edges.append(((u, v), int(flag)))

# Build the per-edge matrix for every edge: each sample is
# (matrix, flag, (u, v)).
train_x = []
for i in range(len(edges)):
    matrix, row, clown = cnn_socialNet_deal_data.get_jump1_3dimension_different_size_matrix(test_G, edges[i][0])
    train_x.append((matrix, edges[i][1], edges[i][0]))
    # print("matrix", matrix)