Example #1
    def main(self, filename):

        # Load the graph
        initG = commons.get_networkByFile(filename)
        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 1)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG, T = commons.propagation1(max_sub_graph, source_list)
        infectG1, T = commons.propagation1(max_sub_graph, source_list)
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # Single-source testing happens here.

        subinfectG1 = commons.get_subGraph_true(infectG1)

        # Multiple observation points
        result_node = self.mutiple_Observation(infectG, subinfectG,
                                               subinfectG1, source_list[0])

        print('True source:', source_list[0])
        print('Predicted source:', result_node[0])
        distance = nx.shortest_path_length(subinfectG,
                                           source=source_list[0],
                                           target=result_node[0])
        print('Resulting distance:', distance)
        return distance
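
Several of these examples lean on commons.get_subGraph_true to pull the infected subgraph out of the propagated graph, but its implementation is not part of this listing. A minimal sketch of such a helper, assuming each node carries the 'SI' attribute used in Examples 11 and 14 (value 2 meaning infected), could look like this; it is a hypothetical stand-in, not the repo's code:

import networkx as nx

def get_subgraph_of_infected(infectG):
    # Collect the nodes whose 'SI' attribute marks them as infected (== 2).
    infected_nodes = [n for n, data in infectG.nodes(data=True) if data.get('SI') == 2]
    # Return the node-induced subgraph as an independent copy.
    return infectG.subgraph(infected_nodes).copy()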
Example #2
    def delete_high_betweenness_edge_centrality_second(self, infectG):
        subinfectG = commons.get_subGraph_true(infectG)
        # Rank edges by betweenness, then remove them layer by layer.

        commons_node_list = []
        one_subgraph = None
        two_subgraph = None
        commons_node_list_copy = []
        while True:
            sort_list = Partion_common.get_layer_edge_between(subinfectG)
            print('sort_list', sort_list)
            print('sort_list[0][0][0]', sort_list[0][0][0])
            subinfectG.remove_edge(sort_list[0][0][0], sort_list[0][0][1])
            commons_node_list.append(sort_list[0][0][0])
            commons_node_list.append(sort_list[0][0][1])
            # After removing the edge, check whether the graph has split.
            one_subgraph_nodelist, two_subgraph_nodelist = self.judge_two_subgraph2(
                subinfectG)
            print('one_subgraph', one_subgraph_nodelist)
            if one_subgraph_nodelist and two_subgraph_nodelist:
                commons_node_list_copy = copy.deepcopy(commons_node_list)
                # print('commons_node_list', commons_node_list)
                commons_node_list.extend(one_subgraph_nodelist)
                commons_node_list_copy.extend(two_subgraph_nodelist)
                break

        return [commons_node_list, commons_node_list_copy]
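
judge_two_subgraph2 (and judge_two_subgraph in Example #4) is called but not defined in this listing. From how it is used after each edge removal, it plausibly returns the node lists of the first two connected components once the graph has split. A minimal sketch under that assumption, hypothetical rather than the repo's implementation:

import networkx as nx

def split_into_two_components(G):
    # Once the graph is disconnected, return the node lists of its two largest
    # components; otherwise return two empty lists so the caller keeps removing edges.
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    if len(components) < 2:
        return [], []
    return list(components[0]), list(components[1])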
Example #3
    def delete_high_betweenness_centrality(self, infectG):
        subinfectG = commons.get_subGraph_true(infectG)
        sort_list = Partion_common.get_layer_node_between(subinfectG)
        # Rank nodes by betweenness, layer by layer.
        print('sort_list', sort_list)  # First check whether the high-betweenness nodes sit in the middle.
        # for every_node in sort_list:
        first_layer_between = [x[0] for x in sort_list[0]]  # Take the first-layer nodes.
        two_source = random.sample(first_layer_between,
                                   2)  # Randomly pick 2 elements from the list.

        lengthA_B = 100000
        good_two_result = []
        best_node_two_result = None
        for iter in range(0, 100):
            # Compute every node's shortest-path distance to these two candidate sources.
            print('two_source', two_source)
            lengthA_dict = nx.single_source_bellman_ford_path_length(
                subinfectG, two_source[0], weight='weight')
            lengthB_dict = nx.single_source_bellman_ford_path_length(
                subinfectG, two_source[1], weight='weight')
            # Initialise two collections to hold the two node partitions.
            node_twolist = [[], []]  # The two node partitions.
            node_diff_twolist = [[], []]  # Nodes strictly closer to one source than the other.
            for node in list(subinfectG.nodes):
                if lengthA_dict[node] > lengthB_dict[node]:  # This node is closer to b.
                    node_twolist[1].append(node)
                    node_diff_twolist[1].append(node)
                elif lengthA_dict[node] < lengthB_dict[node]:
                    node_twolist[0].append(node)
                    node_diff_twolist[0].append(node)
                else:
                    node_twolist[0].append(node)
                    node_twolist[1].append(node)
            print('node_twolist', len(node_twolist[1]))
            # Find a centre within each list; several centrality measures (or a faster heuristic) could be used.
            # Judge whether the two centres found this round are any good.

            lengthA_sum = 0  # Sum of within-region distances for nodes closer to a.
            lengthB_sum = 0
            for i in node_diff_twolist[0]:  # Nodes closer to a (the first source): sum their distances to a.
                lengthA_sum += lengthA_dict[i]
            for j in node_diff_twolist[1]:  # Nodes closer to b (the second source): sum their distances to b.
                lengthB_sum += lengthB_dict[j]

            sums = lengthA_sum + lengthB_sum
            if sums < lengthA_B:
                print('sums', sums)
                # These two sources are better than the previous pair.
                lengthA_B = sums
                good_two_result = two_source
                best_node_two_result = node_twolist
            else:
                # Resample two sources. This could still be optimised (the direction of the search).
                two_source = random.sample(first_layer_between, 2)

        print('good_two_result', good_two_result)
        print('good_node_two_result', best_node_two_result)

        return [[good_two_result[0], best_node_two_result[0]],
                [good_two_result[1], best_node_two_result[1]]]
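
The nearest-source split inside the loop above is essentially a graph Voronoi partition around the two candidate sources. Assuming subinfectG is connected and undirected, networkx can produce the same kind of split directly, which makes a handy cross-check for a given candidate pair two_source:

import networkx as nx

# Maps each candidate source to the set of nodes nearer to it than to the other candidate.
# Ties go to exactly one cell, unlike the loop above, which appends tied nodes to both lists.
cells = nx.voronoi_cells(subinfectG, set(two_source), weight='weight')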
Example #4
    def delete_high_betweenness_edge_centrality(self, infectG):
        subinfectG = commons.get_subGraph_true(infectG)
        # Rank edges by betweenness, then remove them layer by layer.
        sort_list = Partion_common.get_layer_edge_between(subinfectG)
        print('sort_list', sort_list)

        commons_node_list = []
        one_subgraph = None
        two_subgraph = None
        for edge, between in sort_list:  #
            print(edge, between)
            subinfectG.remove_edge(edge[0], edge[1])
            commons_node_list.append(edge[0])
            commons_node_list.append(edge[1])
            # After removing the edge, check whether the graph has split.
            one_subgraph_nodelist, two_subgraph_nodelist = self.judge_two_subgraph(
                subinfectG)
            print('one_subgraph', one_subgraph_nodelist)
            if len(one_subgraph_nodelist) > 1:
                print('true')
                break
        commons_node_list_copy = copy.deepcopy(commons_node_list)
        commons_node_list.extend(one_subgraph_nodelist)
        commons_node_list_copy.extend(two_subgraph_nodelist)
        print('commons_node_list', commons_node_list)
        return [commons_node_list, commons_node_list_copy]
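
Removing the highest-betweenness edge until the infected subgraph falls apart is the first step of Girvan-Newman community detection, so the same two-way split can be obtained from networkx as a cross-check (not the repo's method). Note that this example removes edges from a single initial ranking, whereas girvan_newman, like Example #2, recomputes edge betweenness after every removal. Assuming subinfectG is connected:

from networkx.algorithms import community

# The first item yielded by girvan_newman is the 2-community partition.
part_a, part_b = (list(c) for c in next(community.girvan_newman(subinfectG)))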
Example #5
    def jaya_add_coverage(self, infectG):
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.

        print('Number of nodes in the propagation graph:', subinfectG.number_of_nodes())
        print('Number of edges in the propagation graph:', subinfectG.number_of_edges())

        print('Is the propagation subgraph connected?')
        sub_connect_infect = self.judge_connect(subinfectG)
        singleRegionList = list(sub_connect_infect.nodes)
        results = commons.jayawith_dynami_H(infectG, singleRegionList, 2,
                                            [4, 5, 6, 7], singleRegionList)
        print(results)
        node_coverage1 = []
        node_coverage2 = []
        # Compute the overlap of the two propagation regions.
        node_coverage1.extend(
            list(
                nx.bfs_tree(infectG,
                            source=results[0][0],
                            depth_limit=results[1])))
        node_coverage2.extend(
            list(
                nx.bfs_tree(infectG,
                            source=results[0][1],
                            depth_limit=results[1])))
        return [[results[0][0], node_coverage1],
                [results[0][1], node_coverage2]]
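
node_coverage1 and node_coverage2 above are the nodes reached by a depth-limited BFS from each candidate source. For readers checking the coverage idea, the same notion of "everything within r hops" can also be taken from the cutoff form of shortest-path lengths (a sketch, not the repo's helper):

import networkx as nx

def coverage_set(G, source, radius):
    # All nodes whose hop distance from `source` is at most `radius`.
    return set(nx.single_source_shortest_path_length(G, source, cutoff=radius))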
Example #6
    def main(self, filename):

        # Load the graph
        initG = commons.get_networkByFile(filename)
        max_sub_graph = commons.judge_data(initG)
        print('Is it a tree?', nx.is_tree(max_sub_graph))
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = self.produce_source(max_sub_graph, 1)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG, T = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # Single-source testing happens here.
        '''Approach 1: the Jordan ("jarden") center.'''
        #
        object_single = single_Source_detection.Single_source()
        reverse_node = object_single.revsitionAlgorithm_singlueSource(
            subinfectG)
        result_node = self.single_source_bound_ture(subinfectG,
                                                    reverse_node[0],
                                                    source_list[0])
        print('True source:', source_list[0])
        print('Predicted source:', result_node[0])
        distance = nx.shortest_path_length(subinfectG,
                                           source=source_list[0],
                                           target=result_node[0])
        print('Resulting distance:', distance)
        return distance
Example #7
    def main(self, filename):

        # Load the graph
        initG = commons.get_networkByFile(filename)

        # ecc=nx.eccentricity(initG)
        # sort_ecc=sorted(ecc.items(),key=lambda  x:x[1])
        # product_srouce =sort_ecc[0][0]
        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 1)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG, T = commons.propagation1(max_sub_graph, [1000])
        # infectG1, T = commons.propagation1(max_sub_graph, [source_list])
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # Single-source testing happens here.
        print(sorted(list(subinfectG.nodes())))
        #
        # result_node = self.revsitionAlgorithm_singlueSource(subinfectG)
        # ''' Approach 2: coverage/distance '''
        # result_node= self.single_source_bydistance_coverage(infectG,subinfectG,source_list[0])
        # ''' Approach 3: distance centre '''
        # result_node = self.single_source_bydistance( subinfectG)

        # ''' Approach 6: quality-distance centre '''
        # result_node = self.single_source_byQuality_centrality(infectG,subinfectG)

        # Approach 7: eigenvector centrality
        # result_node = self.single_source_bybetweenness_centrality( subinfectG)
        # Approach 8: reversal plus propagation time T
        # result_node = self.single_source_get_T_jarden_center( T,subinfectG)

        # Approach 9: rumor centrality

        result_node = self.rumor_center(infectG, subinfectG, 1000)

        #
        # Jordan centrality
        #   result_node = self.jarden_cente_networkx(infectG,subinfectG,source_list[0])

        # Coverage plus our own step
        # result_node = self.coverage_BFS_single_source(infectG,subinfectG,source_list[0])

        # Multiple observation points
        # result_node = self.coverage_BFS_single_source(infectG,subinfectG,source_list[0])

        # Coverage-based computation

        # result_node = self.belief_algorithm(infectG, subinfectG,1000)
        print('True source:', 1000)  # the propagation above was started from node 1000
        # print('Predicted source:', result_node[0])
        distance = nx.shortest_path_length(subinfectG,
                                           source=1000,
                                           target=result_node[0])
        print('Distance between them:', distance)
        return distance
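
rumor_center is not defined in this listing. On a tree, the Shah-Zaman rumor centrality of a node v is n! divided by the product of the subtree sizes when the tree is rooted at v; a small self-contained sketch of that formula (in log form, a hypothetical helper, not the repo's rumor_center) is:

import math
import networkx as nx

def rumor_centrality_scores(T):
    # Log rumor centrality log R(v) = log(n!) - sum_u log(t_u) for every node v of a tree T.
    n = T.number_of_nodes()
    log_n_factorial = math.lgamma(n + 1)
    scores = {}
    for root in T.nodes():
        parent = nx.dfs_predecessors(T, source=root)      # child -> parent for this rooting
        size = {v: 1 for v in T.nodes()}
        for v in nx.dfs_postorder_nodes(T, source=root):  # children are visited before parents
            if v in parent:
                size[parent[v]] += size[v]
        scores[root] = log_n_factorial - sum(math.log(size[v]) for v in T.nodes())
    return scores

# The rumor-center estimate is then the arg-max, e.g. max(scores, key=scores.get).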
Example #8
    def main(self, filename):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        # initG = commons.get_networkByFile('../../data/4_regular_graph_3000_data.txt')

        initG = commons.get_networkByFile(filename)
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        # subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        single_Source_detection_object = single_Source_detection.Single_source(
        )
        '''
        The full pipeline follows below (currently for 2 sources):
        1  extract the subgraph
        2  partition it
        3  locate a source in each partition
        '''

        # 1  Extract the subgraph. There are 3 extraction methods; which one should we pick?
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # 2  Partition. There are many partition methods; see which works best.

        result = self.findmultiplesource(infectG, subinfectG, sourceNumber=2)

        result_source_list = []
        for community in result:
            subsubinfectG = nx.Graph()
            for edge in list(subinfectG.edges()):
                if edge[0] in community and (edge[1] in community):
                    subsubinfectG.add_edge(edge[0], edge[1])
            # Check whether this subgraph is connected.
            maxsubsubinfectG = self.judge_data(subsubinfectG)
            # Start single-source detection.
            '''jar center'''
            source_node = single_Source_detection_object.revsitionAlgorithm_singlueSource(
                maxsubsubinfectG)
            # source_node = single_Source_detection_object.single_source_bydistance_coverage(infectG, maxsubsubinfectG)
            result_source_list.append(source_node[0])

        distance = commons.cal_distance(max_sub_graph, source_list,
                                        result_source_list)

        return distance
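
The inner loop rebuilds each community's graph edge by edge. An equivalent, slightly more idiomatic form is the node-induced subgraph; the one behavioural difference is that it also keeps community nodes with no intra-community edge, as isolated nodes:

def community_subgraph(G, community):
    # Node-induced subgraph of `community`, copied so it is detached from G
    # and can be modified freely (G.subgraph alone returns a read-only view).
    return G.subgraph(community).copy()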
Example #9
    def main(self, filename):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        # initG = commons.get_networkByFile('../../data/4_regular_graph_3000_data.txt')

        initG = commons.get_networkByFile(filename)
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        '''
        The full pipeline follows below (currently for 2 sources):
        1  extract the subgraph
        2  partition it
        3  locate a source in each partition
        '''

        # 1  Extract the subgraph. There are 3 extraction methods; which one should we pick?
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # 2  Partition. There are many partition methods; see which works best.

        result = self.findmultiplesource(infectG, subinfectG, sourceNumber=2)

        distance = commons.cal_distance(max_sub_graph, source_list, result)

        return distance
Example #10
    def main(self, filename, method):

        # Load the graph
        initG = commons.get_networkByFile(filename)
        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 1)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # Seed-node coverage happens here.
        ''' Approach 1: coverage/distance '''
        func = getattr(self, method)
        sort_partion = func(infectG, subinfectG, source_list[0])
        return sort_partion
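
main here dispatches to a detection method by name through getattr, so the caller picks the algorithm with a string. A self-contained illustration of that dispatch pattern (a toy class, not the repo's):

class Dispatcher:
    def greet(self, name):
        return 'hello ' + name

    def run(self, method, *args):
        # Resolve the method by its name at runtime, exactly as main() does with getattr.
        func = getattr(self, method)
        return func(*args)

print(Dispatcher().run('greet', 'world'))  # -> hello world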
Example #11
    def main(self, filename):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        initG = commons.get_networkByFile(filename)

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        self.judge_data(subinfectG)

        # single_source = commons.revsitionAlgorithm_singlueSource(subinfectG)
        distance_iter = nx.shortest_path_length(subinfectG)
        everynode_distance = []
        for node, node_distance in distance_iter:
            # print(node_distance)
            sort_list = sorted(node_distance.items(),
                               key=lambda x: x[1],
                               reverse=True)
            # print('sort_list',sort_list)
            everynode_distance.append([node, sort_list[0][0], sort_list[0][1]])
        # print('everynode_idstance',everynode_distance)
        sort_every_distance = sorted(everynode_distance,
                                     key=lambda x: x[2],
                                     reverse=True)
        print('sort_every_distance', sort_every_distance)

        # BFS from the two farthest nodes until the single-source position is found.

        # print(nx.shortest_path_length(infectG,source=single_source[0],target=sort_every_distance[0][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[0][1]))
        #
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[1][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[1][1]))
        #
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[2][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[2][1]))

        # Partition based on the farthest pair of nodes, then run single-source detection inside each partition.
        node_twolist = [[], []]
        lengthA_dict = nx.single_source_bellman_ford_path_length(
            subinfectG, sort_every_distance[0][0], weight='weight')
        lengthB_dict = nx.single_source_bellman_ford_path_length(
            subinfectG, sort_every_distance[0][1], weight='weight')
        for node in list(subinfectG.nodes):
            if lengthA_dict[node] > lengthB_dict[node]:  # This node is closer to b.
                node_twolist[1].append(node)
            elif lengthA_dict[node] < lengthB_dict[node]:
                node_twolist[0].append(node)
            # else:
            #     node_twolist[0].append(node)
            #     node_twolist[1].append(node)
        print('len(node_twolist[0]', len(node_twolist[0]))
        print('len(node_twolist[1]', len(node_twolist[1]))
        # Boundary nodes.
        bound_list = []
        for node_temp in list(infectG.nodes()):
            # print(node_temp)
            if infectG.node[node_temp]['SI'] == 2:
                neighbors_list = list(nx.neighbors(infectG, node_temp))
                neighbors_infect_list = [
                    x for x in neighbors_list if infectG.node[x]['SI'] == 2
                ]
                if len(neighbors_list) != 1 and len(
                        neighbors_infect_list) == 1:
                    # if  len(neighbors_infect_list) == 1:
                    bound_list.append(node_temp)

        print('bound_list size', len(bound_list))

        print('boundary nodes inside the infected subgraph:',
              len([x for x in bound_list if x in list(subinfectG.nodes())]))
        left = [x for x in bound_list if x in node_twolist[0]]
        right = [x for x in bound_list if x in node_twolist[1]]

        print('left', left)
        print('right', right)

        left_source = commons.revsitionAlgorithm_singlueSource_receive(
            subinfectG, left)
        right_source = commons.revsitionAlgorithm_singlueSource_receive(
            subinfectG, right)
        if set(left) < set(list(subinfectG.nodes())):
            print('left is inside the infected node set')
        if set(right) < set(list(subinfectG.nodes())):
            print('right is inside the infected node set')
        distance = commons.cal_distance(infectG,
                                        [left_source[0], right_source[0]],
                                        source_list)

        return distance
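
The boundary test above keeps infected nodes (SI == 2) whose degree is not 1 but that have exactly one infected neighbour. A compact equivalent of that predicate, written for current networkx where node attributes are read via G.nodes[...] (these listings use the older G.node[...] accessor), might look like:

def boundary_nodes(infectG):
    # Infected nodes with degree other than 1 but exactly one infected neighbour.
    bound = []
    for n in infectG.nodes():
        if infectG.nodes[n].get('SI') != 2:
            continue
        neighbours = list(infectG.neighbors(n))
        infected_neighbours = [v for v in neighbours if infectG.nodes[v].get('SI') == 2]
        if len(neighbours) != 1 and len(infected_neighbours) == 1:
            bound.append(n)
    return bound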
Example #12
    def main(self, filename):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        initG = commons.get_networkByFile(filename)

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        '''
        Idea:
        1  Use probability p = 0.5 and different times t to generate many candidate
           sets (assume the time is known).
        2  Use these sets to cover the infected region, maximising the blue (infected)
           nodes covered while keeping the red region small.
           The same method can also be used to pick seed nodes.
        '''

        # 1  Extract the subgraph. There are 3 extraction methods; which one should we pick?
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # 2  Partition. There are many partition methods; see which works best.

        self.product_many_region(0.5, 6, infectG, subinfectG, source_list)

        partion_graph_object = Partion_graph.Partion_graph()
        result = partion_graph_object.other_k_center(infectG,
                                                     subinfectG,
                                                     source_list,
                                                     source_number=2)

        single_Source_detection_object = single_Source_detection.Single_source(
        )
        print('result', result)
        # Build a graph for each region from the true propagation subgraph.
        result_source_list = []
        '''
            3  For each region returned by step 2, locate its source.
        '''
        for community in result:
            subsubinfectG = nx.Graph()
            for edge in list(subinfectG.edges()):
                if edge[0] in community and (edge[1] in community):
                    subsubinfectG.add_edge(edge[0], edge[1])
            # Check whether this subgraph is connected.
            maxsubsubinfectG = self.judge_data(subsubinfectG)
            # Start single-source detection.
            '''jar center'''
            # source_node = single_Source_detection_object.revsitionAlgorithm_singlueSource(maxsubsubinfectG)
            source_node = single_Source_detection_object.single_source_bydistance_coverage(
                infectG, maxsubsubinfectG)
            #
            # source_node = single_Source_detection_object.single_source_bydistance(maxsubsubinfectG)

            result_source_list.append(source_node[0])

        distance = commons.cal_distance(max_sub_graph, source_list,
                                        result_source_list)

        return distance
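
product_many_region is not shown in this excerpt. The first step described in the docstring, generating many candidate infected sets from a probability p = 0.5 and different times t, can be sketched with a simple synchronous SI-style simulation (a hypothetical helper; it assumes an unweighted graph and a per-edge infection probability p in every round):

import random

def simulate_si(G, source, p, t, seed=None):
    # One SI cascade of t rounds from `source`; each infected node tries to infect
    # each still-susceptible neighbour with probability p in every round.
    rng = random.Random(seed)
    infected = {source}
    for _ in range(t):
        frontier = set()
        for u in infected:
            for v in G.neighbors(u):
                if v not in infected and rng.random() < p:
                    frontier.add(v)
        infected |= frontier
    return infected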
Example #13
    def main(self):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../data/3regular_tree1000.txt')
        initG = commons.get_networkByFile(
            '../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        '''
        The full pipeline follows below (currently for 2 sources):
        1  extract the subgraph
        2  partition it
        3  locate a source in each partition
        '''

        # 1  Extract the subgraph. There are 3 extraction methods; which one should we pick?
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # 2  Partition. There are many partition methods; see which works best.
        '''2.1  Different'''

        result = self.Parttion_Different_Time(infectG, sourceNumber=2)
        print('result', len(result))
        single_Source_detection_object = single_Source_detection.Single_source(
        )
        print('result', result)
        # Build a graph for each region from the true propagation subgraph.
        result_source_list = []
        '''
            3  For each region returned by step 2, locate its source.
        '''
        for community_node, community in result:
            subsubinfectG = nx.Graph()
            for edge in list(subinfectG.edges()):
                if edge[0] in community and (edge[1] in community):
                    subsubinfectG.add_edge(edge[0], edge[1])
            # Check whether this subgraph is connected.
            maxsubsubinfectG = self.judge_data(subsubinfectG)
            # Start single-source detection.

            source_node = single_Source_detection_object.revsitionAlgorithm_singlueSource(
                maxsubsubinfectG)
            result_source_list.append(source_node[0])

        distance = commons.cal_distance(max_sub_graph, source_list,
                                        result_source_list)

        return distance
Example #14
    def main(self, filename):
        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        initG = commons.get_networkByFile(filename)

        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        # initG = commons.get_networkByFile('../../../data/email-Eu-core.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
        infectG = commons.propagation1(max_sub_graph, source_list)

        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        self.judge_data(subinfectG)
        '''
        Idea:
        1  Use probability p = 0.5 and different times t to generate many candidate
           sets (assume the time is known).
        2  Use these sets to cover the infected region, maximising the blue (infected)
           nodes covered while keeping the red region small.
           The same method can also be used to pick seed nodes.
        '''

        # 1  Extract the subgraph. There are 3 extraction methods; which one should we pick?
        subinfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # 2  Partition. There are many partition methods; see which works best.
        partion_graph_object = Partion_graph.Partion_graph()

        # Start partitioning. How?
        # Use the boundary nodes: for the two parts, can we find two nodes that minimise the distance to the boundary nodes?

        #
        # result = partion_graph_object.other_k_center(infectG, subinfectG, source_list, source_number=2)
        #

        # single_source = commons.revsitionAlgorithm_singlueSource(subinfectG)
        distance_iter = nx.shortest_path_length(subinfectG)
        everynode_distance = []
        for node, node_distance in distance_iter:
            # print(node_distance)
            sort_list = sorted(node_distance.items(),
                               key=lambda x: x[1],
                               reverse=True)
            # print('sort_list',sort_list)
            everynode_distance.append([node, sort_list[0][0], sort_list[0][1]])
        # print('everynode_idstance',everynode_distance)
        sort_every_distance = sorted(everynode_distance,
                                     key=lambda x: x[2],
                                     reverse=True)
        print('sort_every_distance', sort_every_distance)

        # BFS from the two farthest nodes until the single-source position is found.

        # print(nx.shortest_path_length(infectG,source=single_source[0],target=sort_every_distance[0][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[0][1]))
        #
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[1][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[1][1]))
        #
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[2][0]))
        # print(nx.shortest_path_length(infectG, source=single_source[0], target=sort_every_distance[2][1]))

        # Partition based on the farthest pair of nodes, then run single-source detection inside each partition.
        node_twolist = [[], []]
        lengthA_dict = nx.single_source_bellman_ford_path_length(
            subinfectG, sort_every_distance[0][0], weight='weight')
        lengthB_dict = nx.single_source_bellman_ford_path_length(
            subinfectG, sort_every_distance[0][1], weight='weight')
        for node in list(subinfectG.nodes):
            if lengthA_dict[node] > lengthB_dict[node]:  # This node is closer to b.
                node_twolist[1].append(node)
            elif lengthA_dict[node] < lengthB_dict[node]:
                node_twolist[0].append(node)
            # else:
            #     node_twolist[0].append(node)
            #     node_twolist[1].append(node)
        print('len(node_twolist[0]', len(node_twolist[0]))
        print('len(node_twolist[1]', len(node_twolist[1]))
        # Boundary nodes.
        bound_list = []
        for node_temp in list(infectG.nodes()):
            # print(node_temp)
            if infectG.node[node_temp]['SI'] == 2:
                neighbors_list = list(nx.neighbors(infectG, node_temp))
                neighbors_infect_list = [
                    x for x in neighbors_list if infectG.node[x]['SI'] == 2
                ]
                if len(neighbors_list) != 1 and len(
                        neighbors_infect_list) == 1:
                    # if  len(neighbors_infect_list) == 1:
                    bound_list.append(node_temp)

        print('bound_list size', len(bound_list))

        print('boundary nodes inside the infected subgraph:',
              len([x for x in bound_list if x in list(subinfectG.nodes())]))
        left = [x for x in bound_list if x in node_twolist[0]]
        right = [x for x in bound_list if x in node_twolist[1]]

        print('left', left)
        print('right', right)

        left_source = commons.revsitionAlgorithm_singlueSource_receive(
            subinfectG, left)
        right_source = commons.revsitionAlgorithm_singlueSource_receive(
            subinfectG, right)
        if set(left) < set(list(subinfectG.nodes())):
            print('left is inside the infected node set')
        if set(right) < set(list(subinfectG.nodes())):
            print('right is inside the infected node set')
        distance = commons.cal_distance(infectG,
                                        [left_source[0], right_source[0]],
                                        source_list)

        return distance
Example #15
    def Partion_graph_K_center_seed(self,
                                    G,
                                    subinfectG,
                                    true_source_list,
                                    source_number_=2):
        # Start partitioning; output each region's nodes and edges. Currently two sources.

        sort_list = commons.partion_layer_dict_bfs(G,
                                                   subinfectG,
                                                   2,
                                                   number_layer=10)  # Layering

        first_layer = [x[0] for x in sort_list]  # Use the first-layer nodes.
        # First check whether the true sources are in the first layer.
        b = set(true_source_list)
        print('Are the true sources in the first layer?', b.issubset(first_layer))
        print('Number of first-layer nodes:', len(first_layer))
        subinfectG = commons.get_subGraph_true(G)  # Get the true propagation subgraph.
        # Check connectivity.
        self.judge_connect(subinfectG)
        print('If the subgraph is not connected, this method breaks; it must be connected.')
        two_source = random.sample(first_layer, 2)  # Randomly pick 2 elements from the list.

        flag = 1
        lengthA_B = 10000
        good_two_result = []
        best_node_two_result = None

        averageA = 1
        averageB = 1
        for iter in range(0, 100):
            # Compute every node's shortest-path distance to these two candidate sources.
            print('two_source', two_source)
            lengthA_dict = nx.single_source_bellman_ford_path_length(
                subinfectG, two_source[0], weight='weight')
            lengthB_dict = nx.single_source_bellman_ford_path_length(
                subinfectG, two_source[1], weight='weight')
            # Count the nodes equidistant from the two candidates.
            count = 0
            for node, distance in lengthB_dict.items():
                if lengthA_dict[node] == lengthB_dict[node]:
                    count += 1
            print('Number of equidistant nodes:', count)
            # Initialise two collections to hold the two node partitions.
            node_twolist = [[], []]  # The two node partitions.
            node_diff_twolist = [[], []]  # Nodes strictly closer to one source than the other.
            count = 0
            for node in list(subinfectG.nodes):
                if lengthA_dict[node] > lengthB_dict[node]:  # This node is closer to b.
                    node_twolist[1].append(node)
                    node_diff_twolist[1].append(node)
                elif lengthA_dict[node] < lengthB_dict[node]:
                    node_twolist[0].append(node)
                    node_diff_twolist[0].append(node)
                else:
                    node_twolist[0].append(node)
                    node_twolist[1].append(node)
                    count += 1
            print('node_twolist1 ', len(node_twolist[1]))
            print('node_twolist2 ', len(node_twolist[0]))
            print('Number of shared (equidistant) nodes:', count)
            # Find a centre within each list; several centrality measures (or a faster heuristic) could be used.
            # Judge whether the two centres found this round are any good.

            lengthA_sum = 0  # Sum of cross-distances for nodes closer to a.
            lengthB_sum = 0
            for i in node_diff_twolist[0]:  # Nodes closer to a (the first source): sum their distances to the other candidate b.
                lengthA_sum += lengthB_dict[i]
            for j in node_diff_twolist[1]:  # Nodes closer to b (the second source): sum their distances to the other candidate a.
                lengthB_sum += lengthA_dict[j]

            print('Computing average distances')
            average_lengthA = lengthA_sum / len(node_diff_twolist[0])
            average_lengthB = lengthB_sum / len(node_diff_twolist[1])

            sums = lengthA_sum + lengthB_sum
            print(lengthA_B)
            print('Accept the pair if both average distances increased.')
            if average_lengthA > averageA and average_lengthB > averageB:

                averageA = average_lengthA
                averageB = average_lengthB
                print('sums', sums)
                # These two sources are better than the previous pair.
                print('node_diff_twolist', len(node_diff_twolist[0]))
                print('node_diff_twolist', len(node_diff_twolist[1]))
                print('Updated sums:', sums)
                lengthA_B = sums
                good_two_result = two_source
                best_node_two_result = node_twolist

            else:
                # Resample two sources. This could still be optimised (the direction of the search).
                two_source = random.sample(first_layer, 2)
                print('Newly sampled sources:', two_source)
        print('Total number of nodes in the propagation subgraph:', len(list(subinfectG.nodes())))
        print('len(best_node_two_result[0])', len(best_node_two_result[0]))
        print('len(best_node_two_result[1])', len(best_node_two_result[1]))

        print('Size of the intersection of the two regions:')
        print(
            'LEN',
            len([
                x for x in best_node_two_result[0]
                if x in best_node_two_result[1]
            ]))
        print('good_two_result', good_two_result)
        print(
            'short_length',
            nx.shortest_path_length(subinfectG,
                                    source=good_two_result[0],
                                    target=good_two_result[1]))
        print('good_node_two_result', best_node_two_result)

        return [[good_two_result[0], best_node_two_result[0]],
                [good_two_result[1], best_node_two_result[1]]]
Example #16



if __name__ == '__main__':
    # Load the graph
    initG = commons.get_networkByFile('../../../data/3regular_tree9.txt')
    max_sub_graph = commons.judge_data(initG)
    # source_list = product_sourceList(max_sub_graph, 2)
    source_list = commons.product_sourceList(max_sub_graph, 1)
    # print('Distance between the two nodes', nx.shortest_path_length(max_sub_graph, source=source_list[0], target=source_list[1]))
    infectG, T = commons.propagation1(max_sub_graph,source_list)
    # infectG1, T = commons.propagation1(max_sub_graph, [source_list])
    subInfectG = commons.get_subGraph_true(infectG)  # Keep only infected nodes (SI == 2): the true infection subgraph.

    # result_node = rumor_center(infectG, subinfectG, source_list[0])

    # Build two lists from the graph: one with the infected nodes, one with the infected nodes plus their neighbours.
    # infect_node = []
    # infect_neighbour_list = []
    # print(infectG.number_of_nodes())
    # random_node = random.choice(list(subInfectG.nodes()))
    # subinfectG_temp = nx.bfs_tree(subInfectG, source=source_list[0])
    # subinfectG = subinfectG_temp.to_undirected()

    jordan_center_object = jordan()
    center = jordan_center_object.jordan_centrality(subInfectG)

    print('center', center)
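
For a connected graph, the Jordan center is the set of nodes with minimum eccentricity, which is exactly what networkx calls the graph center. So, assuming subInfectG is connected, a quick cross-check of jordan_centrality is:

import networkx as nx

# Nodes whose eccentricity equals the graph radius, i.e. the Jordan center set.
print('networkx center:', nx.center(subInfectG))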
Example #17
    def main(self, filename):

        # Load the graph
        # subGraph=self.get_Graph('../Propagation_subgraph/many_methods/result/chouqu.txt')
        # initG = commons.get_networkByFile('../../../data/CA-GrQc.txt')
        # initG = commons.get_networkByFile('../../../data/3regular_tree1000.txt')
        initG = commons.get_networkByFile(filename)
        # initG = commons.get_networkByFile('../../../data/4_regular_graph_3000_data.txt')

        max_sub_graph = commons.judge_data(initG)
        # source_list = product_sourceList(max_sub_graph, 2)
        source_list = commons.product_sourceList(max_sub_graph, 2)
        print(
            'Distance between the two sources:',
            nx.shortest_path_length(max_sub_graph,
                                    source=source_list[0],
                                    target=source_list[1]))
        # Check how well the partition works: propagate twice,
        # labelling the first cascade 3 and the second 4; nodes reached by both get 5.
        infectG = commons.propagation_dif_sigl(max_sub_graph, source_list[0],
                                               3)  # 3 marks the first source's cascade.
        infectG_other = commons.propagation_dif_sigl(infectG, source_list[1],
                                                     4)  # 4 marks the second source's cascade;
        # if the two labels overlap, the node gets 5.

        # Extract the nodes labelled 3, 4 and 5.
        node_list3 = []
        node_list4 = []
        node_list5 = []
        for nodes in list(infectG_other.nodes):
            if infectG_other.node[nodes]['SIDIF'] == 3:
                node_list3.append(nodes)
            elif infectG_other.node[nodes]['SIDIF'] == 4:
                node_list4.append(nodes)
            elif infectG_other.node[nodes]['SIDIF'] == 5:
                node_list5.append(nodes)
        node_list3.extend(node_list5)
        node_list4.extend(node_list5)

        print('first——node_list3', len(node_list3))
        print('second——node_list4', len(node_list4))
        print('common_node_list', len(node_list5))

        subinfectG = commons.get_subGraph_true(
            infectG_other)  # Keep only infected nodes (SI == 2): the true infection subgraph.
        # Then connect all edges between infected nodes.
        # First method.
        '''
        This is the place to experiment with the propagation-partition variants;
        get the 2-source partition working first.
        '''
        # twosource_node_list =self.Partion_graph_K_center(infectG_other,source_list,2)
        # Third method.

        # twosource_node_list = self.Partion_graph_K_center_seed(infectG_other,subinfectG,source_list,2)
        # twosource_node_list = self.other_k_center(infectG_other, subinfectG, source_list, 2)

        twosource_node_list = self.randmo_BFS(infectG_other, subinfectG,
                                              source_list)
        # Coverage-based walk plus the jaya algorithm.
        # twosource_node_list=self.jaya_add_coverage(infectG_other)
        # Edge-removal variant.
        # twosource_node_list = self.delete_high_betweenness_edge_centrality_second(infectG_other)
        # print(twosource_node_list)
        # return self.verification(twosource_node_list, [node_list3, node_list4])

        #
        # Method 7.
        #
        # twosource_node_list = self.label_progration_community(subinfectG)
        # print(twosource_node_list)
        # return self.verification(twosource_node_list,[node_list3,node_list4])
        #
        # Method 4: check whether the high-betweenness nodes act as the boundary; measure the proportion.
        # # twosource_node_list= self.delete_high_betweenness_centrality(infectG_other)
        # #
        #

        # Last method: the partition-based one.

        # twosource_node_list = self.other_k_center(subinfectG)
        # print(twosource_node_list)
        return self.verification(twosource_node_list, [node_list3, node_list4])
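
verification is not included in this excerpt. A plausible stand-in for scoring a 2-way partition against the labelled regions node_list3 and node_list4 is the best-matching Jaccard overlap; the helper below is hypothetical and assumes the predicted regions are plain node collections, which may differ from what randmo_BFS actually returns:

def region_overlap(pred_regions, true_regions):
    # Jaccard similarity of two node collections.
    def jaccard(a, b):
        a, b = set(a), set(b)
        return len(a & b) / len(a | b) if (a | b) else 0.0

    # Score both ways of pairing predicted with true regions and keep the better one,
    # since the two sources are interchangeable.
    direct = (jaccard(pred_regions[0], true_regions[0]) +
              jaccard(pred_regions[1], true_regions[1])) / 2
    swapped = (jaccard(pred_regions[0], true_regions[1]) +
               jaccard(pred_regions[1], true_regions[0])) / 2
    return max(direct, swapped)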