Example #1
    def detect(self):
        """detect the source with GSBA.
        Returns:
            @rtype:int
            the detected source
        """

        '''
        When is this called? It is what the formula requires; for other
        methods this term would differ. So the greedy search works with two
        priors: one is rumor centrality, the other is something else. What
        we add is one more prior, e.g. a coverage-based one: mimic the
        existing rumor-localization approach, write our own detector, and
        plug it in as a prior.
        '''
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        # print('先验检测器是什么?')
        # print(self.prior)

        # rumor centrality
        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph, 'centrality')
        # print('add the prior and see how it works')

        # weighted EPA centrality
        self.reset_centrality()
        epa_weight_object = epa2.EPA_center_weight()
        epa_weight_object.set_data(self.data)
        epa_weight_object.detect()
        epa_weight_centralities = nx.get_node_attributes(self.subgraph, 'centrality')

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        # print(infected_nodes)
        # print('infected_nodes')

        posterior = {}
        included = set()
        neighbours = set()
        weights = self.data.weights
        for v in infected_nodes:
            # print('------ starting from node v ----------')
            # print(v)
            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            w = {}  # effective propagation probabilities: node->w
            w_key_sorted = blist()
            w[v] = 1
            w_key_sorted.append(v)
            while len(included) < n:
                # print('frontier used to compute w_sum')
                # print(neighbours)
                w_sum = sum([w[j] for j in neighbours])
                u = w_key_sorted.pop()  # pop out the last element from w_key_sorted with the largest w
                likelihood *= w[u] / w_sum
                # print('denominator?')
                # print(w_sum)
                # print('likelihood')
                # print(likelihood)
                included.add(u)
                neighbours.remove(u)
                new = nx.neighbors(self.data.graph, u)
                # print('new, i.e. neighbours of u in the full graph')
                # print(new)
                for h in new:
                    # print('visiting neighbour h')
                    # print(h)
                    if h in included:
                        continue
                    neighbours.add(h)
                    # compute w for h
                    w_h2u = weights[self.data.node2index[u], self.data.node2index[h]]
                    # w_h2u = weights[self.data.node2index[u]][self.data.node2index[h]]
                    if h in w.keys():
                        # print('------')
                        # print(w[h])
                        # print(w_h2u)
                        # print(h)
                        # print(epa_weight_centralities)
                        w[h] = 1 - (1 - w[h]) * (1 - w_h2u)
                        # print('w[h] when h is already in w')
                        # print(w[h])
                    else:
                        # print('h is not in w yet')
                        w[h] = w_h2u
                        # print(w[h])

                    # print('current w')
                    # print(w)
                    # h_neighbor = nx.neighbors(self.data.graph, h)
                    # w_h = 1
                    # for be in included.intersection(h_neighbor):
                    #     w_h *= 1 - self.data.get_weight(h, be)
                    # w[h] = 1 - w_h
                    """insert h into w_key_sorted, ranking by w from small to large"""
                    if h in infected_nodes:
                        # print('inserting into the sorted list')
                        if h in w_key_sorted:
                            w_key_sorted.remove(h)  # remove the old w[h]
                        k = 0

                        while k < len(w_key_sorted):
                            if w[w_key_sorted[k]] > w[h]:
                                break
                            k += 1
                        # print(w_key_sorted)
                        w_key_sorted.insert(k, h)  # insert in ascending order of w
                        # print('w_key_sorted')
                        # print(w_key_sorted)

                        # w_key_sorted[k:k] = [h]
            # print('which node did this round start from?')
            # print(v)
            # print('likelihood for this start node')
            # print(likelihood)
            print('parameter values for the current subgraph size')
            print(len(self.subgraph.nodes()))
            print(decimal.Decimal(self.prior[v]))
            print(decimal.Decimal(likelihood))
            print(rumor_centralities[v])
            print(epa_weight_centralities[v])

            posterior[v] = (decimal.Decimal(self.prior[v]) * decimal.Decimal(likelihood) *
                            rumor_centralities[v] * epa_weight_centralities[v])
            print(posterior[v])

        # print('w_key_sorted')
        # print(w_key_sorted)
        #
        # print('------------')
        # print(coverage_centralities)
        # print('inspect the posterior here')
        # print(posterior)
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
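
The update w[h] = 1 - (1 - w[h]) * (1 - w_h2u) above is a noisy-OR combination: the probability that h is reached through at least one of its already-included neighbours. A minimal, self-contained sketch of just that step (the helper name combine_or and the toy weights are ours, not the project's):

def combine_or(w_h, w_h2u):
    """Noisy-OR: probability that h is reached through at least one of
    its already-included neighbours."""
    return 1 - (1 - w_h) * (1 - w_h2u)

# Two independent edges reaching h with probabilities 0.3 and 0.5
# combine to 1 - 0.7 * 0.5 = 0.65.
assert abs(combine_or(0.3, 0.5) - 0.65) < 1e-12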
Example #2
    def detect(self):
        """detect the source with GSBA.
        Returns:
            @rtype:int
            the detected source
        """
        '''
        When is this called? It is what the formula requires; for other
        methods this term would differ. So the greedy search works with two
        priors: one is rumor centrality, the other is something else. What
        we add is one more prior, e.g. a coverage-based one: mimic the
        existing rumor-localization approach, write our own detector, and
        plug it in as a prior.
        '''
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        # print('what is the prior detector?')
        # print(self.prior)

        # weighted EPA centrality
        self.reset_centrality()
        epa_weight_object = epa2.EPA_center_weight()
        epa_weight_object.set_data(self.data)
        epa_weight_object.detect()
        epa_weight_centralities = nx.get_node_attributes(
            self.subgraph, 'centrality')

        # rumor centrality
        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph,
                                                    'centrality')
        # print('add the prior and see how it works')

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        # print(infected_nodes)
        # print('infected_nodes')

        posterior = {}
        for v in infected_nodes:
            posterior[v] = (decimal.Decimal(self.prior[v]) *
                            epa_weight_centralities[v])

        # print('w_key_sorted')
        # print(w_key_sorted)
        #
        # print('------------')
        # print(coverage_centralities)
        # print('inspect the posterior here')
        # print(posterior)
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
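
Both variants multiply several probability-like scores, which underflows 64-bit floats once the infected subgraph grows; that is why the posteriors are accumulated as decimal.Decimal. A small sketch with toy numbers (not project data):

import decimal

p_float = 1.0
p_dec = decimal.Decimal(1)
for _ in range(500):
    p_float *= 1e-3                   # 500 factors of 1e-3
    p_dec *= decimal.Decimal('1e-3')

print(p_float)  # 0.0 -- underflowed (floats bottom out near 5e-324)
print(p_dec)    # 1E-1500 -- exact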
Example #3
                            rumor_centralities[v] * epa_weight_centralities[v])
            print(posterior[v])

        # print('w_key_sorted')
        # print(w_key_sorted)
        #
        # print('------------')
        # print(coverage_centralities)
        # print('inspect the posterior here')
        # print(posterior)
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()


if __name__ == "__main__":
    prior_detector1 = rc.RumorCenter()

    # gsba =GSBA(prior_detector1)
    methods = [GSBA_coverage_3(prior_detector1)]
    logger = log.Logger(logname='../data/main_test.log', loglevel=logging.INFO,
                        logger="experiment").get_log()

    experiment = Experiment(methods, logger)
    experiment.propagation_model = 'SI'
    start_time = clock()
    print "Starting..."
    # data is our graph; we can manipulate it. Build a simple graph and check the result.
    ''' 
    1. Build the example graph
    2. Given the propagation subgraph
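
The closing comment sketches the test plan: build an example graph, then hand the detector a propagation subgraph. A minimal illustration of that setup with networkx (the toy graph and source node are ours; the project's Experiment wiring is omitted):

import networkx as nx

# 1. Build the example graph.
g = nx.karate_club_graph()
# 2. Take the infected subgraph: everything within 2 hops of an assumed
#    source node 0.
infected = set(nx.single_source_shortest_path_length(g, 0, cutoff=2))
subgraph = g.subgraph(infected)
print(subgraph.number_of_nodes())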
Example #4
import map_gsba as gsba
import map_gsba_old as gsba_old
import reverse_infection as ri
import rumor_center as rc
import dmp2
import map_bfsa_parallel as bfsa_p
import prior
import numpy as np
from experiment import Experiment

import map_ulbaa as ulbaa

# The modules below are used in __main__ but were missing from this
# fragment; the module names are assumed from the aliases in use.
import distance_center as dc
import jordan_center as jc
import log
import logging
from time import clock

if __name__ == '__main__':

    prior_detector0 = prior.Uniform()
    prior_detector1 = rc.RumorCenter()
    prior_detector2 = dmp2.DynamicMessagePassing()
    prior_detector3 = dc.DistanceCenter()
    prior_detector4 = jc.JordanCenter()
    prior_detector5 = ri.ReverseInfection()
    methods = [rc.RumorCenter(), dc.DistanceCenter(), jc.JordanCenter(),
               gsba.GSBA(prior_detector1), gsba.GSBA(prior_detector3), gsba.GSBA(prior_detector4)]
    # methods = [dc.DistanceCenter()]
    #methods = [bfsa_p.BFSA(prior_detector1)]
    # methods = [dmp2.DynamicMessagePassing()]

    logger = log.Logger(logname='../data/main_power_grid1202.log', loglevel=logging.INFO, logger="experiment").get_log()
    experiment = Experiment(methods, logger)
    experiment.propagation_model = 'SI'

    start_time = clock()
Example #5
    def detect(self):
        """detect the source with GSBA.

        Returns:
            @rtype:int
            the detected source
        """
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph, 'centrality')

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        posterior = {}
        included = set()
        neighbours = set()
        weights = self.data.weights
        for v in infected_nodes:
            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            w = {}  # effective propagation probabilities: node->w
            w[v] = 1
            w_sum = 1
            ratio_max_node = v
            while len(included) < n:
                # w_sum = sum([w[j] for j in neighbours])
                u = ratio_max_node
                likelihood *= w[u] / w_sum
                included.add(u)
                neighbours.remove(u)
                new = nx.neighbors(self.data.graph, u)
                for h in new:
                    if h in included:
                        continue
                    neighbours.add(h)
                    # compute w for h
                    w_h2u = weights[self.data.node2index[u], self.data.node2index[h]]
                    if h in w.keys():
                        w[h] = 1 - (1 - w[h]) * (1 - w_h2u)
                    else:
                        w[h] = w_h2u

                w_sum = sum([w[j] for j in neighbours])
                ratio_max = 0.0

                """select the next node to maximize the ratio of w/sum"""
                for h in neighbours:
                    r = w_sum - w[h]
                    h_neighbors = nx.neighbors(self.data.graph, h)
                    for k in h_neighbors:
                        if k in included:
                            continue
                        w_h2k = weights[self.data.node2index[h], self.data.node2index[k]]
                        if k in neighbours:
                            # replace the old w[k] with its noisy-OR update
                            r = r - w[k] + 1 - (1 - w[k]) * (1 - w_h2k)
                        else:
                            r += w_h2k
                    r = w[h] / r
                    if r > ratio_max:
                        ratio_max = r
                        ratio_max_node = h

            posterior[v] = (decimal.Decimal(self.prior[v]) * decimal.Decimal(likelihood) *
                            rumor_centralities[v])
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
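
Written out, the quantity that `likelihood` accumulates in these greedy variants is, in our notation (not the project's):

\[
P(\sigma \mid v) \;\approx\; \prod_{t=2}^{n} \frac{w(u_t)}{\sum_{j \in \partial I_{t-1}} w(j)},
\]

where I_{t-1} is the set of nodes included after step t-1, \partial I_{t-1} its frontier, u_t the node picked at step t, and w the effective propagation probability maintained by the noisy-OR updates. Example #5 differs from Example #1 only in the choice of u_t: it maximizes the ratio of w(u_t) to the frontier sum that would result after including u_t, rather than simply taking the largest w.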
Example #6
        self.logger.info(self.ranking)
        for m in methods:
            l = len(self.precision[test][m.method_name]) * 1.0
            if l == 0:
                continue
            r = (sum(self.precision[test][m.method_name]) / l,
                 sum(self.error[test][m.method_name]) / l,
                 sum(self.topological_error[test][m.method_name]) / l,
                 sum(self.ranking[test][m.method_name]) / l,
                 m.method_name, l)
            print(r)
            logger.info(r)

if __name__ == '__main__':
    logger = log.Logger(logname='../data/test3.log', loglevel=logging.INFO, logger="experiment").get_log()
    experiment = Experiment()
    experiment.logger = logger

    prior_detector0 = prior.Uniform()
    prior_detector1 = rc.RumorCenter()
    prior_detector2 = dmp2.DynamicMessagePassing()
    prior_detector3 = dc.DistanceCenter()
    prior_detector4 = jc.JordanCenter()
    prior_detector5 = ri.ReverseInfection()
    methods = [rc.RumorCenter(), dc.DistanceCenter(), jc.JordanCenter(), ri.ReverseInfection(), prior_detector2,
               gsba.GSBA(prior_detector1), gsba.GSBA(prior_detector2), gsba.GSBA(prior_detector3),
               gsba.GSBA(prior_detector4), gsba.GSBA(prior_detector5)]
    methods = [rc.RumorCenter(), dc.DistanceCenter(), jc.JordanCenter(), ri.ReverseInfection(), di.DynamicImportance(), prior_detector2,
               gsba.GSBA(prior_detector0), gsba.GSBA(prior_detector1), gsba.GSBA(prior_detector3),
               gsba.GSBA(prior_detector4), gsba.GSBA(prior_detector5), gsba.GSBA(prior_detector2), bfsa_p.BFSA(prior_detector1)]
    # only the last assignment takes effect; the lists above are kept for quick switching
    methods = [bfsa.BFSA(prior_detector1)]
    # methods = [dmp2.DynamicMessagePassing()]
    experiment.methods = methods

    start_time = clock()
Example #7
    def detect(self):
        """detect the source with RSA.

        Returns:
            @rtype:int
            the detected source
        """
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph,
                                                    'centrality')

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        posterior = {}
        included = set()
        neighbours = set()
        weights = self.data.weights
        for v in infected_nodes:
            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            w = {}  # effective propagation probabilities: node->w
            w_key_sorted = list()
            w[v] = 1
            w_key_sorted.append(v)
            while len(included) < n:
                w_sum = sum([w[j] for j in neighbours])
                u = w_key_sorted.pop()  # pop the last element, which has the largest w
                likelihood *= w[u] / w_sum
                included.add(u)
                neighbours.remove(u)
                new = nx.neighbors(self.data.graph, u)
                for h in new:
                    if h in included:
                        continue
                    neighbours.add(h)
                    # compute w for h
                    w_h2u = weights[self.data.node2index[u],
                                    self.data.node2index[h]]
                    # w_h2u = weights[self.data.node2index[u]][self.data.node2index[h]]
                    if h in w.keys():
                        w[h] = 1 - (1 - w[h]) * (1 - w_h2u)
                    else:
                        w[h] = w_h2u
                    # h_neighbor = nx.neighbors(self.data.graph, h)
                    # w_h = 1
                    # for be in included.intersection(h_neighbor):
                    #     w_h *= 1 - self.data.get_weight(h, be)
                    # w[h] = 1 - w_h
                    """insert h into w_key_sorted, ranking by w from small to large"""
                    if h in infected_nodes:
                        if h in w_key_sorted:
                            w_key_sorted.remove(h)  # remove the old w[h]
                        w_key_sorted.append(h)
                        #w_key_sorted[k:k] = [h]
            posterior[v] = (decimal.Decimal(self.prior[v]) *
                            decimal.Decimal(likelihood) *
                            rumor_centralities[v])
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
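
The GSBA examples keep w_key_sorted in ascending order of w with a hand-rolled linear scan. The standard bisect module gives the same order via a parallel list of weights; a sketch (the helper name sorted_insert is ours, and plain float weights are assumed):

import bisect

def sorted_insert(w_key_sorted, w_vals_sorted, h, w):
    """Keep w_key_sorted ascending in w[h]; w_vals_sorted is the
    parallel list of weights that bisect searches."""
    if h in w_key_sorted:  # drop the stale entry for h
        i = w_key_sorted.index(h)
        w_key_sorted.pop(i)
        w_vals_sorted.pop(i)
    k = bisect.bisect_right(w_vals_sorted, w[h])  # after any equal weights
    w_key_sorted.insert(k, h)
    w_vals_sorted.insert(k, w[h])

keys, vals = [], []
w = {'a': 0.2, 'b': 0.5, 'c': 0.3}
for node in ('a', 'b', 'c'):
    sorted_insert(keys, vals, node, w)
print(keys)  # ['a', 'c', 'b'] -- largest w is popped last, as above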
Example #8
    def detect(self):
        """detect the source with GSBA.
        Returns:
            @rtype:int
            the detected source
        """
        '''
        When is this called? It is what the formula requires; for other
        methods this term would differ. So the greedy search works with two
        priors: one is rumor centrality, the other is something else. What
        we add is one more prior, e.g. a coverage-based one: mimic the
        existing rumor-localization approach, write our own detector, and
        plug it in as a prior.
        '''
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        # print('what is the prior detector?')
        # print(self.prior)

        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph,
                                                    'centrality')

        self.reset_centrality()

        # collect the boundary nodes.
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        bound_list = []
        for node in infected_nodes:
            neig = nx.neighbors(self.data.graph, node)
            infect_ne = len([x for x in neig if x in infected_nodes])
            if infect_ne == 1:
                bound_list.append(node)
        print('bound_list')
        print(bound_list)

        # infected_nodes_new = set(simple_subgraph.nodes())
        # n = len(infected_nodes_new)
        posterior = {}
        included = set()
        neighbours = set()
        weights = self.data.weights
        for v in infected_nodes:
            path_list = [v]
            for bound_node in bound_list:
                path = nx.bidirectional_shortest_path(self.subgraph,
                                                      source=v,
                                                      target=bound_node)
                print('path')
                print(path)
                path_list.extend(path)
            simple_subgraph = self.data.graph.subgraph(set(path_list))

            print('start computing the likelihood')
            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            w = {}  # effective propagation probabilities: node->w
            w_key_sorted = blist()
            w[v] = 1
            w_key_sorted.append(v)
            while len(included) < n and len(w_key_sorted) > 0:
                # print('frontier used to compute w_sum')
                # print(neighbours)
                w_sum = sum([w[j] for j in neighbours])
                u = w_key_sorted.pop()  # pop the last element, which has the largest w
                likelihood *= w[u] / w_sum  # product form, matching the other GSBA variants
                # print('denominator?')
                # print(w_sum)
                # print('likelihood')
                # print(likelihood)
                included.add(u)
                neighbours.remove(u)
                new = nx.neighbors(simple_subgraph, u)
                # print('new, i.e. neighbours of u in simple_subgraph')
                # print(new)
                for h in new:
                    # print('visiting neighbour h')
                    # print(h)
                    if h in included:
                        continue
                    neighbours.add(h)
                    # compute w for h
                    w_h2u = weights[self.data.node2index[u],
                                    self.data.node2index[h]]
                    # w_h2u = weights[self.data.node2index[u]][self.data.node2index[h]]
                    if h in w.keys():
                        # print('------')
                        # print(w[h])
                        # print(w_h2u)
                        w[h] = 1 - (1 - w[h]) * (1 - w_h2u)
                        # print('w[h] when h is already in w')
                        # print(w[h])
                    else:
                        # print('h is not in w yet')
                        w[h] = w_h2u
                        # print(w[h])

                    # print('current w')
                    # print(w)
                    # h_neighbor = nx.neighbors(self.data.graph, h)
                    # w_h = 1
                    # for be in included.intersection(h_neighbor):
                    #     w_h *= 1 - self.data.get_weight(h, be)
                    # w[h] = 1 - w_h
                    """insert h into w_key_sorted, ranking by w from small to large"""
                    if h in infected_nodes:
                        # print('inserting into the sorted list')
                        if h in w_key_sorted:
                            w_key_sorted.remove(h)  # remove the old w[h]
                        k = 0

                        while k < len(w_key_sorted):
                            if w[w_key_sorted[k]] > w[h]:
                                break
                            k += 1
                        # print(w_key_sorted)
                        w_key_sorted.insert(k, h)  # insert in ascending order of w
                        # print('w_key_sorted')
                        # print(w_key_sorted)

                        # w_key_sorted[k:k] = [h]
            # print('which node did this round start from?')
            # print(v)
            # print('likelihood for this start node')
            # print(likelihood)
            posterior[v] = (decimal.Decimal(self.prior[v]) *
                            decimal.Decimal(likelihood) *
                            rumor_centralities[v])

        # print('w_key_sorted')
        # print(w_key_sorted)
        #
        # print('------------')
        # print(coverage_centralities)
        # print('inspect the posterior here')
        # print(posterior)
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
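
Example #8 restricts the greedy walk to the union of shortest paths from v to the infection boundary, where a boundary node has exactly one infected neighbour (the infect_ne == 1 test). A self-contained illustration of that boundary test (toy graph; names are ours):

import networkx as nx

graph = nx.path_graph(6)   # nodes 0..5 in a line
infected = {1, 2, 3, 4}

boundary = [u for u in infected
            if sum(1 for x in graph.neighbors(u) if x in infected) == 1]
print(boundary)  # [1, 4] -- the two ends of the infected segment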
Example #9
    def detect(self):
        """detect the source with GSBA.

        Returns:
            @rtype:int
            the detected source
        """
        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph,
                                                    'centrality')

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)
        posterior = {}
        included = set()
        neighbours = set()
        # weights = self.data.weights
        weights = 0.1  # for unweighted graphs
        for v in infected_nodes:
            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            while len(included) < n - 1:
                max_num_bridge_edges = -1
                max_num_bridge_edges_ratio = -1
                max_id = None
                for u in neighbours:
                    if max_id is None:
                        max_id = u
                    count = 0
                    new = nx.neighbors(self.data.graph, u)
                    for h in new:
                        if h in included:
                            count += 1
                    temp = count * 1.0 / (len(neighbours) + len(new) -
                                          2 * count)
                    if temp > max_num_bridge_edges_ratio:
                        max_num_bridge_edges_ratio = temp
                        max_num_bridge_edges = count
                        max_id = u
                if max_num_bridge_edges == 0:
                    likelihood = 1
                else:
                    likelihood *= max_num_bridge_edges * 1.0 / len(neighbours)
                neighbours.remove(max_id)
                included.add(max_id)
                new = nx.neighbors(self.subgraph, max_id)
                for h in new:
                    if h not in included:
                        neighbours.add(h)
            posterior[v] = (decimal.Decimal(self.prior[v]) *
                            decimal.Decimal(likelihood) *
                            rumor_centralities[v])
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
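
Example #9 is an unweighted variant: each frontier node u is scored by count, its number of bridge edges back into the included set, normalized by len(neighbours) + len(new) - 2 * count, and the winner's bridge ratio multiplies the likelihood. A toy evaluation of that score (numbers are illustrative):

# u has degree 4 (len(new) == 4), 3 of its neighbours are already
# included (count == 3), and the frontier currently holds 5 nodes
# (len(neighbours) == 5):
count, frontier_size, degree = 3, 5, 4
score = count * 1.0 / (frontier_size + degree - 2 * count)
print(score)  # 3 / (5 + 4 - 6) = 1.0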
Example #10
    def detect(self):
        """detect the source with GSBA.
        Returns:
            @rtype:int
            the detected source
        """

        self.reset_centrality()
        self.prior_detector.set_data(self.data)
        self.prior_detector.detect()
        self.prior = nx.get_node_attributes(self.subgraph, 'centrality')

        '''
        rumor centrality as the prior
        '''
        self.reset_centrality()
        rc = rumor_center.RumorCenter()
        rc.set_data(self.data)
        rc.detect()
        rumor_centralities = nx.get_node_attributes(self.subgraph, 'centrality')
        # print('add the prior and see how it works')

        # '''
        # coverage factor
        # '''
        # self.reset_centrality()
        # cc_object = cc.CoverageCenter()
        # cc_object.set_data(self.data)
        # cc_object.detect()
        # coverage_centralities = nx.get_node_attributes(self.subgraph, 'centrality')
        # print('add the prior and see how it works')
        # print('coverage detector')
        # print(coverage_centralities)

        self.reset_centrality()
        infected_nodes = set(self.subgraph.nodes())
        n = len(infected_nodes)

        posterior = {}
        included = set()
        neighbours = set()
        weights = self.data.weights
        for v in infected_nodes:

            """find the approximate upper bound by greedy searching"""
            included.clear()
            neighbours.clear()
            included.add(v)
            neighbours.add(v)
            likelihood = 1
            w = {}  # effective propagation probabilities: node->w
            w_key_sorted = blist()
            w[v] = 1
            w_key_sorted.append(v)
            while len(included) < n:
                # print('frontier used to compute w_sum')
                # print(neighbours)
                w_sum = sum([w[j] for j in neighbours])
                u = w_key_sorted.pop()  # pop out the last element from w_key_sorted with the largest w
                likelihood *= w[u] / w_sum
                # print('denominator?')
                # print(w_sum)
                # print('likelihood')
                # print(likelihood)
                included.add(u)
                neighbours.remove(u)
                new = nx.neighbors(self.data.graph, u)
                # print('new, i.e. neighbours of u in the full graph')
                # print(new)
                for h in new:
                    # print('visiting neighbour h')
                    # print(h)
                    if h in included:
                        continue
                    neighbours.add(h)
                    # compute w for h
                    w_h2u = weights[self.data.node2index[u], self.data.node2index[h]]
                    # w_h2u = weights[self.data.node2index[u]][self.data.node2index[h]]
                    if h in w.keys():
                        # print('------')
                        # print(w[h])
                        # print(w_h2u)
                        w[h] = 1 - (1 - w[h]) * (1 - w_h2u)
                        # print('w[h] when h is already in w')
                        # print(w[h])
                    else:
                        # print('h is not in w yet')
                        w[h] = w_h2u
                        # print(w[h])

                    # print('current w')
                    # print(w)
                    # h_neighbor = nx.neighbors(self.data.graph, h)
                    # w_h = 1
                    # for be in included.intersection(h_neighbor):
                    #     w_h *= 1 - self.data.get_weight(h, be)
                    # w[h] = 1 - w_h
                    """insert h into w_key_sorted, ranking by w from small to large"""
                    if h in infected_nodes:
                        # print('inserting into the sorted list')
                        if h in w_key_sorted:
                            w_key_sorted.remove(h)  # remove the old w[h]
                        k = 0

                        while k < len(w_key_sorted):
                            if w[w_key_sorted[k]] > w[h]:
                                break
                            k += 1
                        # print(w_key_sorted)
                        w_key_sorted.insert(k, h)  # insert in ascending order of w
                        # print('w_key_sorted')
                        # print(w_key_sorted)

                        # w_key_sorted[k:k] = [h]
            # print('which node did this round start from?')
            # print(v)
            # print('likelihood for this start node')
            # print(likelihood)
            posterior[v] = (decimal.Decimal(self.prior[v]) * decimal.Decimal(likelihood) *
                            rumor_centralities[v])
            # print('what happens with two hundred nodes?')
            # print(posterior[v])
        # print('w_key_sorted')
        # print(w_key_sorted)

        # print('------------')
        # print(rumor_centralities)
        # print('inspect the posterior here')
        # print(posterior)
        nx.set_node_attributes(self.subgraph, 'centrality', posterior)
        return self.sort_nodes_by_centrality()
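
Every variant ends the same way: rank nodes by posterior[v] = prior * likelihood * rumor centrality and return the top node. A toy end-to-end combination with made-up numbers (illustrative only):

import decimal

prior = {'a': 0.2, 'b': 0.5}
likelihood = {'a': decimal.Decimal('1e-40'), 'b': decimal.Decimal('5e-41')}
rumor = {'a': 3, 'b': 4}

posterior = {v: decimal.Decimal(prior[v]) * likelihood[v] * rumor[v]
             for v in prior}
source = max(posterior, key=posterior.get)  # MAP estimate
print(source)  # 'b': 0.5 * 5e-41 * 4 = 1e-40 beats 0.2 * 1e-40 * 3 = 6e-41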