Example 1

    def _init_spread(self, first_time, patient_zero):
        if first_time:
            eigenvalue, _ = gt.eigenvector(self.g)
            self.spread_beta = 0.5
            self.spread_delta = 0.8
            print("Spreading:\n    Largest eigenvalue: {}\n    beta: {}\n    delta: {}"\
                  .format(eigenvalue, self.spread_beta, self.spread_delta))
            print("\n-----------------------------------------")

            for e in self.g.edges():
                self.e_spread_beta[e] = self.spread_beta

        for v in self.g.vertices():
            self.v_reinfected[v] = 0
            self.v_infected[v] = 0
            self.activate[v] = self.threshold
        for e in self.g.edges():
            self.e_reinfected[e] = 0

        for virus, patient in enumerate(patient_zero):
            self.v_infected[patient] = virus + 1
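
For reference, gt.eigenvector returns the largest eigenvalue of the (weighted) adjacency matrix together with a vertex property map of centralities; a minimal, self-contained sketch using one of graph-tool's bundled datasets:

import graph_tool.all as gt

g = gt.collection.data["karate"]           # small bundled example graph
eigenvalue, centrality = gt.eigenvector(g)
print("largest eigenvalue:", eigenvalue)
print("centrality of vertex 0:", centrality[g.vertex(0)])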
Example 2
    def _init_extinction(self, first_time):
        if first_time:
            eigenvalue, _ = gt.eigenvector(self.g)
            self.extinct_beta = 0.9 / eigenvalue
            self.extinct_delta = 0.9
            print("Extinction:\n    Largest eigenvalue: {}\n    beta: {}\n    delta: {}"\
                  .format(eigenvalue, self.extinct_beta, self.extinct_delta))
            print("\n-----------------------------------------")

            for e in self.g.edges():
                for i in range(self.diversity):
                    self.e_extinct_beta[i][e] = self.extinct_beta

        self.infected_list = [{v: None for v in self.g.vertices()}, {}]

        for v in self.g.vertices():
            for i in range(self.diversity):
                self.v_infected[i][v] = True
                self.v_reinfected[i][v] = 0
        for e in self.g.edges():
            for i in range(self.diversity):
                self.e_reinfected[i][e] = 0
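
A note on the choice beta = 0.9 / lambda_1, delta = 0.9: in SIS-style models the infection dies out when the effective strength s = beta * lambda_1 / delta falls below 1, so these values sit exactly at that threshold. A quick check, assuming the same parameterization (lambda_1 here is a stand-in value):

# effective epidemic strength s = beta * lambda_1 / delta
lambda_1 = 4.2                      # stand-in for gt.eigenvector(g)[0]
beta, delta = 0.9 / lambda_1, 0.9
s = beta * lambda_1 / delta
print(s)                            # 1.0 -- right at the die-out threshold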
Example 3
    def _init_spread(self, first_time, patient_zero):
        if first_time:
            eigenvalue, _ = gt.eigenvector(self.g)
            self.spread_beta = 0.1
            self.spread_delta = 0.1
            print("Spreading:\n    Largest eigenvalue: {}\n    beta: {}\n    delta: {}"\
                  .format(eigenvalue, self.spread_beta, self.spread_delta))
            print("\n-----------------------------------------")

            for e in self.g.edges():
                for i in range(self.diversity):
                    self.e_spread_beta[i][e] = self.spread_beta

        self.infected_list = [{
            patient_zero[i]: None
        } for i in range(self.diversity)]

        for v in self.g.vertices():
            for i in range(self.diversity):
                self.v_reinfected[i][v] = 0
                self.v_infected[i][v] = False
        for e in self.g.edges():
            for i in range(self.diversity):
                self.e_reinfected[i][e] = 0
Example 4
A, b = AdjacencyMatrix(ids, links)
lon, lat = lon[b], lat[b]
n = A.shape[0]
A[np.tril_indices(n)] = 0
edges = np.transpose(A.nonzero())
A = A[np.triu_indices(n, 1)]

print('get all link distances ..')
D, x = IntegerDistances(lat, lon)

print('derive link probability ..')
p = LinkProbability(A, D)

print('original measure ..')
g = Graph(edges, n)
_, v = gt.eigenvector(g)
vo = np.array(v.a)

nserns = 1000
var = np.zeros((nserns, len(vo)))

print('measure on SERNs ..')
for i in range(var.shape[0]):
    e = SernEdges(D, p, n)
    g = Graph(e, n)
    _, v = gt.eigenvector(g)
    v = np.array(v.a)
    var[i] = v

print('plot full example ..')
fg, ax = pl.subplots(2, 2, figsize=(19.2, 10.8))
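
One way the surrogate ensemble in var could then be summarized is a per-vertex z-score of the original centralities against the SERN distribution; a sketch using the names above (the original plotting code may do this differently):

# z-score of each vertex's centrality relative to the SERN ensemble
mu, sd = var.mean(axis=0), var.std(axis=0)
z = (vo - mu) / np.where(sd > 0, sd, 1.0)   # guard against zero variance
print('vertices with |z| > 2:', np.sum(np.abs(z) > 2))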
Example 5
def ev_centrality_dist(net, core, show_plot = False, save_plot = True, outfile = None):
    '''
    Calculate eigenvector centrality, an empirical CDF, and a ranking for each vertex.
    Plot both centrality x empirical CDF and centrality x ranking, highlighting core vertices.
    Note that the plot is saved to a file only if *both* `save_plot` is true and an
    output filename is given.

    :param net: The network whose centrality distribution we'd like to plot
    :param core: The property map of core vertices
    :param show_plot: Show the plot on the screen?
    :param save_plot: Save the plot as a file?
    :param outfile: Filename to use to save the plot

    :return: The CDF and ranking plots.
    '''
    # Calculate eigenvector centrality and write it into the graph
    print('Calculating eigenvector centrality')
    net.vp['evc'] = gt.eigenvector(net, epsilon=1e-03)[1]
    print('Done')
    # Extract them into a useful format
    eigen_central = net.vp['evc'].get_array().tolist()
    # x values: centralities
    centralities = list(set(eigen_central))
    # Use the ecdf to build the y values
    eigen_central_ecdf = ecdf(eigen_central)
    # Use 1-ecdf for legibility when most nodes have centrality near 0
    centrality_distribution = \
        [1 - eigen_central_ecdf(centrality) for centrality in centralities]
    # Write 1-ecdf into the graph
    net.vp['evc ecdf'] = \
        net.new_vertex_property('float',
            vals = [1 - eigen_central_ecdf(net.vp['evc'][vertex])
                        for vertex in net.vertices()])

    # Rank the vertices by eigenvector centrality
    vertex_ranking = len(eigen_central) - bn.rankdata(eigen_central) + 1
    # Write them into the graph
    net.vp['evc rank'] = net.new_vertex_property('int', vals = vertex_ranking)
    #print(vertex_ranking)
    print('Mapping rankings to centralities')
    # Map these against `centralities`:  
    #  for each degree, get the index of its first occurrence in the 
    #  vertex-level list `eigen_central`; that index corresponds to the 
    #  index in `vertex_ranking`
    ranking = [vertex_ranking[eigen_central.index(centrality)] 
                for centrality in centralities]
    
    # Combine into a single data frame
    centrality_dist = pd.DataFrame({'centrality': centralities,
                                    'density': centrality_distribution,
                                    'rank': ranking})
    #print(centrality_dist.head())

    # Grab centralities and rankings for the core vertices
    centralities_core = [net.vp['evc'][vertex] for vertex in core]
    centrality_distribution_core = [net.vp['evc ecdf'][vertex] for vertex in core]
    ranking_core = [net.vp['evc rank'][vertex] for vertex in core]
    centrality_dist_core = \
        pd.DataFrame({'centrality': centralities_core,
                        'density': centrality_distribution_core,
                        'rank': ranking_core})
    #print(centrality_dist_core)
    print('Summary statistics for core vertex centralities:')
    print(pd.DataFrame({k: summary(centrality_dist_core[k]) for k in centrality_dist_core}))
    
    # Build the plot
    density_plot = ggplot(aes(x = 'centrality'), data = centrality_dist) +\
            geom_area(aes(ymin = 0, ymax = 'density', fill = 'blue'), alpha = .3) +\
            geom_line(aes(y = 'density'), color = 'blue', alpha = .8) +\
            xlab('Eigenvector centrality') +\
            ylab('1 - Cumulative probability density') +\
            scale_x_log10() + scale_y_log10() +\
            theme_bw()
    # Add a rug for the core vertices
    density_plot = density_plot + \
        geom_point(aes(x = 'centrality', y = 'density'),
                shape = '+', size = 250, alpha = .8, color = 'red',
                data = centrality_dist_core)
    
    # If requested, show the plot
    if show_plot:
        print(density_plot)
    
    # Save to disk
    if outfile is not None and save_plot:
        ggsave(filename = outfile + '.evc_density' + '.pdf', plot = density_plot)
    
    # Same thing for degree x ranking
    ranking_plot = ggplot(aes(x = 'centrality'), data = centrality_dist) +\
            geom_area(aes(ymin = 0, ymax = 'rank', fill = 'blue'), alpha = .3) +\
            geom_line(aes(y = 'rank'), color = 'blue', alpha = .8) +\
            xlab('Eigenvector centrality') +\
            ylab('Rank') +\
            scale_x_log10() + scale_y_log10() +\
            theme_bw()
    ranking_plot = ranking_plot +\
        geom_point(aes(x = 'centrality', y = 'rank'),
                shape = '+', size = 250, alpha = .8, color = 'red',
                data = centrality_dist_core)
    if show_plot:
        print(ranking_plot)
    if outfile is not None and save_plot:
        ggsave(filename = outfile + '.evc_rank' + '.pdf', plot = ranking_plot)
    
    return density_plot, ranking_plot
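
The function above relies on module-level helpers such as ecdf (returning a callable empirical CDF) and summary; a minimal sketch of what such an ecdf could look like, in case the original helper is unavailable:

import numpy as np

def ecdf(values):
    # empirical CDF as a callable: fraction of samples <= x
    # (a sketch; the module's own helper may differ)
    xs = np.sort(np.asarray(values))
    n = len(xs)
    return lambda x: np.searchsorted(xs, x, side='right') / n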
Example 6
    def __init__(self, g, author, filename, entity_graph):  
                
        # Basics
        self.author_ = author
        self.edge_weights_ = g.edge_properties["weights"] 
        self.pos_ = g.vertex_properties["pos"]
        self.v_count_ = g.vertex_properties["v_count"]
        self.gt_graph_ = g
        self.filename_ = re.sub('_entgraph','',filename)
               
        # Number of edges and vertices, density
        self.num_edges_ = g.num_edges()
        self.num_edges  = g.num_edges()
        self.num_nodes_ = g.num_vertices()
        
        self.num_poss_edges_ = (self.num_nodes_*(self.num_nodes_-1))/2
        self.density_ = self.num_edges_/self.num_poss_edges_        
        self.density_norm = self.density_/self.num_edges_

        # Degree
        self.vertex_avg_, self.vertex_avg_var = gt.graph_tool.stats.vertex_average(g, "total")
        self.vertex_avg_in_, self.vertex_avg_in_var_ = gt.graph_tool.stats.vertex_average(g, "in")
        self.vertex_avg_out_, self.vertex_avg_out_var_ = gt.graph_tool.stats.vertex_average(g, "out")
        self.edge_avg, self.edge_avg_var = gt.graph_tool.stats.edge_average(g, eprop=self.edge_weights_)
        
        self.vertex_avg_norm = self.vertex_avg_/self.num_edges_
        self.edge_avg_norm = self.edge_avg/self.num_edges_

        # Vertex and edge histograms
        self.vertex_hist_ = gt.graph_tool.stats.vertex_hist(g, deg='total')
        self.vertex_hist_in_ = gt.graph_tool.stats.vertex_hist(g, deg='in', bins=range(0,self.num_nodes_))
        self.vertex_hist_out_ = gt.graph_tool.stats.vertex_hist(g, deg='out', bins=range(0,self.num_nodes_))
        self.edge_hist_ = gt.graph_tool.stats.edge_hist(g,eprop=self.edge_weights_, bins=np.arange(0.0,1.0,0.01))
        
        self.degrees_ = get_values_from_histo(self.vertex_hist_)
        self.degrees_mean, self.degrees_var, self.degrees_skew, self.degrees_kurtosis = get_moments(self.degrees_)

        self.degrees_in_ = get_values_from_histo(self.vertex_hist_in_)
        self.degrees_in_mean_, self.degrees_in_var, self.degrees_in_skew, self.degrees_in_kurtosis = get_moments(self.degrees_in_)

        self.degrees_out_ = get_values_from_histo(self.vertex_hist_out_)
        self.degrees_out_mean_, self.degrees_out_var, self.degrees_out_skew, self.degrees_out_kurtosis = get_moments(self.degrees_out_)

        self.weights_ = get_values_from_histo(self.edge_hist_)
        self.weights_mean, self.weights_var, self.weights_skew, self.weights_kurtosis = get_moments(self.weights_)
        
        self.degrees_mean_norm = self.degrees_mean/self.num_edges_
        self.weights_mean_norm = self.weights_mean/self.num_edges_
        
        self.edge_weights_mean_, self.edge_weights_var, self.edge_weights_skew, self.edge_weights_kurtosis = get_moments(self.edge_weights_.a)
        self.edge_weights_mean_norm = self.edge_weights_mean_/self.num_edges_                    
            
        # Distance metrics
        self.dist_histogram_ = gt.graph_tool.stats.distance_histogram(g, bins = range(0,10))
        self.avg_shortest_path = np.mean(get_values_from_histo(self.dist_histogram_))

        self.diameter = np.max(get_values_from_histo(self.dist_histogram_))
        self.pseudo_diameter_ = gt.pseudo_diameter(g)[0] 
        
        self.diameter_norm = self.diameter/self.num_edges_
        self.avg_shortest_path_norm = self.avg_shortest_path/self.num_edges_
        
        # Centrality measures
        self.max_eigen_, self.eigenvectors_ = gt.eigenvector(g, weight=self.edge_weights_)
        self.eigenvectors_ = self.eigenvectors_.a
        self.katz_ = gt.graph_tool.centrality.katz(g, weight=self.edge_weights_).a
        self.pageranks_ = gt.graph_tool.centrality.pagerank(g, weight=self.edge_weights_).a
        
        self.eigenvectors_mean, self.eigenvectors_var, self.eigenvectors_skew, self.eigenvectors_kurtosis = get_moments(self.eigenvectors_)
        self.katz_mean, self.katz_var, self.katz_skew, self.katz_kurtosis = get_moments(self.katz_)
        self.pageranks_mean, self.pageranks_var, self.pageranks_skew, self.pageranks_kurtosis = get_moments(self.pageranks_)

        self.eigenvectors_mean_norm = self.eigenvectors_mean/self.num_edges_
        self.katz_mean_norm = self.katz_mean/self.num_edges_
        self.pageranks_mean_norm = self.pageranks_mean/self.num_edges_
        
        # HITS: authority centrality, hub centrality
        self.hits_eig, self.auth_centr_, self.hub_centr_ = gt.graph_tool.centrality.hits(g, weight=self.edge_weights_)
        self.auth_centr_ = self.auth_centr_.a
        self.hub_centr_ = self.hub_centr_.a    

        self.auth_centr_mean, self.auth_centr_var, self.auth_centr_skew, self.auth_centr_kurtosis = get_moments(self.auth_centr_)
        self.hub_centr_mean, self.hub_centr_var, self.hub_centr_skew, self.hub_centr_kurtosis = get_moments(self.hub_centr_)

        self.hits_eig_norm = self.hits_eig/self.num_edges_
        self.auth_centr_mean_norm = self.auth_centr_mean/self.num_edges_
        self.hub_centr_mean_norm = self.hub_centr_mean/self.num_edges_

        # Closeness and betweenness
        self.closeness_ = gt.graph_tool.centrality.closeness(g, weight=self.edge_weights_)
        self.closeness_ = self.closeness_.a

        self.vertex_betweenness_ , self.edge_betweenness_ = gt.graph_tool.centrality.betweenness(g, weight=self.edge_weights_)
        self.vertex_betweenness_ = self.vertex_betweenness_.a
        self.edge_betweenness_ = self.edge_betweenness_.a

        self.closeness_mean_, self.closeness_var_, self.closeness_skew_, self.closeness_kurtosis_ = get_moments(self.closeness_)
        self.vertex_betweenness_mean, self.vertex_betweenness_var, self.vertex_betweenness_skew, self.vertex_betweenness_kurtosis = get_moments(self.vertex_betweenness_)
        self.edge_betweenness_mean, self.edge_betweenness_var, self.edge_betweenness_skew, self.edge_betweenness_kurtosis = get_moments(self.edge_betweenness_)
        
        self.vertex_betweenness_mean_norm = self.vertex_betweenness_mean/self.num_edges_
        self.edge_betweenness_mean_norm = self.edge_betweenness_mean/self.num_edges_            
            
        # Reciprocity
        self.edge_reciprocity_ = gt.graph_tool.topology.edge_reciprocity(g)
        self.edge_reciprocity_norm = self.edge_reciprocity_/self.num_edges_

        # Components
        self.largest_component = gt.graph_tool.topology.label_largest_component(g, directed=False).a
        self.fraction_largest_component_ =  np.sum(self.largest_component)/self.largest_component.shape[0]
        self.largest_component = np.sum(self.largest_component)
        
        self.largest_component_norm = self.largest_component/self.num_edges_
        
        # Booleans
        self.is_bipartite_ = gt.graph_tool.topology.is_bipartite(g)
        self.is_DAG_ = gt.graph_tool.topology.is_DAG(g)
        #self.is_planar = gt.graph_tool.topology.is_planar(g)
        
        # Clustering 
        self.local_clustering_coefficient_ = gt.graph_tool.clustering.local_clustering(g).a
        self.global_clustering_coefficient, self.global_clustering_coefficient_var = gt.graph_tool.clustering.global_clustering(g)
        self.local_clustering_coefficient_mean, self.local_clustering_coefficient_var_, self.local_clustering_coefficient_skew, self.local_clustering_coefficient_kurtosis = get_moments(self.local_clustering_coefficient_)

        self.k_core_ = gt.graph_tool.topology.kcore_decomposition(g).a
        self.k_core_mean = np.mean(self.k_core_)
        self.k_core_mean_norm = self.k_core_mean/self.num_edges_
        
        self.local_clustering_coefficient_mean_norm = self.local_clustering_coefficient_mean/self.num_edges_
        self.global_clustering_coefficient_norm = self.global_clustering_coefficient/self.num_edges_

        # Assortativity
        self.assortivity, self.assortivity_var = gt.graph_tool.correlations.assortativity(g, deg="total")
        self.scalar_assortivity, self.scalar_assortivity_var = gt.graph_tool.correlations.scalar_assortativity(g, deg="total")

        self.assortivity_norm = self.assortivity/self.num_edges_
        self.scalar_assortivity_norm = self.scalar_assortivity/self.num_edges_
        
        ## MAX FLOW
        
        # The capacity will be defined as the inverse Euclidean distance
        cap = g.new_edge_property("double")
        pos = self.pos_
        edges = list(g.edges())
        for e in edges:
            cap[e] = min(1.0 / norm(pos[e.target()].a - pos[e.source()].a), 10)
        g.edge_properties["cap"] = cap

        # NB: the geometric capacities are stored on the graph, but the
        # max-flow computation below uses the edge weights as capacities
        cap = g.edge_properties["cap"]
        cap = self.edge_weights_
        
        # Max flow 
        src, tgt = g.vertex(0), g.vertex(self.num_nodes_-1)
        res = gt.graph_tool.flow.edmonds_karp_max_flow(g, src, tgt, cap)
        res.a = cap.a - res.a  # the actual flow
        self.max_flow = sum(res[e] for e in tgt.in_edges())
        
        self.min_st_cut_partition = np.sum(gt.graph_tool.flow.min_st_cut(g, src, cap, res).a)
        self.min_st_cut_partition_norm = self.min_st_cut_partition/self.num_edges_
        self.max_flow_norm = self.max_flow/self.num_edges_
        
        # First vertex features        
        self.fv_degree_ = self.degrees_[0]
        self.fv_eigenvector_ = self.eigenvectors_[0]
        self.fv_katz_ = self.katz_[0]
        self.fv_pagerank_ = self.pageranks_[0]
        self.fv_auth_centr_ = self.auth_centr_[0]
        self.fv_hub_centr_ = self.hub_centr_[0]
        self.fv_closeness_ = self.closeness_[0]
        self.fv_betweenness_ = self.vertex_betweenness_[0]
        self.fv_local_clustering_coeff_ = self.local_clustering_coefficient_[0]
        
        # Min cut       
        g.set_directed(False)        
        self.min_cut, self.partition = gt.graph_tool.flow.min_cut(g, weight=self.edge_weights_)
        self.partition = np.sum(self.partition.a)
        
        self.min_cut_norm = self.min_cut/self.num_edges_
        self.partition_norm = self.partition/self.num_edges_
        
        self.ent_graph_ = entity_graph
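
This class leans on module-level helpers such as get_moments (first four moments of an array) and get_values_from_histo; a minimal sketch of what get_moments could look like, assuming the usual scipy definitions (the original helper may differ):

import numpy as np
from scipy import stats

def get_moments(a):
    # mean, variance, skewness, kurtosis of an array
    a = np.asarray(a, dtype=float)
    return a.mean(), a.var(), stats.skew(a), stats.kurtosis(a)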
Example 7

# The opening of this example was lost in extraction; the leading
# arguments of this graph_draw call (the graph and vertex_fill_color)
# are reconstructed assumptions.
gt.graph_draw(g_friend_LC,
              vertex_fill_color=vprop_betweenness,
              edge_pen_width=gt.prop_to_size(eprop_betweenness,
                                             mi=0.5,
                                             ma=5),
              vcmap=plt.cm.gist_heat,
              vorder=vprop_betweenness,
              output="betweenness_g_friend_LC_ap1.pdf")

#-- Eigenvector Distribution of Largest Component --#

if descEV_bool:

    print("\n\n#-- Eigenvector Distribution --#\n")

    w = g_friend_LC.new_edge_property("double")
    w.a = np.random.random(len(w.a)) * 42
    ee, x = gt.eigenvector(g_friend_LC, w)

    eigenVec_array = np.array(x.a)
    eigenVec_array_LC = eigenVec_array[close_array_index_LC]

    print("Eigenvalue of Largest Component: ", ee)  # 3028.056712035545

    print("Avg Eigenvector Centrality: ",
          sum(eigenVec_array_LC) /
          len(eigenVec_array_LC))  #    0.0023861077211674566
    print("Median Eigenvector Centrality: ",
          np.median(eigenVec_array_LC))  #    0.0002809208037315768
    print("Mode Eigenvector Centrality: ",
          stats.mode(eigenVec_array_LC))  #    5.780324396860505e-18

    # (call truncated in the source; arguments assumed)
    plt.hist(eigenVec_array_LC, bins=100)
Example 8

df_betweenness_centrality = pd.concat([df_betweenness_2008, df_betweenness_2011, df_betweenness_2013, df_betweenness_2015, df_betweenness_2018])

df_degree_centrality = pd.concat([df_degree_2008, df_degree_2011, df_degree_2013, df_degree_2015, df_degree_2018])

# Merging data 
ctrlity_frame = [df_eigen_centrality, df_harmnic_centrality, df_betweenness_centrality, df_degree_centrality]
ctrlity_merged = reduce(lambda left,right: pd.merge(left, right, on=['ctry', 'year'],
                                            how='inner'), ctrlity_frame).fillna('0')

ctrlity_merged.to_csv("/content/drive/MyDrive/G11-MEA-Diffusion/dataMEA_Ctrlity/ctrlity_output.csv")

"""### visualization"""

#eigenvector centrality

ee, x = gt.eigenvector(gt_2018_univ)
x.a /= (x.a*10 - 0.7)/0.04 # follow the formula in the book 
gt.graph_draw(gt_2018_univ, vertex_fill_color=x, vcmap=matplotlib.cm.gist_earth, vorder=x)

gc = gt.GraphView(gt_2018_univ, vfilt=gt.label_largest_component(gt_2018_univ))
c = gt.closeness(gc)
c.a /= c.a.max() / 232  # rescale for drawing; maps the largest closeness to 232
gt.graph_draw(gc, vertex_fill_color=c, vcmap=matplotlib.cm.Oranges, vorder=c)

#betweenness centrality 

bv, be = gt.betweenness(gt_2018_univ)
gt.graph_draw(gt_2018_univ, pos=None, vertex_fill_color=bv, vcmap=matplotlib.cm.summer)

deg = gt_2018_univ.degree_property_map("total")
gt.graph_draw(gt_2018_univ, vertex_fill_color=deg, vorder=deg)
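
Instead of the ad-hoc arithmetic rescalings above, graph-tool's gt.prop_to_size (already used in Example 7) maps a property map into a drawing-friendly range; for instance, sizing vertices by the degree map deg:

# rescale the degree map into a vertex-size range with prop_to_size
deg_size = gt.prop_to_size(deg, mi=1, ma=15)
gt.graph_draw(gt_2018_univ, vertex_size=deg_size, vertex_fill_color=deg, vorder=deg)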
Example 9

G = gt.load_graph_from_csv(FILENAME, csv_options={"delimiter": "\t"})
plot_log_log_dist(G, "dist.png")
# state1 = gt.minimize_blockmodel_dl(G, verbose=True)
N = len(G.get_vertices())
print(len(G.get_edges()))

knock_count = int(KNOCKOUT * N)
# to_remove = np.random.randint(0, N, knock_count)
# G.remove_vertex(to_remove)

# top_degree_nodes = [[idx[0], elem] for idx, elem in np.ndenumerate(G.get_total_degrees(G.get_vertices()))]
# top_degree_nodes.sort(key=lambda x: x[1], reverse=True)
# top_degree_nodes = top_degree_nodes[0:knock_count]
# top_degree_nodes = [i[1] for i in top_degree_nodes]
# G.remove_vertex(top_degree_nodes)

# Knock out the knock_count vertices with the highest eigenvector centrality
eival, eivec = gt.eigenvector(G)
to_remove = list(np.argsort(-eivec.get_array())[0:knock_count])
G.remove_vertex(to_remove)
plot_log_log_dist(G, "dist2.png")

# print(len(G.get_vertices()))
# print(len(G.get_edges()))
# plot_log_log_dist(G, "dist2.png")
# state2 = gt.minimize_blockmodel_dl(G, verbose=True)

# print(state1)
# print(state2)
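
The script assumes a plot_log_log_dist helper; a minimal sketch of such a degree-distribution plot (the original may differ):

import matplotlib.pyplot as plt
import numpy as np

def plot_log_log_dist(g, fname):
    # log-log scatter of the total-degree distribution
    degs = g.get_total_degrees(g.get_vertices())
    vals, counts = np.unique(degs, return_counts=True)
    plt.figure()
    plt.loglog(vals, counts, 'o')
    plt.xlabel('degree')
    plt.ylabel('count')
    plt.savefig(fname)
    plt.close()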