def compute_global_indices(self, weighted=False):
        """Computes Average Clustering Coefficient and Average Path Length
        of the graph (binary or weighted).

        Parameters
        ----------
        weighted : bool (default=False)
            If True, use the weighted graph; otherwise use the binary one.

        Returns
        -------
        avg_cl_coef : float
            Average clustering coefficient.
        avg_path_len : float
            Average path length.
        """
        if weighted:
            avg_cl_coef = average_clustering(self.Gw, weight='weight')
            avg_path_len = average_shortest_path_length(self.Gw,
                                                        weight='weight')
        else:
            avg_cl_coef = average_clustering(self.G)
            avg_path_len = average_shortest_path_length(self.G)
        return avg_cl_coef, avg_path_len
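A minimal sketch of driving the method standalone: the body implies a host object exposing the binary graph as `self.G` and the weighted one as `self.Gw`, which is not shown here, so a `SimpleNamespace` stands in for it (an assumption, not the original class).

import networkx as nx
from types import SimpleNamespace
from networkx.algorithms import (average_clustering,
                                 average_shortest_path_length)

g = nx.karate_club_graph()            # connected, with edge weights
net = SimpleNamespace(G=g, Gw=g)      # stand-in for the host object
C, L = compute_global_indices(net)                     # binary indices
Cw, Lw = compute_global_indices(net, weighted=True)    # weighted indices
print(C, L, Cw, Lw)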
Example #2
def average_clustering(G, nodes=None, weight=None, count_zeros=True):
    # Dispatch: run the built-in "avg_clustering" app on the analytical
    # engine when possible; fall back to the NetworkX implementation otherwise.
    @project_to_simple
    def _average_clustering(G):
        ctx = AppAssets(algo="avg_clustering", context="tensor")(G)
        return ctx.to_numpy("r")[0]

    if weight is not None:
        # forward to networkx.average_clustering
        return nxa.average_clustering(G, nodes, weight, count_zeros)
    if nodes or not count_zeros or not G.is_directed():
        c = clustering(G, nodes=nodes).values()
        if not count_zeros:
            c = [v for v in c if abs(v) > 0]
        return sum(c) / len(c)
    return _average_clustering(G)
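Note the dispatch logic: the engine path (`_average_clustering`) is taken only when no node subset is given, zeros are counted, and the graph is directed; every other case routes to the NetworkX code (`nxa` is assumed to alias networkx.algorithms). A plain-NetworkX sketch of that fallback branch:

import networkx as nx

G = nx.gnp_random_graph(30, 0.2, seed=7)
# equivalent of the fallback: per-node clustering, then the mean
c = nx.clustering(G).values()
print(sum(c) / len(c))
print(nx.average_clustering(G))   # same value via the library helper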
Example #3
    def compute_SMI(self):
        """Compute the small-worldness index of the binary directed graph.

        Returns
        -------
        SMI : float
            Small-worldness index of the graph.
        """
        randMetrics = {"C": [], "L": []}
        print("Computing random graphs...")
        for i in range(10):
            rand_adj = bct.randmio_dir(self.binary_adjacency_matrix, 10, i)[0]
            G_r = nx.convert_matrix.from_numpy_array(rand_adj,
                                                     create_using=nx.DiGraph)
            randMetrics["C"].append(average_clustering(G_r))
            randMetrics["L"].append(average_shortest_path_length(G_r))

        print("Computing SMI...")
        C, L = self.compute_global_indices()
        Cr = np.mean(randMetrics["C"])
        Lr = np.mean(randMetrics["L"])

        SMI = (C / Cr) / (L / Lr)
        print("Completed!")
        return SMI
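compute_SMI normalizes C and L against degree-preserving random rewirings (bct.randmio_dir) of the directed graph. For undirected graphs, NetworkX ships the analogous small-world sigma, built from the same (C/Cr)/(L/Lr) construction with its own random rewiring; a quick sketch:

import networkx as nx
from networkx.algorithms.smallworld import sigma

G = nx.connected_watts_strogatz_graph(60, 6, 0.1, seed=0)
# sigma > 1 indicates small-world structure; niter/nrand are kept small
# here only so the sketch runs quickly
print(sigma(G, niter=5, nrand=3, seed=0))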
Example #4
def calculate_metrics(network):
    '''
    Computes the most important metrics of the network and returns them
    as a dictionary.

    Parameters
    ----------
        network : nx.Graph
            Network whose metrics are to be computed

    Returns
    -------
        metrics : dict
            Dictionary holding the metrics for the network passed as parameter
    '''

    # Initialize the dictionary that stores the metrics
    metrics = {}

    # Fetch the node names and affiliations
    names = nx.get_node_attributes(network, 'name')
    affiliation = nx.get_node_attributes(network, 'affiliation')

    # Helper mapping a (node, value) tuple to (name, affiliation, value)
    getprops = lambda author_tuple: (names[author_tuple[0]],
                                     affiliation[author_tuple[0]],
                                     author_tuple[1])

    # Number of nodes and edges
    n = network.number_of_nodes()
    m = network.number_of_edges()

    metrics['n'] = n
    metrics['m'] = m

    # Total size of the network (sum of edge weights)
    metrics['size'] = network.size(weight='weight')

    # Average degree, density, and the name of the maximum-degree node
    metrics['av_degree'] = round((2 * m) / n, 5)
    metrics['density'] = (2 * m) / (n * (n - 1))
    metrics['max_degree'] = getprops(
        max(dict(network.degree()).items(), key=lambda degree: degree[1]))[0]

    # Degree probability distribution (covers degrees 0..max_degree inclusive)
    max_degree = max(degree for _, degree in network.degree())
    degree_distribution = [
        (i, len([author for (author, degree) in network.degree()
                 if degree == i]) / n)
        for i in range(max_degree + 1)
    ]
    metrics['max_degree_p'] = max(degree_distribution,
                                  key=lambda degree_p: degree_p[1])[0]

    # Average clustering coefficient
    metrics['clustering_coefficient'] = average_clustering(network)

    # Name of the node with the highest closeness centrality
    metrics['max_closeness_centrality'] = getprops(
        max(centrality.closeness_centrality(network).items(),
            key=lambda pair: pair[1]))[0]

    return metrics
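Hypothetical usage on a toy co-authorship network: the function expects every node to carry 'name' and 'affiliation' attributes and edges to carry weights, and assumes `average_clustering` and `centrality` are imported in its module (e.g. from networkx.algorithms).

import networkx as nx

G = nx.Graph()
G.add_node(0, name='Ada', affiliation='UCL')
G.add_node(1, name='Alan', affiliation='Cambridge')
G.add_node(2, name='Grace', affiliation='Yale')
G.add_edge(0, 1, weight=2)
G.add_edge(1, 2, weight=1)

print(calculate_metrics(G))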
Example #5
# Show the number of nodes, edges, and average degree of each graph
print("Internet Graph info: ", nx.info(internet_graph))
print("Erdos Graph info: ", nx.info(Erdős))

# checking if the graph is directed or undirected
print("is Internet graph directed?", (nx.is_directed(internet_graph)))
print("is Erdős graph directed?", (nx.is_directed(Erdős)))

# diameter(G, e=None)
# degree_centrality(G)
# For average degree: #_edges * 2 / #_nodes;
# comparing average_clustering of both graphs
# print(average_clustering(internet_graph),(average_clustering(Erdős)))

# compute average clustering for the graphs
print("Average clustering of Internet graph is: ", average_clustering(internet_graph),
      "\nAverage clustering of Erdos graph is:  ", average_clustering(Erdős))

print("Transitivity of Internet graph is: ", nx.transitivity(internet_graph), "\nTransitivity of Erdos graph is: ",
      nx.transitivity(Erdős))

# compute clustering of the graphs
print("clustering of Internet graph is ", clustering(internet_graph), "clustering of Erdos graph is  ", clustering(Erdős))

# compute Degree_centrality for nodes
print("Degree_centrality of Internet graph is: ", degree_centrality(internet_graph), "\nDegree_centrality of Erdos graph is: ", degree_centrality(Erdős))


# compute Diameter of the Graphs
print("Diameter of Erdos graph is: ", diameter(Erdős), "\nDiameter of Internet Graph is: ", diameter(internet_graph))
# print ("diameter of Erdos is ",nx.diameter(Erdős))
Example #6
# Find communities / connected components
from networkx.algorithms import number_connected_components, connected_components

print(number_connected_components(G))
for subG in connected_components(G):
    print(subG)

# Build the graph structure of each connected component
# (connected_component_subgraphs was removed in NetworkX 2.4, so construct
# the subgraphs directly)
for i, nodes in enumerate(connected_components(G)):
    subG = G.subgraph(nodes)
    print('G%s' % i, subG.number_of_nodes(), subG.number_of_edges())

# Use triangle computations to sharpen community detection
# Triangle counts and the clustering coefficient measure how tightly knit
# a community/subgraph is
from networkx.algorithms import triangles, transitivity, average_clustering

# Triangle count per node
print(triangles(G))
# Transitivity (global ratio of triangles to connected triples)
print(transitivity(G))
# Average clustering coefficient
print(average_clustering(G))

# Use PageRank to find the influential hubs
from collections import Counter
from networkx.algorithms import pagerank

pr = pagerank(G)
for p in Counter(pr).most_common():
    print(p)
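The walkthrough above assumes a prebuilt graph G. Any small undirected graph with more than one component and at least one triangle exercises every step; a stand-in (an assumption, since G is not defined in the snippet):

import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (1, 3), (4, 5)])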
Example #7
def run_GT_calcs(G, just_data, Do_kdist, Do_dia, Do_BCdist, Do_CCdist,
                 Do_ECdist, Do_GD, Do_Eff, Do_clust, Do_ANC, Do_Ast,
                 Do_WI, multigraph):

    # getting nodes and edges and defining variables for later use
    klist = [0]
    Tlist = [0]
    BCdist = [0]
    CCdist = [0]
    ECdist = [0]
    if multigraph:
        Do_BCdist = 0
        Do_ECdist = 0
        Do_clust = 0

    data_dict = {"x": [], "y": []}

    nnum = int(nx.number_of_nodes(G))
    enum = int(nx.number_of_edges(G))

    if Do_ANC or Do_dia:
        connected_graph = nx.is_connected(G)

    # making a dictionary for the parameters and results
    just_data.append(nnum)
    data_dict["x"].append("Number of nodes")
    data_dict["y"].append(nnum)
    just_data.append(enum)
    data_dict["x"].append("Number of edges")
    data_dict["y"].append(enum)
    multi_image_settings.progress(35)

    # calculating parameters as requested

    # creating degree histogram (indexing the DegreeView by j assumes the
    # nodes are labeled 0..n-1)
    if (Do_kdist == 1):
        klist1 = nx.degree(G)
        ksum = 0
        klist = np.zeros(len(klist1))
        for j in range(len(klist1)):
            ksum = ksum + klist1[j]
            klist[j] = klist1[j]
        k = ksum / len(klist1)
        k = round(k, 5)
        just_data.append(k)
        data_dict["x"].append("Average degree")
        data_dict["y"].append(k)

    multi_image_settings.progress(40)

    # calculating network diameter
    if (Do_dia == 1):
        if connected_graph:
            dia = int(diameter(G))
        else:
            dia = 'NaN'
        just_data.append(dia)
        data_dict["x"].append("Network Diameter")
        data_dict["y"].append(dia)

    multi_image_settings.progress(45)

    # calculating graph density
    if (Do_GD == 1):
        GD = nx.density(G)
        GD = round(GD, 5)
        just_data.append(GD)
        data_dict["x"].append("Graph density")
        data_dict["y"].append(GD)

    multi_image_settings.progress(50)

    # calculating global efficiency
    if (Do_Eff == 1):
        Eff = global_efficiency(G)
        Eff = round(Eff, 5)
        just_data.append(Eff)
        data_dict["x"].append("Global Efficiency")
        data_dict["y"].append(Eff)

    multi_image_settings.progress(55)

    if (Do_WI == 1):
        WI = wiener_index(G)
        WI = round(WI, 1)
        just_data.append(WI)
        data_dict["x"].append("Wiener Index")
        data_dict["y"].append(WI)

    multi_image_settings.progress(60)

    # calculating clustering coefficients
    if (Do_clust == 1):
        Tlist1 = clustering(G)
        Tlist = np.zeros(len(Tlist1))
        for j in range(len(Tlist1)):
            Tlist[j] = Tlist1[j]
        clust = average_clustering(G)
        clust = round(clust, 5)
        just_data.append(clust)
        data_dict["x"].append("Average clustering coefficient")
        data_dict["y"].append(clust)

    # calculating average nodal connectivity
    if (Do_ANC == 1):
        if connected_graph:
            ANC = average_node_connectivity(G)
            ANC = round(ANC, 5)
        else:
            ANC = 'NaN'
        just_data.append(ANC)
        data_dict["x"].append("Average nodal connectivity")
        data_dict["y"].append(ANC)

    multi_image_settings.progress(65)

    # calculating assortativity coefficient
    if (Do_Ast == 1):
        Ast = degree_assortativity_coefficient(G)
        Ast = round(Ast, 5)
        just_data.append(Ast)
        data_dict["x"].append("Assortativity Coefficient")
        data_dict["y"].append(Ast)

    multi_image_settings.progress(70)

    # calculating betweenness centrality histogram
    if (Do_BCdist == 1):
        BCdist1 = betweenness_centrality(G)
        Bsum = 0
        BCdist = np.zeros(len(BCdist1))
        for j in range(len(BCdist1)):
            Bsum += BCdist1[j]
            BCdist[j] = BCdist1[j]
        Bcent = Bsum / len(BCdist1)
        Bcent = round(Bcent, 5)
        just_data.append(Bcent)
        data_dict["x"].append("Average betweenness centrality")
        data_dict["y"].append(Bcent)
    multi_image_settings.progress(75)

    # calculating closeness centrality
    if (Do_CCdist == 1):
        CCdist1 = closeness_centrality(G)
        Csum = 0
        CCdist = np.zeros(len(CCdist1))
        for j in range(len(CCdist1)):
            Csum += CCdist1[j]
            CCdist[j] = CCdist1[j]
        Ccent = Csum / len(CCdist1)
        Ccent = round(Ccent, 5)
        just_data.append(Ccent)
        data_dict["x"].append("Average closeness centrality")
        data_dict["y"].append(Ccent)

    multi_image_settings.progress(80)

    # calculating eigenvector centrality (runs independently of the
    # closeness block above)
    if (Do_ECdist == 1):
        try:
            ECdist1 = eigenvector_centrality(G, max_iter=100)
        except nx.PowerIterationFailedConvergence:
            ECdist1 = eigenvector_centrality(G, max_iter=10000)
        Esum = 0
        ECdist = np.zeros(len(ECdist1))
        for j in range(len(ECdist1)):
            Esum += ECdist1[j]
            ECdist[j] = ECdist1[j]
        Ecent = Esum / len(ECdist1)
        Ecent = round(Ecent, 5)
        just_data.append(Ecent)
        data_dict["x"].append("Average eigenvector centrality")
        data_dict["y"].append(Ecent)

    data = pd.DataFrame(data_dict)

    return data, just_data, klist, Tlist, BCdist, CCdist, ECdist
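A hypothetical driver for run_GT_calcs. The host application's progress API (multi_image_settings) is not shown in the snippet, so the sketch stubs it; the function's own module is also assumed to import numpy as np, pandas as pd, and the networkx.algorithms names it calls.

import networkx as nx

class multi_image_settings:           # stub for the host app's progress bar
    @staticmethod
    def progress(pct):
        pass

G = nx.gnp_random_graph(40, 0.15, seed=1)   # nodes labeled 0..n-1
data, just_data, klist, Tlist, BCdist, CCdist, ECdist = run_GT_calcs(
    G, [], Do_kdist=1, Do_dia=1, Do_BCdist=1, Do_CCdist=1, Do_ECdist=1,
    Do_GD=1, Do_Eff=1, Do_clust=1, Do_ANC=0, Do_Ast=1, Do_WI=1,
    multigraph=False)
print(data)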
Example #8
 ccID = 0
 for (cl, char) in [
     ('BetwCent', nx.betweenness_centrality),
     ('AvgDegConn',
      algs.assortativity.average_degree_connectivity),
     ('EigCentr', algs.centrality.eigenvector_centrality),
     ('CloseCentr', algs.centrality.closeness_centrality),
     ('LoadCentr', nx.algorithms.centrality.load_centrality),
     ('Triangles', algs.cluster.triangles)
 ]:
     v = list(char(target).values())
     for (prefix, selector) in [('max', max), ('min', min),
                                ('avg', np.mean),
                                ('med', np.median)]:
         print(label, l, prefix + cl, selector(v))
 print(label, l, 'avgClust', algs.average_clustering(target))
 print(
     label, l, 'vertexCover',
     len(approx.vertex_cover.min_weighted_vertex_cover(target)))
 print(
     label, l, 'degAssort',
     algs.assortativity.degree_assortativity_coefficient(
         target))
 print(label, l, 'trans', algs.cluster.transitivity(target))
 print(label, l, 'greedyCol',
       len(set(algs.coloring.greedy_color(target).values())))
 if isolated == 0:
     print(label, l, 'edgeCover',
           len(algs.covering.min_edge_cover(target)))
 smallest = n
 biggest = 0
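The fragment above is cut from a larger loop, so the names it leans on are never defined. A hypothetical reconstruction of that context, inferred from usage (all bindings here are assumptions):

import numpy as np
import networkx as nx
from networkx import algorithms as algs
from networkx.algorithms import approximation as approx

target = nx.karate_club_graph()        # the graph under analysis
label, l = 'karate', 0                 # identifiers echoed in every print
n = target.number_of_nodes()
isolated = nx.number_of_isolates(target)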
Example #9
path = os.path.abspath('..')
cluster_0 = []
cluster_1 = []
# read the first community file; node_id avoids shadowing the id() builtin
with open(path + "/data/klcommunity4_0.txt") as f:
    for line in f:
        node_id, name, rank = line.split()
        cluster_0.append({'id': node_id, 'name': name, 'rank': rank})

graph = build_unweighted_rank_graph()
nodes_0 = [int(i['id']) for i in cluster_0]
print(len(nodes_0))
graph = graph.to_undirected()
# alg is assumed to alias networkx.algorithms
p_0 = alg.average_clustering(graph, nodes_0)
print(p_0)

sum_0 = 0

for i in cluster_0:
    sum_0 += float(i['rank'])

print(sum_0)

with open(path + "/data/klcommunity4_1.txt") as f:
    for line in f:
        node_id, name, rank = line.split()
        cluster_1.append({'id': node_id, 'name': name, 'rank': rank})
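The two read blocks are identical; a small helper (load_cluster is a hypothetical name, not in the original) would remove the duplication:

def load_cluster(filename):
    # parse one "id name rank" triple per line into a list of dicts
    cluster = []
    with open(filename) as f:
        for line in f:
            node_id, name, rank = line.split()
            cluster.append({'id': node_id, 'name': name, 'rank': rank})
    return cluster

cluster_0 = load_cluster(path + "/data/klcommunity4_0.txt")
cluster_1 = load_cluster(path + "/data/klcommunity4_1.txt")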