import random

import networkx as nx
from networkx import complete_graph, info


def extended_preferential_attachment(num_nodes, p, r):
    """Returns a random graph according to the Barabasi-Albert preferential
    attachment model, with the extension described by Cooper et al.

    A graph of ``num_nodes`` nodes is grown by attaching new nodes, each with
    ``r`` edges, that are preferentially attached to existing nodes with high
    degree.

    Parameters
    ----------
    num_nodes : int
        Number of nodes.
    p : float
        Probability of doing preferential attachment; with probability 1 - p,
        we instead add an edge to a random neighbour of the first target.
    r : int
        Number of edges to add for every new vertex.

    Returns
    -------
    G : Graph
    """
    # Add r initial nodes (m0 in Barabasi-speak)
    G = complete_graph(r)
    G.name = "extended_barabasi_albert_graph(%s,%s)" % (num_nodes, r)

    # List of existing nodes, with each node repeated once per adjacent edge
    repeated_nodes = list(range(r)) * (r - 1)

    # Start adding the other num_nodes - r nodes. The first new node is r.
    source = r
    while source < num_nodes:
        # First edge is (source, vertex_chosen_preferentially)
        x = random.choice(repeated_nodes)
        G.add_edge(source, x)
        repeated_nodes.extend([source, x])

        # Add the remaining r - 1 edges
        for _ in range(r - 1):
            if random.uniform(0, 1) <= p:
                # Attach the new vertex to an existing vertex chosen by
                # preferential attachment
                target = random.choice(repeated_nodes)
            else:
                # Attach the new vertex to a random neighbour of x
                target = random.choice(list(G.neighbors(x)))
            G.add_edge(source, target)
            repeated_nodes.extend([source, target])

        source += 1
        if source % 1000 == 0:
            print(info(G))

    return G
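A minimal sanity check for the generator above, sketched with illustrative parameter values (the experiments further down use num_nodes = 600000, p = 0.6, r = 3):

# Sketch: grow a small instance and inspect the degree tail; the parameter
# values here are hypothetical and chosen only to keep the run fast.
G_small = extended_preferential_attachment(num_nodes=5000, p=0.6, r=3)
degrees = sorted(dict(G_small.degree()).values(), reverse=True)
print(info(G_small))
print("Top 5 degrees:", degrees[:5])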
import networkx as nx
from networkx import read_gpickle, convert_node_labels_to_integers, info


def load_graph(gen=True):
    if gen:
        G = read_gpickle('HTCM.gpickle')
    else:
        G = read_gpickle('Google.gpickle')
    G = convert_node_labels_to_integers(G)

    print(info(G))
    print("Triangles in gen. graph:", sum(nx.triangles(G).values()) // 3)

    max_deg = 0
    u = 0
    for node in G.nodes():
        if max_deg < G.degree(node):
            max_deg = G.degree(node)
            u = node
    print("Max degree", max_deg, "at node", u,
          "(belonging to", nx.triangles(G, u), "triangles)")

    return G
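A usage sketch for load_graph, assuming the HTCM.gpickle and Google.gpickle files written by the main script below already exist on disk:

# Sketch: load both pickled graphs and compare their basic sizes.
G_model = load_graph(gen=True)    # generated HTCM graph
G_web = load_graph(gen=False)     # Google web graph
print(G_model.number_of_nodes(), G_model.number_of_edges())
print(G_web.number_of_nodes(), G_web.number_of_edges())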
import networkx as nx
from networkx import read_edgelist, write_gpickle, info


def Google_graph():
    # Create the Google web graph object and return the largest connected
    # subgraph
    G = read_edgelist('web-Google.txt', comments='#')
    G = max(nx.connected_component_subgraphs(G), key=len)
    G.name = "Google_graph"
    return G


if __name__ == '__main__':
    num_nodes = 600000
    p = 0.6
    r = 3

    G = extended_preferential_attachment(num_nodes, p, r)
    print(info(G))
    write_gpickle(G, "HTCM.gpickle")

    G = Google_graph()
    print(info(G))
    write_gpickle(G, "Google.gpickle")
import matplotlib.pyplot as plt
from networkx.algorithms import (approximation, assortativity, centrality,
                                 cluster, distance_measures, link_analysis,
                                 smallworld)
from networkx.classes import function


def ver_medidas(G):
    print(function.info(G))

    # Minimum number of nodes that must be removed to disconnect G
    print("Minimum number of nodes that must be removed to disconnect G: "
          + str(approximation.node_connectivity(G)))

    # Average clustering coefficient of G (approximate)
    print("Average clustering coefficient of G: "
          + str(approximation.average_clustering(G)))

    # Density of the graph
    print("Density of G: " + str(function.density(G)))

    # Degree assortativity measures the similarity of connections in the
    # graph with respect to node degree. Positive values of r indicate a
    # correlation between nodes of similar degree, while negative values
    # indicate correlations between nodes of different degree.
    print("Degree assortativity: "
          + str(assortativity.degree_assortativity_coefficient(G)))

    # Attribute assortativity measures the similarity of connections in the
    # graph with respect to the given node attribute.
    print("Assortativity for node attributes: "
          + str(assortativity.attribute_assortativity_coefficient(G, "crime")))

    # Average neighbour degree per node
    plt.plot(list(assortativity.average_neighbor_degree(G).values()))
    plt.title("Average neighbour degree")
    plt.xlabel("Node")
    plt.ylabel("Degree")
    plt.show()

    # Degree centrality of each node
    plt.plot(list(centrality.degree_centrality(G).values()))
    plt.title("Degree centrality")
    plt.xlabel("Node")
    plt.ylabel("Centrality")
    plt.show()

    # Clustering coefficient of each node
    plt.plot(list(cluster.clustering(G).values()))
    plt.title("Clustering coefficient")
    plt.xlabel("Node")
    plt.show()

    # Mean clustering coefficient
    print("Average clustering coefficient of G: "
          + str(cluster.average_clustering(G)))

    # Center of the graph: the subgraph induced by the set of vertices of
    # minimum eccentricity. The eccentricity of v in V is the maximum
    # shortest-path distance from v to any other vertex of G.
    print("Center of G: " + str(distance_measures.center(G)))

    # Diameter: the maximum eccentricity.
    print("Diameter of G: " + str(distance_measures.diameter(G)))

    # Eccentricity of each node: the maximum distance from v to all other
    # nodes in G.
    plt.plot(list(distance_measures.eccentricity(G).values()))
    plt.title("Eccentricity of each node")
    plt.xlabel("Node")
    plt.show()

    # Periphery: the set of nodes with eccentricity equal to the diameter.
    print("Periphery of G:")
    print(distance_measures.periphery(G))

    # Radius: the minimum eccentricity.
    print("Radius of G: " + str(distance_measures.radius(G)))

    # PageRank computes a ranking of the nodes in G based on the structure of
    # incoming links. It was originally designed as an algorithm to rank web
    # pages.
    plt.plot(list(link_analysis.pagerank_alg.pagerank(G).values()))
    plt.title("PageRank score of each node")
    plt.xlabel("Node")
    plt.show()

    # Small-world coefficient sigma. A graph is commonly classified as
    # small-world if sigma > 1.
    print("Small-world coefficient (sigma): " + str(smallworld.sigma(G)))

    # The small-world coefficient omega ranges between -1 and 1. Values close
    # to 0 mean G features small-world characteristics; values close to -1
    # mean G has a lattice shape, whereas values close to 1 mean G is a
    # random graph.
    print("Small-world coefficient (omega): " + str(smallworld.omega(G)))
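A minimal driver for ver_medidas, sketched under two assumptions: the graph is small and connected (sigma and omega build many rewired reference graphs and get slow quickly), and a hypothetical 'crime' node attribute is set so attribute_assortativity_coefficient has something to measure:

import random
import networkx as nx

# Sketch: small connected graph with a hypothetical 'crime' attribute.
H = nx.connected_watts_strogatz_graph(60, 6, 0.3, seed=1)
nx.set_node_attributes(H, {n: random.choice(["high", "low"]) for n in H}, "crime")
ver_medidas(H)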
from botdet.data.dataset_botnet import BotnetDataset
from networkx.classes import function as fn

if __name__ == '__main__':
    dataset = BotnetDataset(name='p2p', split='train', graph_format='nx',
                            in_memory=False)
    print(dataset)
    print(len(dataset))
    print(fn.info(dataset[0]))
    breakpoint()
import plotly.express as px
from botdet.data.dataset_botnet import BotnetDataset
from networkx.classes.function import info

data = BotnetDataset(name='p2p', split='train', graph_format='nx',
                     in_memory=False)

# Subgraph induced by the bot nodes of the first graph
bots = [n for n, attr in data[0].nodes(data=True) if attr['is_bot'] == 1]
botnet = data[0].subgraph(bots)
print(info(botnet))

# Number of bots across the first ten graphs in the split
seg = range(10)
# print(data[0].graph['num_evils'])
px.line(x=seg, y=[data[i].graph['num_evils'] for i in seg],
        labels={'x': 'time', 'y': 'number of bots'}).show()