import math

import networkx as nx
from joblib import Parallel, delayed

# betweenness, betweenness_par, closeness_par and parallel_hits are project
# utilities defined elsewhere in the repository; hedged sketches of the
# assumed PriorityQueue and chunks helpers are included below.


def top_rank(k, rank):
    # Return the k nodes with the largest values in the rank dict.
    pq = PriorityQueue()
    for u in rank.keys():
        # Negate the score: PriorityQueue returns the lowest priority first,
        # so negated scores come out in decreasing order of rank.
        pq.add(u, -rank[u])
    out = []
    for i in range(k):
        out.append(pq.pop())
    return out
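
# Every function in this module relies on a PriorityQueue that is not
# defined here. The class below is an ASSUMPTION, not the project's actual
# implementation: a minimal heapq-backed sketch of the interface used in
# this file, namely add(item, priority), pop() returning the item with the
# LOWEST priority, and remove(item) returning the removed item's priority
# (hierarchical() depends on that return value). Prefer the project's own
# class if it is available.

import heapq
import itertools


class PriorityQueue:
    _REMOVED = object()  # sentinel marking a lazily deleted entry

    def __init__(self):
        self._heap = []
        self._entries = {}  # item -> [priority, tie_breaker, item]
        self._counter = itertools.count()

    def add(self, item, priority):
        # Re-adding an existing item replaces its priority.
        if item in self._entries:
            self.remove(item)
        entry = [priority, next(self._counter), item]
        self._entries[item] = entry
        heapq.heappush(self._heap, entry)

    def remove(self, item):
        # Lazy deletion: mark the heap entry and return its priority.
        entry = self._entries.pop(item)
        entry[-1] = PriorityQueue._REMOVED
        return entry[0]

    def pop(self):
        # Discard entries marked as removed until a live one surfaces.
        while self._heap:
            priority, _, item = heapq.heappop(self._heap)
            if item is not PriorityQueue._REMOVED:
                del self._entries[item]
                return item
        raise KeyError('pop from an empty PriorityQueue')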

def top(G, measure, k):
    # Return the k nodes of G with the highest value of the given centrality
    # measure (a function mapping G to a dict node -> score).
    pq = PriorityQueue()
    cen = measure(G)
    for u in G.nodes():
        # Negated score: PriorityQueue returns the lowest priority first.
        pq.add(u, -cen[u])
    out = []
    for i in range(k):
        out.append(pq.pop())
    return out
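
# Usage sketch (karate_club_graph is just an example input, not part of the
# project): the five nodes with the highest degree centrality.
#
#   G = nx.karate_club_graph()
#   top(G, nx.degree_centrality, 5)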

def top_hits_parall(G, k, num_node, j):
    # Rank nodes by the authority and hub scores computed by the project's
    # parallel HITS implementation, returning the top num_node of each.
    pq = PriorityQueue()
    pq2 = PriorityQueue()
    auth_n, hubs_n = parallel_hits(G, k, j)
    for u in G.nodes():
        # Negated scores: PriorityQueue returns the lowest priority first.
        pq.add(u, -auth_n[u])
    for u in G.nodes():
        pq2.add(u, -hubs_n[u])
    out = []
    out2 = []
    for i in range(num_node):
        out.append(pq.pop())
        out2.append(pq2.pop())
    return out, out2

def top_parallel(G, k, j):
    # Parallel top-k closeness: run closeness_par on each worker, passing to
    # each worker only the subset of nodes on which it works.
    pq = PriorityQueue()
    with Parallel(n_jobs=j) as parallel:
        result = parallel(delayed(closeness_par)(G, X)
                          for X in chunks(G.nodes(), math.ceil(len(G.nodes()) / j)))
    # Aggregate the per-worker results; each u is a dict node -> closeness.
    for u in result:
        for el in u.keys():
            # Negated score: PriorityQueue returns the lowest priority first.
            pq.add(el, -u[el])
    out = []
    for i in range(k):
        out.append(pq.pop())
    return out
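
# chunks() is another assumed helper: it should split the node collection
# into pieces of at most `size` elements, one piece per worker. A minimal
# sketch with that behaviour (prefer the project's own version if present):

def chunks(data, size):
    # Yield successive slices of at most `size` items drawn from `data`.
    items = list(data)
    for i in range(0, len(items), size):
        yield items[i:i + size]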

def top_betweenness(G, k, j):  # PARALLELIZATION
    # Parallel top-k node betweenness: run betweenness_par on each worker,
    # passing to each worker only the subset of nodes on which it works.
    pq = PriorityQueue()
    with Parallel(n_jobs=j) as parallel:
        lista = parallel(delayed(betweenness_par)(G, X)
                         for X in chunks(G.nodes(), math.ceil(len(G.nodes()) / j)))
    # Assembly phase: aggregate the results. Each element of lista is a pair
    # whose second entry is the node-betweenness dict of one worker.
    # (The loop variable is renamed from j so it no longer shadows the
    # j parameter.)
    for res in lista:
        for i in res[1].keys():
            pq.add(i, -res[1][i])
    out = []
    for i in range(k):
        out.append(pq.pop())
    return out

def bwt_cluster_naive(G):
    # Girvan-Newman-style clustering: repeatedly remove the edge with the
    # highest betweenness (computed once up front, hence "naive") until the
    # graph splits into 4 connected components.
    eb, nb = betweenness(G)  # edge- and node-betweenness dicts
    pq = PriorityQueue()
    for i in eb.keys():
        pq.add(i, -eb[i])
    graph = G.copy()
    done = False
    while not done:
        edge = tuple(sorted(pq.pop()))
        graph.remove_edges_from([edge])
        list_connected_comp = list(nx.connected_components(graph))
        if len(list_connected_comp) == 4:
            done = True
    return list_connected_comp
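
# Usage sketch (assumes betweenness() returns the (edge_betweenness,
# node_betweenness) pair of dicts, as unpacked above):
#
#   G = nx.karate_club_graph()
#   communities = bwt_cluster_naive(G)  # list of 4 sets of nodes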

def bwt_cluster_parallel(G, j):  # PARALLELIZATION
    # Parallel variant of bwt_cluster_naive: edge betweenness is computed by
    # j workers, each on its own subset of nodes, and then aggregated.
    pq = PriorityQueue()
    with Parallel(n_jobs=j) as parallel:
        lista = parallel(delayed(betweenness_par)(G, X)
                         for X in chunks(G.nodes(), math.ceil(len(G.nodes()) / j)))
    # Assembly phase: each element of lista is a pair whose first entry is
    # the edge-betweenness dict of one worker.
    for res in lista:
        for i in res[0].keys():
            pq.add(i, -res[0][i])
    graph = G.copy()
    done = False
    while not done:
        edge = tuple(sorted(pq.pop()))
        graph.remove_edges_from([edge])
        list_connected_comp = list(nx.connected_components(graph))
        if len(list_connected_comp) == 4:
            done = True
    return list_connected_comp

def hierarchical(G, sample=None):
    # Agglomerative hierarchical clustering with 0/1 distances: adjacent
    # nodes are at distance 0, all other pairs at distance 1.
    if sample is None:
        sample = G.nodes()
    # Create a priority queue with each pair of nodes indexed by distance
    pq = PriorityQueue()
    for u in sample:
        for v in sample:
            if u != v:
                if (u, v) in G.edges() or (v, u) in G.edges():
                    pq.add(frozenset([frozenset([u]), frozenset([v])]), 0)
                else:
                    pq.add(frozenset([frozenset([u]), frozenset([v])]), 1)
    # Start with a cluster for each node
    clusters = set(frozenset([u]) for u in sample)
    done = False
    while not done:
        # Merge the closest pair of clusters
        s = list(pq.pop())
        clusters.remove(s[0])
        clusters.remove(s[1])
        # Update the distance of the other clusters from the merged cluster
        # (single linkage: distance 0 if either half was at distance 0)
        for w in clusters:
            e1 = pq.remove(frozenset([s[0], w]))
            e2 = pq.remove(frozenset([s[1], w]))
            if e1 == 0 or e2 == 0:
                pq.add(frozenset([s[0] | s[1], w]), 0)
            else:
                pq.add(frozenset([s[0] | s[1], w]), 1)
        clusters.add(s[0] | s[1])
        # Stop once the nodes have been grouped into 4 clusters
        if len(clusters) == 4:
            done = True
    return clusters
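
# Usage sketch: hierarchical() keeps merging until exactly 4 clusters
# remain, so any example graph with more than 4 nodes works as input.
#
#   G = nx.karate_club_graph()
#   clusters = hierarchical(G)  # a set of 4 frozensets of nodes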