def smallWorldness(graph):
    return_values = []
    # Small-worldness criteria
    n = len(nx.nodes(graph))
    e = len(nx.edges(graph))
    # probability of edges: (number of edges in real graph)/possible edges
    p = e / float((n * (n - 1) / 2.0))
    # generate random graph using probability
    rand_graph = nx.fast_gnp_random_graph(n, p, seed=1)
    # calculate values for real graph and random graph
    Creal = nx.transitivity(graph)  # float
    Crand = nx.transitivity(rand_graph)  # float
    Lreal = 0
    Lrand = 0
    real_sum = 0
    rand_sum = 0
    splReal = shortest_path_lengths(graph)
    splRand = shortest_path_lengths(rand_graph)
    for i in range(len(splReal)):
        real_sum += splReal[i]
        rand_sum += splRand[i]
    Lreal = real_sum / len(splReal)
    Lrand = rand_sum / len(splRand)
    # compare with actual graph
    if Lreal != 0 and Lrand != 0 and Crand != 0:
        S = (Creal / Crand) / (float(Lreal) / Lrand)
    else:
        S = 0
    return_values.append(S)
    return return_values
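# Hedged aside, not from the original module: the same small-world coefficient
# S = (C / C_rand) / (L / L_rand) can be estimated with NetworkX built-ins,
# avoiding the external shortest_path_lengths() helper assumed above. The
# function name and the test graph below are illustrative only.
import networkx as nx

def small_world_coefficient_sketch(graph, seed=1):
    n = graph.number_of_nodes()
    e = graph.number_of_edges()
    p = e / (n * (n - 1) / 2.0)
    rand_graph = nx.fast_gnp_random_graph(n, p, seed=seed)
    if not nx.is_connected(rand_graph):
        # keep only the giant component so the average path length is defined
        rand_graph = rand_graph.subgraph(max(nx.connected_components(rand_graph), key=len))
    C, C_rand = nx.transitivity(graph), nx.transitivity(rand_graph)
    L, L_rand = nx.average_shortest_path_length(graph), nx.average_shortest_path_length(rand_graph)
    return (C / C_rand) / (L / L_rand) if C_rand and L and L_rand else 0

# A Watts-Strogatz graph is expected to give S well above 1.
ws = nx.connected_watts_strogatz_graph(200, 10, 0.1, seed=1)
print(small_world_coefficient_sketch(ws))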
def test_edges(self):
    assert_edges_equal(self.G.edges(), list(nx.edges(self.G)))
    assert_equal(sorted(self.DG.edges()), sorted(nx.edges(self.DG)))
    assert_edges_equal(self.G.edges(nbunch=[0, 1, 3]),
                       list(nx.edges(self.G, nbunch=[0, 1, 3])))
    assert_equal(sorted(self.DG.edges(nbunch=[0, 1, 3])),
                 sorted(nx.edges(self.DG, nbunch=[0, 1, 3])))
def bfs_bd_ucs(graph, start, goal):
    if start == goal:
        return {'cost': 0, 'expanded': 0}
    frontier0 = [start]
    frontier1 = [goal]
    explored0 = []
    explored1 = []
    num_explored = 0
    commonNodes = []
    while len(frontier0) + len(frontier1) > 0:
        linked01 = len(commonNodes) > 0
        ## 0 front
        if frontier0:
            node = popClosest(frontier0)
            explored0.append(node)
            if node in explored0 and node in explored1:
                commonNodes += [node]
            for edge in networkx.edges(graph, node.node['data'].id):
                child = State(graph.node[edge[1]], node,
                              node.cost + nodeDist(node.node, graph.node[edge[1]]))
                if child not in explored0:
                    if child not in frontier0:
                        if not linked01:
                            frontier0.append(child)
                            num_explored = num_explored + 1
                    else:
                        swapIfBetter(frontier0, child)
        ## 1 front
        if frontier1:
            node = popClosest(frontier1)
            explored1.append(node)
            if node in explored0 and node in explored1:
                commonNodes += [node]
            for edge in networkx.edges(graph, node.node['data'].id):
                child = State(graph.node[edge[1]], node,
                              node.cost + nodeDist(node.node, graph.node[edge[1]]))
                if child not in explored1:
                    if child not in frontier1:
                        if not linked01:
                            frontier1.append(child)
                            num_explored = num_explored + 1
                    else:
                        swapIfBetter(frontier1, child)
        if commonNodes:
            bestNode = commonNodes[0]
            for node in commonNodes:
                if commonCost(node, explored0, explored1) < commonCost(bestNode, explored0, explored1):
                    bestNode = node
            return {'cost': commonCost(bestNode, explored0, explored1),
                    'expanded': num_explored}
    print("No path found, explored: ", num_explored)
    return None
def update(self, dev=0):
    if not dev:
        return
    else:
        print(dev, dev.neighbours, nx.edges(self.proxNet, dev))
        print(self.proxNet.nodes(), self.proxNet.edges())
        self.proxNet.remove_edges_from(nx.edges(self.proxNet, dev))
        registered = []
        for item in dev.neighbours:
            neighbour = self.deviceManager.getDevice(item)
            if neighbour != 0:
                registered.append(neighbour)
        self.proxNet.add_edges_from([(dev, item) for item in registered])
def get_links(g, link_type):
    if link_type == 'path':
        edges = nx.edges(g)
        l = []
        for edge in edges:
            x1 = data_dict[str(id_key[str(edge[0])])]
            x2 = data_dict[str(id_key[str(edge[1])])]
            l.append({'source': x1[1] + '(' + str(x1[0]) + ')',
                      'target': x2[1] + '(' + str(x2[0]) + ')'})
        return l
    else:
        edges = nx.edges(st_links)
        l = []
        for edge in edges:
            l.append({'source': str(get_name(edge[0])),
                      'target': str(get_name(edge[1]))})
        return l
def random_pairs(G, n=100000, frac=0.1, saving=True): """ Select a training set of n send/receive pairs, with frac of them representing actual transactions. Calculate stats for each pair using the get_features() helper. For those send/receive pairs that represent actual transactions in the training data, remove the link from the graph before calculating the features to simulate transaction prediction. """ # initialize numpy arrays for holding features features = np.empty((n, n_features), dtype=int) mask = np.empty(n, dtype=bool) # Grab edges and nodes edges_list = nx.edges(G) np.random.shuffle(edges_list) nodes_list = nx.nodes(G) # number of real transactions to consider n_true = int(n*frac) # pick random transacting pairs and calculate features w/o transaction for i in range(n_true): # output progress prog = 100*(i+1)/n print('Considering random pairs: %.1f%% complete\r' % prog, end='') # set mask to true mask[i] = True # Pick pair from list of transactions (snd, rcv) = edges_list[i] # Save edge weight and remove from graph n_trans = G[snd][rcv]['trans'] G.remove_edge(snd, rcv) # get features features[i, :] = get_features(G, snd, rcv) # re-add link G.add_edge(snd, rcv, trans=n_trans) # Calculate stats for unconnected nodes for i in range(n_true, n): # output progress prog = 100*(i+1)/n print('Considering random pairs: %.1f%% complete\r' % prog, end='') # set mask to false mask[i] = False # Pick two addresses and ensure they are unconnected snd, rcv = np.random.choice(nodes_list, 2) while G.has_edge(snd, rcv): snd, rcv = np.random.choice(nodes_list, 2) # get features features[i, :] = get_features(G, snd, rcv) # save to csv file if saving: feats_and_mask = np.hstack(features, mask.astype(int)) np.savetxt('random_pairs.txt', feats_and_mask, delimiter=',') print() # end line return features, mask
def dagToFile(self, dag):
    # file format:
    #   (Tasks): id req act procs (always 1)
    #   (Edges): e id1 id2
    # Keep ids contiguous, so ignore the given job ID.
    # The ids will also be output as negative values.
    # makeContiguous makes ids starting at 0, but everything is incremented
    # before output, i.e. tasks will have ids -1, -2, -3, ...
    # The file name will be the given file base, followed by _p
    # and then its current number, in text file format,
    # i.e. example_p0.txt
    name = self.base + "_p" + str(self.permNo) + ".txt"
    self.permNo += 1
    f = open(name, 'w')
    # Make the nodes contiguous for ease of use later.
    nodes = self.makeContiguous(dag)
    edges = nx.edges(dag)
    string = ''
    # Writing out each string to the file.
    for n in nodes:
        string = "T -" + str(n.job_id + 1) + " " + str(n.requested_time) + \
                 " " + str(n.actual_time) + " 1\n"
        f.write(string)
    for e in edges:
        string = "E -" + str(e[0].job_id + 1) + " -" + str(e[1].job_id + 1) + "\n"
        f.write(string)
    f.close()
def costo_longitud_random(grafo):
    for e in nx.edges(grafo):
        costo = rnd.randint(1, 100)
        longitud = rnd.randint(1, 100)
        (u, v) = e
        grafo.edge[u][v]['longitud'] = longitud
        grafo.edge[u][v]['costo'] = costo
def generate_G(graph, source_from, source_to):
    nodes = nx.nodes(graph)
    edges = nx.edges(graph)
    N_nodes = len(nodes)
    N_edges = len(edges)
    G = np.zeros((N_nodes - 1, N_nodes - 1))

    # only elements in the diagonal
    for x in nodes:
        if x == 0:  # do not consider GND
            continue
        connected_nodes = nx.all_neighbors(graph, x)
        conductances = 0
        for i in connected_nodes:
            if (x == source_from and i == source_to) or (x == source_to and i == source_from):
                continue
            conductances += 1.0 / (graph.get_edge_data(x, i)['weight'])
        G[x - 1][x - 1] = conductances

    # other, off diagonal elements
    for p in edges:
        if abs(p[0] - p[1]) == (p[0] + p[1]):  # if one node is GND
            continue
        if (p[0] == source_from and p[1] == source_to) or (p[0] == source_to and p[1] == source_from):
            continue
        conductance = 1.0 / graph.get_edge_data(p[0], p[1])['weight']
        G[p[0] - 1][p[1] - 1] = conductance
        G[p[1] - 1][p[0] - 1] = conductance

    return G
def bfs_default(graph, start, goal):
    if start == goal:
        return {'cost': 0, 'expanded': 0}
    frontier = [start]
    explored = []
    num_explored = 0
    while len(frontier) > 0:
        node = frontier.pop(0)
        #print node.cost
        explored.append(node)
        for edge in networkx.edges(graph, node.node['data'].id):
            child = State(graph.node[edge[1]], node,
                          node.cost + nodeDist(node.node, graph.node[edge[1]]))
            if child not in explored and child not in frontier:
                if child == goal:
                    return {'cost': child.cost, 'expanded': num_explored}
                else:
                    frontier.append(child)
                    num_explored = num_explored + 1
    return None
def bfs_ucs(graph, start, goal):
    if start == goal:
        return {'cost': 0, 'expanded': 0}
    frontier = [start]
    explored = []
    num_explored = 0
    while len(frontier) > 0:
        node = popClosest(frontier)
        #print node.cost
        explored.append(node)
        if node == goal:
            return {'cost': node.cost, 'expanded': num_explored}
        for edge in networkx.edges(graph, node.node['data'].id):
            child = State(graph.node[edge[1]], node,
                          node.cost + nodeDist(node.node, graph.node[edge[1]]))
            if child not in explored:
                if child not in frontier:
                    frontier.append(child)
                    num_explored += 1
                else:
                    swapIfBetter(frontier, child)
    return None
def print_output(graph, x):
    nodes_out = []
    edges_out = []
    edges = nx.edges(graph)
    for edge in edges:
        R = graph.get_edge_data(edge[0], edge[1])['weight']  # resistance between 2 nodes
        U = abs(x[edge[0]] - x[edge[1]])  # calculate voltage as potential difference
        if R == 0:
            I = 0  # let's assume so
        else:
            I = float(U) / R  # from Ohm's law
        edges_out.append({'from': edge[0], 'to': edge[1], 'value': I, 'label': R})
    nodes = nx.nodes(graph)
    for node in nodes:
        nodes_out.append({'id': node, 'label': str(node)})
    f = open('data.jsonp', 'w')
    f.write("nodes = " + str(nodes_out) + ";\n")
    f.write("edges = " + str(edges_out) + ";")
    f.close()
def merge_trees(tree1=nx.DiGraph(), tree2=nx.DiGraph()):
    # print(tree1.edges(data=True))
    # print(tree2.edges(data=True))
    edges2 = [(v, u, tree2[v][u]['weight']) for v, u in nx.edges(tree2)]
    tree1.add_weighted_edges_from(edges2)
    # print(tree1.edges(data=True))
    return tree1
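# Hedged usage sketch (toy data assumed, not from the original project):
# merging two small weighted trees with merge_trees() defined above.
# The expected output comment assumes NetworkX 2.x edge views.
import networkx as nx

t1 = nx.DiGraph()
t1.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 2.0)])
t2 = nx.DiGraph()
t2.add_weighted_edges_from([(1, 3, 0.5), (3, 4, 1.5)])

merged = merge_trees(t1, t2)
print(sorted(merged.edges(data='weight')))
# [(0, 1, 1.0), (1, 2, 2.0), (1, 3, 0.5), (3, 4, 1.5)]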
def analyseConnectedComponents(graphs):
    conflicts = {}
    for cc in graphs:
        nodes = nx.nodes(cc)
        for node in nodes:
            if nx.degree(cc, node) > 1:
                edges = nx.edges(cc, node)
                for pair in itertools.combinations(edges, 2):
                    leftSet = cc[pair[0][0]][pair[0][1]]["species"]
                    left = set()
                    for elem in leftSet:
                        left.add(elem)
                    rightSet = cc[pair[1][0]][pair[1][1]]["species"]
                    right = set()
                    for elem in rightSet:
                        right.add(elem)
                    # identify conflicts
                    if not left.isdisjoint(right):
                        conflict = left.intersection(right)
                        for elem in conflict:
                            if elem in conflicts:
                                conflicts[elem].append(pair)
                            else:
                                conflicts[elem] = [pair]
    # combineConflicts(conflicts)
    print(" ")
    for species in conflicts:
        print("Number of conflicts in " + species + " : " + str(len(conflicts[species])))
    return conflicts
def write_gspan(graph, outfile): # get all subgraphs only works with undirected subgraphs=nx.connected_components(graph.to_undirected()) id_count=1 node_count=0 #get labels label_dic=nx.get_node_attributes(graph,'label') for s in subgraphs: node_count_tree=0 node_dict={} outfile.write("t # id "+str(id_count)+"\n") # for every node in subgraph for v in sorted(s): # node id restart from 0 for every sub graph node_dict[v]=node_count_tree outfile.write("v "+str(node_count_tree)+" "+label_dic[v]+" \n") node_count_tree+=1 node_count+=1 # all edges adjacent to a node of s edges=nx.edges(graph, s) for e in sorted(edges): #print(graph[e[0]][e[1]]) try: outfile.write("e "+str(node_dict[e[0]])+" "+str(node_dict[e[1]])+" "+graph[e[0]][e[1]]['label']+"\n") except KeyError: outfile.write("e "+str(node_dict[e[0]])+" "+str(node_dict[e[1]])) id_count+=1
def calculate(g, voltage):
    edges_num = nx.number_of_edges(g)
    # sort nodes in edges
    edges = [edge if edge[0] < edge[1] else (edge[1], edge[0]) for edge in nx.edges(g)]
    a = np.zeros((edges_num, edges_num))
    b = np.zeros((edges_num, 1))
    i = 0
    # first law
    for node in [node for node in nx.nodes(g) if node != 0]:
        for neighbor in nx.all_neighbors(g, node):
            edge = tuple(sorted((node, neighbor)))
            a[i][edges.index(edge)] = 1 if neighbor < node else -1
        i += 1
    # second law
    cycles = nx.cycle_basis(g, 0)
    for cycle in cycles:
        for j in range(0, len(cycle)):
            node = cycle[j]
            next_node = cycle[(j + 1) % len(cycle)]
            edge = tuple(sorted((node, next_node)))
            resistance = g[node][next_node]['weight']
            a[i][edges.index(edge)] = resistance if node < next_node else -resistance
        if 0 in cycle:
            b[i] = voltage
        i += 1
    # solve
    x = np.linalg.solve(a, b)
    for (x1, x2), res in zip(edges, x):
        g[x1][x2]['current'] = res[0]
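# Hedged usage sketch (test circuit assumed, not from the original project):
# a single loop of three resistors (1, 2 and 3 ohm) driven by 10 V should carry
# |I| = 10 / 6 A on every edge, whatever orientation the cycle basis picks.
# Assumes the numpy/networkx imports used by calculate() are in scope.
import networkx as nx
import numpy as np

g = nx.Graph()
g.add_edge(0, 1, weight=1.0)
g.add_edge(1, 2, weight=2.0)
g.add_edge(0, 2, weight=3.0)

calculate(g, voltage=10.0)
for u, v, current in g.edges(data='current'):
    assert abs(abs(current) - 10.0 / 6.0) < 1e-9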
def single_source_number_of_walks(G, source, walk_length):
    """Returns a dictionary whose keys are the vertices of `G` and whose
    values are the numbers of walks of length exactly `walk_length` joining
    `source` to that node.

    Raises :exc:`ValueError` if `walk_length` is negative.

    """
    if walk_length < 0:
        raise ValueError('walk length must be a non-negative integer')
    # Create a counter to store the number of walks of length
    # `walk_length`. Ensure that even unreachable vertices have count zero.
    result = Counter({v: 0 for v in G})
    queue = deque()
    queue.append((source, 0))
    # TODO We could reduce the number of iterations in this loop by performing
    # multiple `popleft()` calls at once, since the queue is partitioned into
    # slices in which all enqueued vertices in the slice are at the same
    # distance from the source. In other words, if we keep track of the
    # *number* of vertices at each distance, we could just immediately dequeue
    # all of those vertices.
    while queue:
        (u, distance) = queue.popleft()
        if distance == walk_length:
            result[u] += 1
        else:
            # Using `nx.edges()` accounts for multiedges as well.
            queue.extend((v, distance + 1) for u_, v in nx.edges(G, u))
    # Return the result as a true dictionary instead of a Counter object.
    return dict(result)
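# Hedged sanity check, not part of the original module: the walk counts above
# should equal the entries of the k-th power of the adjacency matrix. Assumes
# the imports the function relies on (networkx as nx, Counter, deque) are in scope.
import numpy as np
import networkx as nx

G = nx.petersen_graph()
k, source = 3, 0
walks = single_source_number_of_walks(G, source, k)
A = nx.to_numpy_array(G, nodelist=sorted(G))
Ak = np.linalg.matrix_power(A, k)
for j in sorted(G):
    assert walks[j] == int(round(Ak[source, j]))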
def forwarder_down(self, fwID):
    tunelIDs = []
    for v in DynamicGraph[fwID].keys():
        tunelIDs += DynamicGraph[fwID][v]['tunely']
    for tunelID in tunelIDs:
        vymaz_tunel(tunelID)
    self.DynamicGraph.remove_edges_from(nx.edges(DynamicGraph, fwID))
def delete_links(G):
    '''
    Delete links in the graph if they disadvantage the person.
    The link is deleted if (r1-r2)/affect(n1, n2)*deg(n1) > alpha
    where r1 > r2 and alpha is a random number between 0 and a given
    parameter beta.
    '''
    beta = 100
    reput = nx.get_node_attributes(G, "reput")
    affect = nx.get_edge_attributes(G, "affect")
    n = 0
    for edge in nx.edges(G):
        # Calculate alpha
        alpha = beta * random.random()
        # Define who has the higher reputation
        n1 = edge[1]
        n2 = edge[0]
        if reput[edge[0]] > reput[edge[1]]:
            n1 = edge[0]
            n2 = edge[1]
        # Compute the coefficient
        coef = (reput[n1] - reput[n2]) / affect[edge] * G.degree(n1)
        # Decide whether we delete the link
        if coef > alpha:
            G.remove_edge(edge[0], edge[1])
            del affect[edge]
    # Set the new affect dict and return the graph
    nx.set_edge_attributes(G, "affect", affect)
    return G
def CopyDAG(G_train, data_total, pmin, pmax): edges = nx.edges(G_train) kern = dict.fromkeys(edges) M = len(data_total) kern_temp = {} for edge in edges: kern[edge] = np.zeros(M) for m in range(M): print('Data item: %d' % m) data = data_total[m] for i in range(0, len(data)-pmin+1+1): for j in range(pmin-1, pmax+1): if data[i:i+j] in kern_temp: kern_temp[data[i:i+j]][m] += 1 else: kern_temp[data[i:i+j]] = np.zeros(M) kern_temp[data[i:i+j]][m] = 1 for edge in edges: key = edge[0]+edge[1][-1] if key in kern_temp: kern[edge] = kern_temp[key] G = nx.DiGraph() G.add_edges_from(edges) nx.set_edge_attributes(G, 'kern_unnorm', kern) return G
def add_speed_attribute(GD): vertices_pos = nx.get_node_attributes(GD, 'pos') edge_layer_attributes = nx.get_edge_attributes(GD, "layer") for curr_edge in nx.edges(GD): u = curr_edge[0] v = curr_edge[1] u_pos = vertices_pos[u] v_pos = vertices_pos[v] u_x = float(u_pos.split(",")[0]) v_x = float(v_pos.split(",")[0]) u_y = float(u_pos.split(",")[1]) v_y = float(v_pos.split(",")[1]) distance = math.sqrt( (u_x - v_x)**2 + (u_y - v_y)**2) curr_edge_layer_attr = edge_layer_attributes[curr_edge] speed = beta if (int(re.findall('\d+', curr_edge_layer_attr.split(":")[0])[0])==1): speed = alpha travel_time = distance * speed GD[u][v]['travel_time'] = travel_time GD[u][v]['length'] = distance
def random_rewiring(network):
    """
    Rewires a pair of edges such that the degree sequence is preserved.

    Arguments:
        network => The input network.

    Returns:
        A network with one pair of edges randomly rewired.
    """
    # Don't terminate until the rewiring is performed.
    while True:

        # Store the network's edges to avoid repeated computation.
        network_edges = nx.edges(network)

        # If there isn't at least 1 edge, break out and return.
        if len(network_edges) == 0:
            break

        # Randomly select a link from the network.
        link1 = (source1, target1) = random.choice(network_edges)

        # Find all the edges that share no nodes with link1.
        disjoint_links = [link for link in network_edges
                          if not any(node in link for node in link1)]

        # If there are no disjoint links, it would be impossible to randomize the network while
        # still preserving the degree sequence, so break out and return.
        if len(disjoint_links) == 0:
            break

        # Randomly select a DIFFERENT link from the network (no sharing of nodes allowed).
        link2 = (source2, target2) = random.choice(disjoint_links)

        # If the graph is directed, there is only one option.
        # If the graph is undirected, there are two options, each with a 50-50 chance.
        if not nx.is_directed(network) and random.random() < 0.5:
            # Rewire links A-B and C-D to A-C and B-D.
            new_link1 = (source1, source2)
            new_link2 = (target1, target2)
        else:
            # Rewire links A-B and C-D to A-D and C-B.
            new_link1 = (source1, target2)
            new_link2 = (source2, target1)

        # If the new links aren't in the network already, replace the old links with the new links.
        if not network.has_edge(*new_link1) and not network.has_edge(*new_link2):
            # Remove the old links.
            network.remove_edges_from([link1, link2])
            # Add the new links.
            network.add_edges_from([new_link1, new_link2])
            # Return the slightly altered new network.
            return network
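# Hedged aside, not from the original source: NetworkX ships a built-in with the
# same intent, nx.double_edge_swap(), which performs a degree-preserving rewire.
# The check below (toy graph, illustrative only) confirms the degree sequence
# is unchanged after one swap.
import networkx as nx

G = nx.gnm_random_graph(30, 60, seed=2)
degrees_before = sorted(d for _, d in G.degree())
nx.double_edge_swap(G, nswap=1, max_tries=100, seed=2)
degrees_after = sorted(d for _, d in G.degree())
assert degrees_before == degrees_after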
def verify_graph(nx_graph):
    g_gen = Graph()
    for edge in nx.edges(nx_graph):
        g_gen.add_edge(*edge)
    diameter, center = g_gen.diameter_center()
    assert center in nx.center(nx_graph)
    assert diameter == nx.diameter(nx_graph)
def multi_bonds(atoms):
    multibonded = [atom for atom in atoms.nodes()
                   if len(nx.edges(atoms, atom)) < valences[atom.element]]
    for i, atom in enumerate(multibonded):
        paired = False
        for other in atoms.neighbors(atom):
            if isinstance(other, GhostAtom):
                paired = True
                continue
            if len(nx.edges(atoms, other)) < valences[other.element]:
                ghost_atom = GhostAtom(**(atom.__dict__))
                ghost_atom.name = atom.name + "*"
                ghost_other = GhostAtom(**(other.__dict__))
                ghost_other.name = other.name + "*"
                atoms.add_edge(other, ghost_atom)
                atoms.add_edge(atom, ghost_other)
                paired = True
def test_draw_domino(self):
    game_state, bot_game_state = self.get_test_game_state()
    d = Domino(3, 3)
    bot_game_state.draw_domino(d)
    self.assertIn(d, bot_game_state.dominoes)
    self.assertIn(BotMove(d, bot_game_state.all_trains[4]),
                  bot_game_state.get_all_valid_moves())
    self.assertIn(d, bot_game_state.dominoes_for_number[3])
    self.assertIn((3, 3), edges(bot_game_state.graph))
def distance(users_vectors, net):
    distances = {}
    # for user in itertools.combinations(users_vectors, 2):
    for user in nx.edges(net):
        try:
            distances[(user[0], user[1])] = np.linalg.norm(
                users_vectors[user[0]] - users_vectors[user[1]])
        except KeyError:
            continue
    return sorted(distances.items(), key=operator.itemgetter(1))
def to_dict(self):
    """ Serialize graph edges back into JSON """
    d = collections.defaultdict(list)
    for leaf, node in nx.edges(self.G):
        d[node].append(leaf)
    return dict(d)
def tetravalent_atoms(atoms):
    """
    Identifies possible candidates (those with 4 bonds)
    :param atoms: Graph of atoms
    :return: List of atoms with 4 bonds
    """
    candidates = [atom for atom in atoms.nodes() if len(nx.edges(atoms, atom)) == 4]
    return candidates
def chiral_candidates(atoms):
    """
    Returns those atoms which have 4 neighbours in the atom graph (and thus
    might be chiral).
    :param atoms: atom graph
    :return: list of read_amber.Atom objects which have 4 neighbours in the atoms graph
    """
    candidates = [atom for atom in atoms.nodes() if len(nx.edges(atoms, atom)) == 4]
    return candidates
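# Hedged aside, not from the original module: for a simple graph without
# self-loops, len(nx.edges(G, node)) is just the node's degree, so the same
# candidate filter can be written with G.degree(). Toy graph for illustration.
import networkx as nx

G = nx.star_graph(4)  # node 0 has exactly 4 neighbours
candidates = [n for n in G.nodes() if G.degree(n) == 4]
print(candidates)  # [0]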
def get_graphinfo(graph_fn):
    graph = nx.read_gml(graph_fn)
    cells_inhib = np.array(nx.get_node_attributes(graph, 'inh').values(),
                           dtype=np.int)
    graph_edges = nx.edges(graph)
    number_of_nodes = nx.number_of_nodes(graph)
    degree_histogram = nx.degree_histogram(graph)
    return cells_inhib, graph_edges, number_of_nodes, degree_histogram
def edges_from(self, node): return nx.edges(self.__nxgraph, [node])
def MI(graph_file):
    G = nx.read_edgelist(graph_file)
    node_num = nx.number_of_nodes(G)
    edge_num = nx.number_of_edges(G)
    print(node_num)
    print(edge_num)
    sim_dict = {}  # dictionary storing the similarity scores
    I_pConnect_dict = {}
    pDisConnect = 1
    edges = nx.edges(G)
    ebunch = nx.non_edges(G)
    # Vertex degrees are needed repeatedly, so they could be precomputed:
    # degree_list = [nx.degree(G, v) for v in range(G.number_of_nodes())]
    # The next two loops compute $P(L^1_{xy})$; strictly we only need the
    # values for distinct degree pairs $P(L^1_{k_x k_y})$.
    for u, v in edges:
        uDegree = nx.degree(G, u)
        vDegree = nx.degree(G, v)
        for i in range(1, vDegree + 1):
            pDisConnect = pDisConnect * (((edge_num - uDegree) - i + 1) / (edge_num - i + 1))
        pConnect = 1 - pDisConnect
        if pConnect == 0:
            I_pConnect = -math.log2(0.0001)
        else:
            I_pConnect = -math.log2(pConnect)
        I_pConnect_dict[(u, v)] = I_pConnect
        I_pConnect_dict[(v, u)] = I_pConnect
        pDisConnect = 1
    for m, n in ebunch:
        mDegree = nx.degree(G, m)
        nDegree = nx.degree(G, n)
        for i in range(1, nDegree + 1):
            pDisConnect = pDisConnect * (((edge_num - mDegree) - i + 1) / (edge_num - i + 1))
        pConnect = 1 - pDisConnect
        if pConnect == 0:
            I_pConnect = -math.log2(0.0001)
        else:
            I_pConnect = -math.log2(pConnect)
        I_pConnect_dict[(m, n)] = I_pConnect
        I_pConnect_dict[(n, m)] = I_pConnect
        pDisConnect = 1
    ebunchs = nx.non_edges(G)
    i = 0
    # $I(L^1_{xy}; z) = I(L^1; z)$ does not depend on x and y,
    # so it can be computed up front.
    for u, v in ebunchs:
        pMutual_Information = 0
        I_pConnect = I_pConnect_dict[(u, v)]
        for z in nx.common_neighbors(G, u, v):
            neighbor_list = list(nx.neighbors(G, z))
            neighbor_num = len(neighbor_list)
            for m in range(len(neighbor_list)):
                for n in range(m + 1, len(neighbor_list)):
                    if m != n:
                        I_ppConnect = I_pConnect_dict[(neighbor_list[m], neighbor_list[n])]
                        if nx.clustering(G, z) == 0:
                            pMutual_Information = pMutual_Information + (
                                2 / (neighbor_num * (neighbor_num - 1))) * (
                                (I_ppConnect) - (-math.log2(0.0001)))
                        else:
                            pMutual_Information = pMutual_Information + (
                                2 / (neighbor_num * (neighbor_num - 1))) * (
                                (I_ppConnect) - (-math.log2(nx.clustering(G, z))))
        sim_dict[(u, v)] = -(I_pConnect - pMutual_Information)
        i = i + 1
        print(i)
        print(str(u) + "," + str(v))
        print(sim_dict[(u, v)])
    return sim_dict
def introduceNode(lastF, introduced, i, nicePath, G, k): ''' Introduces a new node and calculates characteristics for it. See page 8 of [1]. Introduce-edge is implemented seperately for convenience. INPUT lastF: The full set of characteristics for G_{i-1} introduced: A set of length 1 containing the new vertex i: The current bag in the path nicePath: The nice path decomposition of G G: The networkx graph in question k: The k for which we are checking linear width OUTPUT FSi: The full set (in list form) of characteristics for G_i ''' global firstEdge #Make subgraph G_i and find the edge set of our new node Gi = isubgraph(i, nicePath, G) newVertex = list(introduced)[0] edges = nx.edges(Gi, newVertex) #If there are no new edges, our FS has not changed if len(edges) == 0: FSi = lastF return FSi #Now we go edge by edge creating characteristics FSi = lastF for p in range(0, len(edges)): #Check if we are adding the first edge if firstEdge and p == 0: firstEdge = False FSi = [[[set(edges[0])], [set(edges[0])], [[0]]]] continue newFS = [] #Find the pendant vertices in G^p_i pendant = findPendant(edges, p, Gi) #Go through each characteristic and make new ones for char in FSi: I = char[0] A = char[2] for j in range(0, len(I)): for m in range(0, len(A[j])): print 'The characteristic is: ', char print 'We are inserting: ', edges[p], 'from', edges newChar = compress( introduceEdge(char, j, m, edges[p], pendant)) Anew = newChar[2] #Check if the new characteristic has lin width <= k if maxSeq(Anew) <= k: newFS.append(newChar) FSi = newFS return FSi
with open(arguments['input_file'], 'r') as csvfile:
    f = csv.reader(csvfile, delimiter=' ')
    for row in f:
        i += 1
        n1 = row[0]
        n2 = row[1]
        add_indicator = 0
        if (len(G.nodes()) == 0):
            G.add_edge(n1, n2)
        else:
            for node in G.nodes():
                if (node == n1 or node == n2):
                    r = np.random.uniform()
                    if (r <= p):
                        G.add_edge(n1, n2)
                        add_indicator = 1
                    break
            if (add_indicator == 0):
                r = np.random.uniform()
                if (r <= q):
                    G.add_edge(n1, n2)
        no_of_edges += 1

print(no_of_edges)
print(nx.edges(G), len(G.edges()))
print("|G|: %d |E|: %d" % (len(G.nodes()), len(G.edges())))
nx.write_edgelist(G, '{}.1_sampleandhold'.format(arguments['input_file']),
                  delimiter=' ', data=False)
    else:
        labellist.append(glabel_list)
        sim_labeldict[gnode_list] = labellist  # node -> label dictionary
        b_number = str(gnode_list)
        # print(gnode_list)
        # print(glabel_list)
    return sim_labeldict


sim_labeldict = readfile('release-youtube-groupmemberships.txt')
#print(sim_labeldict)

# read the network links and pick a random node
G = nx.Graph(nx.read_edgelist('release-youtube-links.txt'))
ledge_list = nx.edges(G)
#edges=nx.edges(G)
#ledge_list=edges
lnode_list = nx.nodes(G)
#nodes=nx.nodes(G)
#lnode_list=nodes
#print(ledge_list)
#print(lnode_list)
randomnode = random.choice(lnode_list)  # random node
#print(randomnode)
randomnode_neighbor = {randomnode}  # neighbours of the random node
#print(randomnode_neighbor)
while (len(randomnode_neighbor) < 50000):
    for v in randomnode_neighbor:
        randomnode_neighbor = randomnode_neighbor | set(G.neighbors(v))
        if len(randomnode_neighbor) > 50000:
            break
    #print(randomnode_neighbor)
    #print(len(randomnode_neighbor))
def getGraphNodes(adj,layout='spring'): """ Returns arrays containing cartesian coordinates of the nodes/connections \ contained in the input adjacency matrix. Args: adj (petsc4py.PETSc.Vec) : an :math:`N\\times N` PETSc-type adjacency matrix. layout (str): the format to store the position of the nodes (only used when running :func:`plotGraph`). * ``spring`` *(default)* - spring layout. * ``circle`` - nodes are arranged in a circle. * ``spectral`` - nodes are laid out according to the \ spectrum of the graph. * ``random`` - nodes are arranged in a random pattern. :rtype: tuple of arrays .. important:: Requires `NetworkX <http://networkx.github.io/>`_ .. admonition:: Example >>> nodePos, lineX, lineY = pyCTQW.MPI.plots.getGraphNodes(adj,layout='spring') where * :attr:`nodePos` contains the :math:`(x,y)` coordinates of the vertices * :attr:`lineX` contains the :math:`x` coordinates of edges connecting vertices * :attr:`lineY` contains the :math:`y` coordinates of edges connecting vertices """ try: import networkx as _nx except: print '\nNetworkX Python module required for graph plotting!' return graph = _nx.from_scipy_sparse_matrix(_io.matToSparse(adj).real) if layout == 'circle': pos = _nx.circular_layout(graph) elif layout == 'spectral': pos = _nx.spectral_layout(graph) elif layout == 'random': pos = _nx.random_layout(graph) elif layout == 'shell': pos = _nx.shell_layout(graph) else: pos = _nx.spring_layout(graph,dim=2) testpos = [] for i in pos.itervalues(): testpos.append(i.tolist()) testpos = _np.array(testpos) lineX = [] lineY = [] for i in _nx.edges(graph): lineX.append([testpos[i[0]][0], testpos[i[1]][0]]) lineY.append([testpos[i[0]][1], testpos[i[1]][1]]) return testpos, lineX, lineY
def relationship(book_text, input_char): sections = book_text.split('\n') cleaned_sections = [] for section in sections: quotes = re.findall("“.*?”", section) for quote in quotes: section = section.replace(quote, " ") cleaned_sections.append(section) characters = input_char characters = [character.title() for character in characters] sections_dictionary = {} iterative = 0 for section in cleaned_sections: iterative += 1 for char in characters: if char in section: if str(iterative) in sections_dictionary.keys(): sections_dictionary[str(iterative)].append(char) else: sections_dictionary[str(iterative)] = [char] df = pd.DataFrame(columns=characters, index=characters) df[:] = int(0) for value in sections_dictionary.values(): for character1 in characters: for character2 in characters: if character1 in value and character2 in value: df[character1][character2] += 1 df[character2][character1] += 1 edge_list = [] for index, row in df.iterrows(): i = 0 for col in row: weight = float(col) / 464 edge_list.append((index, df.columns[i], weight)) i += 1 updated_edge_list = [x for x in edge_list if not x[2] == 0.0] node_list = [] for i in characters: for e in updated_edge_list: if i == e[0] and i == e[1]: node_list.append((i, e[2] * 6)) for i in node_list: if i[1] == 0.0: node_list.remove(i) for i in updated_edge_list: if i[0] == i[1]: updated_edge_list.remove(i) plt.subplots(figsize=(14, 10)) G = nx.Graph() for i in sorted(node_list): G.add_node(i[0], size=i[1]) G.add_weighted_edges_from(updated_edge_list) node_order = characters updated_node_order = [] for i in node_order: for x in node_list: if x[0] == i: updated_node_order.append(x) test = nx.get_edge_attributes(G, 'weight') updated_again_edges = [] for i in nx.edges(G): for x in test.keys(): if i[0] == x[0] and i[1] == x[1]: updated_again_edges.append(test[x]) node_scalar = 800 edge_scalar = 10 sizes = [x[1] * node_scalar for x in updated_node_order] widths = [x * edge_scalar for x in updated_again_edges] pos = nx.spring_layout(G, k=0.42, iterations=17) nx.draw(G, pos, with_labels=True, font_family="malgun gothic", font_size=8, font_weight='bold', node_size=sizes, width=widths) plt.axis("off") image = io.BytesIO() plt.savefig(image, format='png') image.seek(0) # rewind the data string = base64.b64encode(image.read()) image_64 = 'data:image/png;base64,' + urllib.parse.quote(string) return image_64
def bridges(density, restrictions, costs, topological_correction_value): binary_map = np.greater(density, 0.5) save_binary_map = binary_map.copy() pad_density = np.pad(density, ((1, 1), (1, 1)), mode='constant') pad_binary_map = np.greater(pad_density, 0.5) density_shape = density.shape width = density_shape[0] height = density_shape[1] pad_costs = np.pad(costs, ((1, 1), (1, 1)), mode='constant') [solid_labels, num_solid_labels] = skim.label(pad_binary_map, neighbors=4, return_num=True) if num_solid_labels <= 1: return density density_graph = nx.MultiDiGraph() for x_idx in range(0, width): for y_idx in range(0, height): center_node_id = (x_idx + 1) * (pad_density.shape[1]) + (y_idx + 1) for x_offset in range(0, 3): for y_offset in range(0, 3): if ((x_offset == 1) and (y_offset == 1)) or ( (np.abs(x_offset - 1) + np.abs(y_offset - 1)) > 1): continue next_x_idx = x_idx + x_offset next_y_idx = y_idx + y_offset if ((next_x_idx == 0) or (next_y_idx == 0) or (next_x_idx == (pad_density.shape[0] - 1)) or (next_y_idx == (pad_density.shape[1] - 1))): continue next_node_id = next_x_idx * ( pad_density.shape[1]) + next_y_idx next_density_value = pad_binary_map[next_x_idx, next_y_idx] cost_value = pad_costs[next_x_idx, next_y_idx] if next_density_value: cost_value = 0 density_graph.add_edge(center_node_id, next_node_id, weight=cost_value) label_to_representative_pt = {} for x_idx in range(0, width): for y_idx in range(0, height): density_value = pad_density[1 + x_idx, 1 + y_idx] component_label = solid_labels[1 + x_idx, 1 + y_idx] if (component_label in label_to_representative_pt.keys()) or ( not density_value): continue label_to_representative_pt[component_label] = [x_idx, y_idx] mst_graph = nx.Graph() for label_idx_start in range(0, num_solid_labels): component_start = 1 + label_idx_start source_pt = label_to_representative_pt[component_start] source_node_id = (source_pt[0] + 1) * (pad_density.shape[1]) + ( source_pt[1] + 1) min_path_all = nx.shortest_path(density_graph, source=source_node_id, weight='weight') for label_idx_end in range(1 + label_idx_start, num_solid_labels): component_end = 1 + label_idx_end target_pt = label_to_representative_pt[component_end] target_node_id = (target_pt[0] + 1) * (pad_density.shape[1]) + ( target_pt[1] + 1) min_path = min_path_all[target_node_id] min_path_distance = 0 for path_idx in range(1, (len(min_path) - 1)): node_id = min_path[path_idx] source_x = int(node_id / pad_density.shape[1]) - 1 source_y = node_id % pad_density.shape[1] - 1 min_path_distance += pad_costs[source_x, source_y] mst_graph.add_edge(component_start, component_end, weight=min_path_distance) mst = nx.minimum_spanning_tree(mst_graph) mst_edges = nx.edges(mst) for edge in mst.edges(): edge_start, edge_end = edge source_pt = label_to_representative_pt[edge_start] target_pt = label_to_representative_pt[edge_end] source_node_id = (source_pt[0] + 1) * (pad_density.shape[1]) + ( source_pt[1] + 1) target_node_id = (target_pt[0] + 1) * (pad_density.shape[1]) + ( target_pt[1] + 1) min_path = nx.shortest_path(density_graph, source=source_node_id, target=target_node_id, weight='weight') for path_idx in range(1, (len(min_path) - 1)): node_id = min_path[path_idx] source_x = int(node_id / pad_density.shape[1]) - 1 source_y = node_id % pad_density.shape[1] - 1 density[source_x, source_y] = topological_correction_value pad_density[1 + source_x, 1 + source_y] = topological_correction_value binary_map[source_x, source_y] = True pad_binary_map[1 + source_x, 1 + source_y] = True restrictions = 
np.logical_not(np.logical_xor(binary_map, save_binary_map))
def add_to_prod_rules(production_rules, lhs, rhs, s): prod_rules = production_rules letter = 'a' d = {} for x in lhs: d[x] = letter letter = chr(ord(letter) + 1) lhs_s = set() for x in lhs: lhs_s.add(d[x]) if len(lhs_s) == 0: lhs_s.add("S") i = 0 rhs_s = nx.Graph() for n in rhs.nodes(): if n in d: n = d[n] else: d[n] = i n = i i = i + 1 rhs_s.add_node(n) for e in rhs.edges(): u = d[e[0]] v = d[e[1]] rhs_s.add_edge(u, v) lhs_str = "(" + ",".join(str(x) for x in sorted(lhs_s)) + ")" nodes = set() rhs_term_dict = [] for c in sorted(nx.edges(rhs_s)): rhs_term_dict.append((",".join(str(x) for x in sorted(list(c))), "T")) nodes.add(c[0]) nodes.add(c[1]) for c in s: rhs_term_dict.append((",".join(str(d[x]) for x in sorted(c)), "N")) for x in c: nodes.add(d[x]) for singletons in set(nx.nodes(rhs_s)).difference(nodes): rhs_term_dict.append((singletons, "T")) rhs_str = "" for n in rhs_term_dict: rhs_str = rhs_str + "(" + n[0] + ":" + n[1] + ")" nodes.add(n[0]) if rhs_str == "": rhs_str = "()" if lhs_str not in prod_rules: rhs_dict = {} rhs_dict[rhs_str] = 1 prod_rules[lhs_str] = rhs_dict else: rhs_dict = prod_rules[lhs_str] if rhs_str in rhs_dict: prod_rules[lhs_str][rhs_str] = rhs_dict[rhs_str] + 1 else: rhs_dict[rhs_str] = 1 ##sorting above makes rhs match perfectly if a match exists print lhs_str, "->", rhs_str
def E(G): return list(nx.edges(G))
'física', 'máquinas', 'trabalhar', 'imagens', 'condições', 'ramo', 'lida', 'limpa', 'biomassa', 'sustentabilidade', 'incorporação', 'criação', 'necessidades', 'população', 'sociedade', 'drenagem' ] #reorder node list updated_node_order = [] for i in node_order: for x in node_list: if x[0] == i: updated_node_order.append(x) #reorder edge list - this was a pain test = nx.get_edge_attributes(G, 'weight') updated_again_edges = [] for i in nx.edges(G): for x in test: if i[0] == x[0] and i[1] == x[1]: updated_again_edges.append(test[x]) #drawing custimization node_scalar = 800 edge_scalar = 100 sizes = [x[1] * node_scalar for x in updated_node_order] widths = [x * edge_scalar for x in updated_again_edges] #draw the graph pos = nx.spring_layout(G, k=0.42, iterations=17) nx.draw(G, pos,
def test_edges(self):
    assert_equal(self.G.edges(), networkx.edges(self.G))
    assert_equal(self.DG.edges(), networkx.edges(self.DG))
    assert_equal(self.G.edges(nbunch=[0, 1, 3]),
                 networkx.edges(self.G, nbunch=[0, 1, 3]))
    assert_equal(self.DG.edges(nbunch=[0, 1, 3]),
                 networkx.edges(self.DG, nbunch=[0, 1, 3]))
def save_giant(data: Data, name): labels = data.labels features = data.features edge_list = data.raw_edge_list # train_mask = data.train_mask # valid_mask = data.valid_mask # tests_mask = data.tests_mask # print(labels.shape) # print(features.shape) # print(edge_list.shape) # print(train_mask.shape) # print(valid_mask.shape) # print(tests_mask.shape) edge_list = edge_list.numpy() es = [] for i in range(edge_list.shape[1]): es.append((edge_list[0, i], edge_list[1, i])) G = nx.Graph() G.add_edges_from(es) G.remove_edges_from(nx.selfloop_edges(G)) # print("N:",len(nx.nodes(G))) # print("M:",len(nx.edges(G))) # print("M:",edge_list.shape[1]) # # assert len(nx.edges(G)) *2 == edge_list.shape[1] #print("number of componetns:", nx.number_connected_components(G)) if nx.number_connected_components(G) > 1: G = get_largest_component(G) # # sampling # sample_size = 1000 # chosen_nodes = np.random.permutation(np.array(G.nodes))[:sample_size] # G = nx.subgraph(G, chosen_nodes) # G = get_largest_component(G) # print("sampled size:",len(nx.nodes(G))) # fix sampled_nodes = np.array(sorted(nx.nodes(G)), dtype=np.int) num_nodes = len(sampled_nodes) labels = labels.numpy()[sampled_nodes] features = features.numpy()[sampled_nodes] # use old mask # train_mask = train_mask[sampled_nodes] # valid_mask = valid_mask[sampled_nodes] # tests_mask = tests_mask[sampled_nodes] # # gen new mask # idx = np.arange(num_nodes) # idx = np.random.permutation(idx) # train_num = int(0.3 * num_nodes) # valid_num = int(0.3 * num_nodes) # tests_num = num_nodes - train_num - valid_num # # train_mask = np.zeros(num_nodes, dtype=np.int) # train_mask[idx[:train_num]] = 1 # train_mask = train_mask.astype(bool) # # valid_mask = np.zeros(num_nodes, dtype=np.int) # valid_mask[idx[train_num:train_num + valid_num]] = 1 # valid_mask = valid_mask.astype(bool) # # tests_mask = np.zeros(num_nodes, dtype=np.int) # tests_mask[idx[train_num + valid_num:]] = 1 # tests_mask = tests_mask.astype(bool) # mapping remap = {} for i in range(num_nodes): remap[sampled_nodes[i]] = i G = nx.relabel_nodes(G, mapping=remap) # oubling edge_list edge_list = np.array(nx.edges(G), dtype=np.int).transpose() # 2,M directed = np.stack((edge_list[1], edge_list[0]), axis=0) edge_list = np.concatenate((edge_list, directed), axis=1) # print("N:", len(G.nodes())) # print("M:", len(G.edges())) data = Data(torch.tensor(edge_list, dtype=torch.long), torch.tensor(features, dtype=torch.float), torch.tensor(labels, dtype=torch.long), data.split_setting) data.print_statisitcs() data.save(name) return data
def transform(self): self.build_cfg() for root in self.program.functions: self.root = root exp_name = self.config.input.rsplit( '/', 1)[1][:-3] # get file input name -'.bs' cfg_file = open("%s/%s.cfg" % (self.config.output, exp_name), "w") cfg_file.write("NAME(%s.cfg)\n\n" % exp_name) # conditional groups cgs = dict() for bid, block in self.program.functions[root]['blocks'].items(): self.cblock = block write = False for instr in block.instructions: if instr.name in [ 'MIX', 'DISPENSE', 'DISPOSE', 'HEAT', 'SPLIT', 'DETECT' ]: write = True break if not write: continue # # we write the conditions to a list to be appended to the .cfg file # if block.dag is None: # cgs[bid] = set() # """ # for each conditional group, we have a variable number of COND, EXP, and TD nodes # COND: variable number of parameters as: # group number, # dep. dags, comma-sep-list of dep. dags, # branch dags, comma-sep-list # of branch dags, expression ID # EXP: parameters as: # expression ID (matches COND) # variable based on expression-type # TD: transfer droplet for each routed droplet, parameters are: # DAGfrom, nodeIdFrom, DAGto, nodeIDTo # # each transfer droplet will have a corresponding TRANSFER_OUT/_IN in the appropriate dags, # """ cfg_file.write("DAG(DAG%s)\n" % str(bid)) dag_file = open( "%s/%s_DAG%s.dag" % (self.config.output, exp_name, str(bid)), "w") dag_file.write("DagName (DAG%s)\n" % str(bid)) # for all uses without defs, we must transfer in already_transferred_in = dict() dispenses = set() for node in block.instructions: if node.name in ['DISPENSE']: dispenses.add(node.defs['var'].points_to.name) for node in block.instructions: if node.name in [ 'BINARYOP', 'CONDITIONAL', 'DISPENSE', 'MATH' ]: continue # for each use, we must check if predecessors in this block defined it # if no def from predecessor in this block, then must transfer in for use in node.uses: tn = None if type(use['var'].value) in {Module}: continue use = use['var'] if isinstance(use, RenamedSymbol): points_to = use.points_to.name else: points_to = use.name # points_to = use.name if points_to in dispenses or points_to in already_transferred_in: continue if use.name not in block.defs: already_transferred_in[points_to] = True dag_file.write( self.write_transfer(self.tid, points_to, False)) dag_file.write(self.write_edge(self.tid, node.iid)) tn = TransferNode(self.tid, bid, points_to, 'in') block.defs.add(use.name) self.tid += 1 if tn is not None: if bid in self.block_transfers: self.block_transfers[bid].add(tn) else: self.block_transfers[bid] = {tn} for node in block.instructions: self.opid = node.iid if node.op is IRInstruction.DETECT: dag_file.write(self.write_detect(node)) elif node.op is IRInstruction.MIX: dag_file.write(self.write_mix(node)) elif node.op is IRInstruction.SPLIT: dag_file.write(self.write_split(node)) elif node.op is IRInstruction.HEAT: dag_file.write(self.write_heat(node)) elif node.op is IRInstruction.DISPOSE: dag_file.write(self.write_dispose(node)) elif node.op is IRInstruction.DISPENSE: dag_file.write(self.write_dispense(node)) elif node.op is IRInstruction.PHI or node.op is IRInstruction.CONDITIONAL: continue else: if self.config.debug: self.log.warn( "Unrecognized/unsupported instruction type: %s" % node.name) # this is a bit of a hack, because SSA renaming doesn't quite work how we might hope for heats/detects # for all defs without uses, we must transfer out (if used elsewhere) # or dispose (if never used again) # For now, for each rdef in the block, we get the original variable name (_def) and instruction (i) 
# Then, we check if rdef is used in the block (we do not need to transfer) AFTER this instruction # if not, we check each successor block (succ): for each instruction si in succ, we check # if their uses points to _def, if so, we must transfer. for rdef in block.defs: _def = None skip = False defIndex = -1 for index in range(len(block.instructions)): i = block.instructions[index] if i.name in [ 'PHI', 'BINARYOP', 'CONDITIONAL', 'DISPOSE', 'MATH', 'NOP', 'DETECT' ]: skip = True continue if defIndex == -1: if rdef is i.defs['name']: defIndex = index skip = False # find the points_to def _def = i.defs['var'].points_to.name instr = i break continue # after finding the definition point, we traverse in reverse order to find the # last use. if it is not used after define, or if the last use is a detect/heat, we must transfer for index in reversed(range(len(block.instructions))): if index == defIndex: break i = block.instructions[index] # heat and detects use the droplet, but do not consume it, so may need to transfer still if i.name in ['HEAT', 'DETECT'] and rdef in [ x['name'] for x in i.uses ]: skip = False if _def is None: x = [x for x in i.uses if x['name'] == rdef] _def = x[0]['var'].points_to.name break x = [x for x in i.uses if x['name'] == rdef] if x: # we use this variable after it is defined in this block skip = True break if skip: continue transferred = False tn = None # we've made it here, we must transfer this rdef if block.dag is not None: # list of reachable block ids reachable = ({ x for v in dict( nx.bfs_successors(self.cfg['graph'], bid)).values() for x in v }) for s in reachable: if transferred: break # get successor block sblock = self.program.functions[root]['blocks'][s] for si in sblock.instructions: if si.op in {IRInstruction.PHI}: continue if transferred: break x = [ x['var'].points_to.name for x in si.uses if type(x['var']) is not Symbol ] if _def in x: dag_file.write( self.write_edge(instr.iid, self.tid)) dag_file.write( self.write_transfer( self.tid, _def, True)) tn = TransferNode(self.tid, bid, _def, 'out') self.tid += 1 transferred = True break else: transferred = True if not transferred: # what to do with this droplet? if self.config.debug: self.log.warn( "No more operations for {}, warning will appear in {}" .format(_def, dag_file.name)) dag_file.write( "// **NO MORE OPERATIONS FOR {}; SHOULD SAVE OR DISPOSE**" .format(_def)) if tn is not None: if bid in self.block_transfers: self.block_transfers[bid].add(tn) else: self.block_transfers[bid] = {tn} dag_file.close() # now build the conditions, with their expressions and potential transfer droplets # COND/EXP cases: # 1) edge on CFG from n to n' with no conditional = UNCOND transfer. # 2) edges on CFG from n to {t, f} with a conditional in a block with no executable instructions # this is a loop. if repeat, we have "LOOP_NUM" condition. if not a repeat, need to link # "ONE_SENSOR" to the appropriate detect instruction # This is the most difficult case, as MFSim treats all loops as do-while loops. we'll need # to make transfer from pred(n) to t, edge from t to t and t to f. # 3) edge on CFG from n to n' with a conditional in a block with executable instructions # this is an if/else block. translation is straightforward # TD -- each transferred droplet should be accounted for in self.block_transfers. 
need to map COND/EXP to # the appropriate transferred droplet # store elements as: bid: (true branch, false branch) # for each edge in bb_graph, must have corresponding in .cfg edges_not_translated = list(nx.edges(self.cfg['graph'])) conditional_groups = dict() while len(edges_not_translated) > 0: for bid, block in self.program.functions[root]['blocks'].items( ): if block.instructions is None: continue cond = False executable = False if block.dag is not None: for instr in block.instructions: if cond and executable: break if instr.name is 'CONDITIONAL': cond = True if instr.name in [ 'MIX', 'DETECT', 'SPLIT', 'HEAT', 'DISPENSE', 'DISPOSE' ]: executable = True # deal with successor loop headers, which will take care of [current : translated]: # bid->succ : bid->true # succ->true : [no translation] # succ->false : true->false # back-edge(s) to succ : back1->true...backn->true # for succ_id in self.program.functions[root]['graph'].succ[bid]: for succ_id in self.cfg['graph'].successors(bid): # we know there is an edge from bid to succ_id, need to check if bid or succ_id is a loop # header if (bid, succ_id) not in edges_not_translated: continue if bid in self.loop_headers: # bid is loop header print( "if I take care of everything already, then continue?" ) continue elif succ_id in self.loop_headers: # unconditional branch into loop (MFSim interprets all loops as do-while) conditional_groups[self.cgid] = [] conditional_groups[self.cgid].append( self.write_cond( self.loop_headers[succ_id]['instr'], self.cgid, [bid], 1, [self.loop_headers[succ_id]['t']], self.expid, 1)) edges_not_translated.remove((bid, succ_id)) edges_not_translated.remove( (succ_id, self.loop_headers[succ_id]['t'])) self.cgid += 1 conditional_groups[self.cgid] = [] # find all back edges -- this is probably wrong, as these may fall into different CGs back_edges = [ x for x in edges_not_translated if x[1] is succ_id ] for be in back_edges: conditional_groups[self.cgid].append( self.write_cond( self.loop_headers[succ_id] ['instr'], self.cgid, [be[0]], 1, [self.loop_headers[succ_id]['t']], self.expid, 1, 'LOOP')) edges_not_translated.remove( (be[0], succ_id)) # deal with exit from loop conditional_groups[self.cgid].append( self.write_cond( self.loop_headers[succ_id]['instr'], self.cgid, [self.loop_headers[succ_id]['t']], 1, [self.loop_headers[succ_id]['f']], self.expid, 1, )) edges_not_translated.remove( (succ_id, self.loop_headers[succ_id]['f'])) self.cgid += 1 else: # dealing with if conditional if cond and executable: self.cblock = block conditional_groups[self.cgid] = [] for instr in self.program.functions[root][ 'blocks'][bid].instructions: if instr.op is IRInstruction.CONDITIONAL: conditional_groups[ self.cgid].append( self.write_cond( self.cfg[bid] [instr.iid]['instr'], self.cgid, [bid], 1, [ self.cfg[bid][ instr.iid]['t'] ], self.expid, 1, 'IF')) edges_not_translated.remove(( bid, self.cfg[bid][instr.iid]['t'])) if self.cfg[bid][instr.iid][ 'f'] is not None: conditional_groups[ self.cgid].append( self.write_cond( self.cfg[bid][ instr.iid] ['instr'], self.cgid, [bid], 1, [ self.cfg[bid][ instr.iid] ['f'] ], self.expid, 1)) edges_not_translated.remove( (bid, self.cfg[bid][ instr.iid]['f'])) self.cgid += 1 cfg_file.write("\nNUMCGS(%s)\n\n" % conditional_groups.__len__()) for cg in conditional_groups.values(): for val in cg: cfg_file.write(val) cfg_file.close() return True
#nx.set_node_attributes(G_t1, t1_label_dict, 'label')
#nx.set_node_attributes(G_t2, t2_label_dict, 'label')

# Extract common vertices
commonVertices = set(set(G_t1.nodes()) & set(G_t2.nodes()))
t2_only_vertices = set(G_t2.nodes()).difference(set(G_t1.nodes()))

forest_vertices = set()
forest_vertices = set(G_t2.nodes()).difference(set(G_t1.nodes()))
for v_id in commonVertices:
    v_adjs = set(nx.neighbors(G_t2, v_id))
    adjs_new = v_adjs.difference(commonVertices)
    if len(adjs_new) > 0:
        # v is connected to new vertices
        forest_vertices.add(v_id)

# print("forest vertices", forest_vertices)
t2_forest = nx.Graph(nx.subgraph(G_t2, forest_vertices))
t2_forest.remove_edges_from(nx.edges(G_t1))
# print("forest", nx.info(t2_forest))
write_dot(t2_forest, outputpath + t2_name + "_forest.dot")
print("done")
''' Return the set of qubits in a circuit D Args: D (list): a sublist of CNOT gates of the input circuit C Returns: Q (set): the set of qubits in D ''' Q = set() for gate in D: Q.add(gate[0]) Q.add(gate[1]) return Q #\__/#\#/\#\__/#\#/\__/--\__/#\__/#\#/~\ # The architecture graph G = q20() #G = nx.Graph.to_undirected(H) EG = nx.edges(G) '''Generate the shortest_path_length dict for Q20 ''' def SPLQ20(): G = q20() spl_dic = dict() V = list(range(20)) for p in V: for q in V: if (q,p) in spl_dic: d = spl_dic[(q,p)] else: d = nx.shortest_path_length(G,p,q) spl_dic[(p,q)] = d return spl_dic
def getSegments(self): return nx.edges(self.adjmatrix)
def net_plot(title, AAT, theta, Z, r, lambda_A, lambda_R, layout='fruchterman', plotting=True, graphScale=1.0, color_threshold=0.7, *args, **kwargs): """Provide the eigenvector covariances AAT from RESCAL_ALS output and Z the sampled network from one of the netCreate sampling algorithms """ # get system time to name figures time = str(dt.datetime.now().time()) time = time.replace(':', '') time = time.replace('.', '') # heatmap hm = ncFunctions.heatmap(AAT, plotting=plotting, color_threshold=color_threshold) if plotting: plt.suptitle( r'A(A^T) HAC for Induced Rank = %s, $\lambda_{A}$ = %s, $\lambda_{R}$ = %s ' % (r, lambda_A, lambda_R), fontweight='bold', fontsize=14) plt.savefig(title + '_heatmap_' + time, figsize=(6, 6)) # NETWORK # Create networkx graph from Z g = nx.Graph() #add nodes with colors of group for n in np.arange(np.shape(hm['corder'])[0] - 1): g.add_node(hm['corder'][n], color=hm['group'][n]) nodeColorList = list(nx.get_node_attributes(g, 'color').values()) #add edges with weight of theta (probability the link exists) cardE = len(np.where(Z == 1)[1]) edgeList = [(np.where(Z == 1)[0][i], np.where(Z == 1)[1][i]) for i in np.arange(cardE)] edgeWeightList = theta[np.where(Z == 1)] * (2 / max( theta[np.where(Z == 1)])) #scaled link prob Pr(Z[i,j]=1) * weight for e in np.arange(len(edgeList) - 1): g.add_edge(edgeList[e][0], edgeList[e][1], weight=edgeWeightList[e]) # NODE SIZES # 1. cluster linkage importance #nodesizelist = cluster['linkage'] * (400 / max(cluster['linkage'])) # 2. betweenness centrality (wide range of sizes; very small on periphery) #nodesizelist = np.asarray(list(nx.betweenness_centrality(G,normalized=False).values())) * (400 / max(list(nx.betweenness_centrality(G,normalized=False).values()))) # 3. degree (smaller range of sizes; easier to see on the periphery) nodeSizeList = np.asarray(list(g.degree().values())) * (300 / max( list(g.degree().values()))) #scaled so the largest is size 350 # reproducibility np.random.seed(1) #bc = nx.betweenness_centrality(g) E = len(nx.edges(g)) V = len(g) k = round(E / V, 3) #size = np.array(list(bc.values())) * 1000 # here replacing the hierarchical magnitude hm['corder'] fignx = plt.figure(figsize=(6, 6)) ## use heatmap color groupings to color nodes and heatmap magnitudes to size nodes if layout == 'spring': nx.draw(g, pos=nx.spring_layout(g, scale=graphScale), node_color=nodeColorList, node_size=nodeSizeList, width=edgeWeightList) elif layout == 'fruchterman': nx.draw(g, pos=nx.fruchterman_reingold_layout(g, scale=graphScale), node_color=nodeColorList, node_size=nodeSizeList, width=edgeWeightList) else: print('Please indicate at a valid layout.') #else: #nx.graphviz_layout(g, prog=graphProg) plt.title( 'Network Created from Induced Rank = %s \n V = %s, E = %s, <k> = %s' % (r, V, E, k), fontweight='bold', fontsize=14) plt.savefig(title + '_graph_' + time, figsize=(6, 6)) #plot log degree sequence degree_sequence = sorted(nx.degree(g).values(), reverse=True) fig3 = plt.figure(figsize=(5, 3)) plt.loglog(degree_sequence) plt.title('Log Degree Distribution', fontweight='bold', fontsize=14) return { 'cluster': hm, 'graph': g, 'linkage': hm['linkage'], 'group': hm['group'] }
def init(graph):
    Es = []
    for i in range(len(nx.edges(graph))):
        Es.append(rdm.randint(0, 1))
    return Es
def overlay_differences_over_graph(g, sig_diffs, delta, add_test_info=True, coloring_scheme=False): if len(sig_diffs) == 0: return colors, stats, statstics_val = {}, {}, {} import networkx as nx labels = nx.get_node_attributes(g, 'label') edges = nx.edges(g) d = dict([(labels[l], l) for l in labels]) for diff in sig_diffs: source = tuple([x for x in diff[SOURCE_ATTR_NAME]]) target = tuple([x for x in diff[TARGET_ATTR_NAME]]) if source not in d or target not in d: continue src_id = d[source] trg_id = d[target] if (src_id, trg_id) not in edges: continue colors[(src_id, trg_id)] = 'red' if 'different_ids' in diff: ## handle n2KDiff results groups, means = split2groups(diff, delta) means_str = [round(mean, 2) for mean in means] grps_str = [sorted(gr) for gr in groups] groups2frq = sorted(zip(grps_str, means_str), key=lambda x: x[1]) grps2mean_str_rep = "" for pair in groups2frq: grps2mean_str_rep += "[" + ",".join(pair[0]) + "]: " + str( pair[1]) + "; " stats[(src_id, trg_id)] = grps2mean_str_rep # stats[(src_id, trg_id)] = str(round(float(diff[PVALUE_ATTR_NAME]), 2)) + ": " + str(grps_str) + " " + means_str # import math # most_sig_diff = sorted(diff['pairwise_comparisons'].items(), key=lambda test: 1 if math.isnan(test[1]['pvalue']) else test[1]['pvalue'])[0] # stats[(src_id, trg_id)] = str(round(float(diff[PVALUE_ATTR_NAME]), 2)) + ": " + str(most_sig_diff[0]) + \ # " - " + str(round(most_sig_diff[1]['p1'], 2)) + ", " + str(round(most_sig_diff[1]['p2'], 2)) # stats[(src_id, trg_id)] = str(diff['different_ids']) + \ # "; pvalue:" + str(round(float(diff[PVALUE_ATTR_NAME]), 2)) + \ # "; transitions per log:" + str(diff[EXPERIMENTS_PER_LOG_ATTR_NAME]) else: ## handle s2KDiff results stats[(src_id, trg_id)] = "p1:" + str(round(diff[P1_ATTR_NAME], 2)) + "; p2:" + str(round(diff[P2_ATTR_NAME], 2)) + \ "; m1:" + str(diff[M1_ATTR_NAME]) + "; m2:" + str(diff[M2_ATTR_NAME]) + \ "; pvalue:" + str(diff[PVALUE_ATTR_NAME]) statstics_val[(src_id, trg_id)] = np.log( float(diff[STATISTICS_ATTR_NAME])) if float( diff[STATISTICS_ATTR_NAME]) > 1 else float( diff[STATISTICS_ATTR_NAME]) if coloring_scheme: some_diff = list(diff['pairwise_comparisons'].keys())[0] p1 = diff['pairwise_comparisons'][some_diff][P1_ATTR_NAME] p2 = diff['pairwise_comparisons'][some_diff][P2_ATTR_NAME] colors[(src_id, trg_id)] = 'red' if p1 > p2 else 'green' nx.set_edge_attributes(g, colors, 'color') edge_labels = nx.get_edge_attributes(g, 'label') min_statistic = min(statstics_val.items(), key=lambda v: v[1])[1] max_statistic = max(statstics_val.items(), key=lambda v: v[1])[1] pen_widths = {} edges2remove = [] for e in edge_labels: if e in stats: if add_test_info: # edge_labels[e] = str(edge_labels[e]) + "_" + str(stats[e]) edge_labels[e] += ": " + str(stats[e]) pen_widths[e] = \ 1 + min(1.5 * (1-((statstics_val[e] - min_statistic) / (max_statistic - min_statistic))), 3) else: edges2remove.append(e) nx.set_edge_attributes(g, edge_labels, 'label') nx.set_edge_attributes(g, pen_widths, 'penwidth') filter_nodes = False if filter_nodes: print('------ graph filtering ------') node_labels = nx.get_node_attributes(g, 'label') for node in node_labels: if not len(node_labels[node]): continue node_label = node_labels[node][0] print(node_labels[node]) # if node_label in ['homepage', 'search', 'sales_anncs'] or 'sales_page' in node_label: # continue if node_label in [ 'sales_anncs', 'sales_page, page_1', 'sales_page, page_2', 'sales_page, page_3' ]: continue g.remove_node(node) # for e in edges2remove: # if e[0] in g.nodes() and e[1] in g.nodes(): 
# g.remove_edge(e[0], e[1]) # edge_labels = nx.get_edge_attributes(g, 'label') # for e in edge_labels: # if "edge_labels[e] print('------ ------ ------ ------') return g
def write_link_shaping(self): self.file_handler.write("\n// Link Traffic Shaping\n") edges = nx.edges(self.graph) tees = nx.get_edge_attributes(self.graph, 'tee') bws = nx.get_edge_attributes(self.graph, 'bw') delays = nx.get_edge_attributes(self.graph, 'delay') drops = nx.get_edge_attributes(self.graph, 'drop') losses = nx.get_edge_attributes(self.graph, 'loss') pull_elements = nx.get_edge_attributes(self.graph, 'l_elements') push_elements = nx.get_edge_attributes(self.graph, 's_elements') for edge in edges: if re.match("[oe][0-9]+", edge[0]) or re.match( "[oe][0-9]+", edge[1]): continue edge0 = int(edge[0]) edge1 = int(edge[1]) bandwidth = self.args.bw delay = self.args.delay drop = self.args.loss if edge in bws: bandwidth = bws[edge] if edge in delays: delay = delays[edge] if edge in drops: drop = drops[edge] elif edge in losses: drop = losses[edge] queue_length = 1000 self.file_handler.write( "link_%d_%d_queue :: ThreadSafeQueue(%d);\n" % (edge0, edge1, queue_length)) self.file_handler.write("link_%d_%d_bw :: LinkUnqueue(%s, %s);\n" % (edge0, edge1, delay, bandwidth)) self.file_handler.write( "link_%d_%d_loss :: RandomSample(DROP %s);\n" % (edge0, edge1, drop)) self.file_handler.write( "link_%d_%d_queue :: ThreadSafeQueue(%d);\n" % (edge1, edge0, queue_length)) self.file_handler.write("link_%d_%d_bw :: LinkUnqueue(%s, %s);\n" % (edge1, edge0, delay, bandwidth)) self.file_handler.write( "link_%d_%d_loss :: RandomSample(DROP %s);\n" % (edge1, edge0, drop)) if edge in pull_elements: for element in pull_elements[edge]: tokens = element.split('(') element_short = get_capital_letters(tokens[0]) self.file_handler.write( "link_%d_%d_%s :: %s;\n" % (edge0, edge1, element_short, element)) self.file_handler.write( "link_%d_%d_%s :: %s;\n" % (edge1, edge0, element_short, element)) if edge in push_elements: for element in push_elements[edge]: tokens = element.split('(') element_short = get_capital_letters(tokens[0]) self.file_handler.write( "link_%d_%d_%s :: %s;\n" % (edge0, edge1, element_short, element)) self.file_handler.write( "link_%d_%d_%s :: %s;\n" % (edge1, edge0, element_short, element)) if edge in tees: self.file_handler.write("link_%d_%d_tee :: Tee(2);\n" % (edge0, edge1)) self.file_handler.write("link_%d_%d_tee :: Tee(2);\n" % (edge1, edge0))
#!/usr/bin/env python
import sys

import networkx as nx

print('Arguments: numberOfVertexes, outputFileName')
print('Generating cycle graph:')
print('Vertexes: ' + sys.argv[1])
# print ('Edges: ' + sys.argv[2])
print('To file: ' + sys.argv[2])

G = nx.DiGraph()
nx.cycle_graph(int(sys.argv[1]), G)
print('Turns out to have: ' + str(nx.number_of_edges(G)) + ' edges')

f = open(sys.argv[2], 'w')
f.write(str(nx.number_of_nodes(G)))
f.write(" ")
f.write(str(nx.number_of_edges(G)))
f.write("\n")
for edge in nx.edges(G):
    f.write(str(edge[0] + 1))
    f.write(" ")
    f.write(str(edge[1] + 1))
    f.write("\n")
# for week2/acyclic: expected result of the acyclicity check
# (1 - there is a cycle; 0 - no cycle; 2 - unknown)
f.write("1")
f.close()
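# For example, assuming the script above is saved as generate_cycle.py, running
#   python generate_cycle.py 3 out.txt
# should produce an out.txt along these lines (edge order may differ):
#   3 3
#   1 2
#   2 3
#   3 1
#   1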
def filter_none_vertices(nxg):
    # iterate over a copy of the edge list so edges can be removed while looping
    for e in list(nx.edges(nxg)):
        if e[0] == "None" or e[1] == "None":
            nxg.remove_edge(e[0], e[1])
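# A minimal usage sketch for filter_none_vertices; the toy graph below is an
# assumption, not part of the original snippet. It assumes the placeholder vertex
# is stored as the string "None", as the comparison above implies.
import networkx as nx

g = nx.Graph()
g.add_edges_from([("a", "b"), ("b", "None"), ("None", "c")])
filter_none_vertices(g)
print(list(g.edges()))  # [('a', 'b')] -- both edges touching "None" were removed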
import time

import numpy
import networkx as nx

import readAdjMatrix
from readAdjMatrix import readAdjMatrix
# `td` (the structural-information module used below) is assumed to be imported elsewhere in this script

numpy.random.seed(int(time.time()))
n = numpy.random.randint(7, 9)
p = numpy.random.uniform(0.3, 0.7)
G = nx.fast_gnp_random_graph(n, p, int(time.time()))
subG = nx.k_core(G, 1)
# nx.draw_networkx(G, with_labels=True)
# plt.show()
print("This graph has", nx.number_of_nodes(subG), "nodes and ", nx.number_of_edges(subG), "edges.")
print("The nodes of G are:")
print(nx.nodes(subG))
print("The edges of G are:")
print(nx.edges(subG, None))
print("The core numbers of G are:")
print(nx.core_number(subG))
print("The optimal 2 dimensional structural information and core based partition of G are:")
print(td.optimalTwoDimensionalStructuralInformation(subG))
# print("All of the partitions are:")
# for idx, item in enumerate(td.partition(list(nx.nodes(subG)))):
#     print(item)
print("The 2 dimensional structural information and corresponding partition of G are:")
print(td.twoDimensionalStructuralInformation(subG))
    # highest frequency among its neighbours
    for node in nodes:
        voisins = nx.neighbors(graph, node)
        set_label(node, voisins, labels)
    # Step 4: go to 2 as long as there exists a node with a label that
    # does not have the highest frequency among its neighbours.
    if verifier_fin(graph, nodes, labels):
        fin = True
    return labels


if __name__ == "__main__":
    filename = "./cleaned/graph.txt"
    graph = creat_graph(filename)
    nodes = list(graph.nodes())
    edges = list(nx.edges(graph))
    for tour in range(1000):
        # Step 1: give a unique label to each node in the network
        labels = {node: node for node in nodes}
        for i in range(0, 3):
            labels = LabelPropagation(graph, nodes, labels)
        # the number of communities obtained
        set_labels = set(labels.values())
        l_labels = list(labels.values())
        nb_communities = len(set_labels)
        print("tour={}, the numbers of communities: {}".format(tour, nb_communities))
        # size_community = {x: l_labels.count(x) for x in l_labels}
        # print(size_community)
    # draw_graph(graph, labels)
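# The helpers used above (set_label, verifier_fin, creat_graph, LabelPropagation) are
# defined elsewhere in the original script. A plausible sketch of set_label, assuming
# `labels` maps node -> current label: adopt the most frequent label among the neighbours.
from collections import Counter

def set_label(node, voisins, labels):
    counts = Counter(labels[v] for v in voisins)
    if counts:
        labels[node] = counts.most_common(1)[0][0]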
    return(alpha)

def w(t):
    return(omega)

# Initial Conditions
init = 10  # Initial number of exposed and infectious nodes
init_E = sorted(rand.sample(range(N), init))                     # Randomly select exposed
init_I = sorted(rand.sample(set(range(N)) - set(init_E), init))  # Randomly select infectious

# Total degree of exposed and infectious nodes
deg_E = sum([G.degree(exp) for exp in init_E])
deg_I = sum([G.degree(inf) for inf in init_I])

# Initialize edge types
# (edge tuples are compared as ordered pairs, hence the min/max normalisation for E-I pairs)
edges = list(nx.edges(G))
init_EE = 2 * len(set([(a, b) for a in init_E for b in init_E if b > a]).intersection(set(edges)))
init_EI = len(set([(min(a, b), max(a, b)) for a in init_E for b in init_I]).intersection(set(edges)))
init_II = 2 * len(set([(a, b) for a in init_I for b in init_I if b > a]).intersection(set(edges)))
init_SE = deg_E - init_EE - init_EI
init_SI = deg_I - init_II - init_EI
init_SS = N * k_0 - (init_EE + init_II + 2 * (init_SE + init_SI + init_EI))

# Initial Condition for ODE
adu_0 = [N - 2 * init, init, init, init_SS, init_SE, init_SI, init_EE, init_EI, init_II, k_0, k2_k_0, phi]  # [S,E,I,SS,SE,SI,EE,EI,II,k,k^2-k,phi]
adu_0_noclust = [N - 2 * init, init, init, init_SS, init_SE, init_SI, init_EE, init_EI, init_II, k_0, k2_k_0]  # [S,E,I,SS,SE,SI,EE,EI,II,k,k^2-k] (no clustering term phi)

# Solve ODE
ode_solution = odeint(adSEIR_pairwise, adu_0, t, args=(beta, eta, gamma, a, w, N))
ode_solution_simp = odeint(adSEIR_pairwise_simp, adu_0, t, args=(beta, eta, gamma, a, w, N))
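# A small hand check of the edge-type bookkeeping above, on an assumed toy path graph
# 0-1-2-3-4 with init_E = [0, 1] and init_I = [2, 3] (these values are not from the original code):
#   deg_E = 1 + 2 = 3 and deg_I = 2 + 2 = 4
#   init_EE = 2 (the edge 0-1, counted once per direction)
#   init_EI = 1 (the edge 1-2), init_II = 2 (the edge 2-3)
#   init_SE = 3 - 2 - 1 = 0 and init_SI = 4 - 2 - 1 = 1, matching the single S-I edge 3-4.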
# create a graph using the edge inputs
g = nx.DiGraph()
g.add_weighted_edges_from(edgeInputs)

# set the max length of every node to negative infinity, except the source
NEGINF = float("-inf")
length = {n: NEGINF for n in nx.nodes(g)}
length[source] = 0
# print(length)

# backtrack pointer of each node, initially empty
backtrack = {n: [] for n in nx.nodes(g)}

# predecessor list of all the nodes
pred = {n: [] for n in nx.nodes(g)}
for u, v in nx.edges(g):
    pred[v] = pred[v] + [u]
pred[source] = []
print(pred)

# visit all nodes in topological order and update length and backtrack
nodes = nx.topological_sort(g)
for n in nodes:
    print(n)
    if len(pred[n]) > 0:
        candidateLengths = [length[p] + g[p][n]['weight'] for p in pred[n]]
        length[n] = max(candidateLengths)
        if length[n] > NEGINF:
            candidateIndex = candidateLengths.index(length[n])
            backtrack[n] = pred[n][candidateIndex]
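# Hypothetical inputs for the block above (edgeInputs and source are assumed here,
# not taken from the original snippet), to illustrate the longest-path computation:
#   edgeInputs = [(0, 1, 7), (0, 2, 4), (2, 3, 2), (1, 3, 1), (3, 4, 3)]
#   source = 0
# With these values length[4] ends up as 11 via the path 0 -> 1 -> 3 -> 4,
# which can be recovered by following backtrack[4], backtrack[3], ... back to source.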
def training(community):
    # print "Training classifiers..."
    sampleSize = 200
    if nx.number_of_nodes(community) > sampleSize:
        # Metropolis-Hastings style random-walk sampling of the community
        print "Network sampling..."
        current = random.sample(community.nodes(), 1)[0]
        sampleNodes = [current]
        graph = community.subgraph(sampleNodes)
        while nx.number_of_nodes(graph) < sampleSize:
            w = random.sample(nx.edges(community, current), 1)
            nxt = w[0][1]
            p = random.random()
            threshold = float(nx.degree(community, current)) / float(nx.degree(community, nxt))
            if p < threshold:
                if nxt not in sampleNodes:
                    sampleNodes.append(nxt)
                current = nxt
                graph = community.subgraph(sampleNodes)
    else:
        graph = community
    # bin the nodes by out-degree and draw seed nodes from each bin
    degree = graph.out_degree(graph.nodes())
    ordered = sorted(degree.items(), key=lambda item: item[1], reverse=True)
    numBins = 20
    binSize = int(nx.number_of_nodes(graph) / numBins)
    bins = dict()
    values = []
    key = 0
    # print ordered
    for index, i in enumerate(ordered):
        # if adding to the current bin would exceed the bin size
        if len(values) + 1 > binSize:
            # if the metric value of the current node is not the same as the last node
            # then increment to the next bin
            if ordered[index - 1][1] != i[1]:
                key = key + 1
                values = []
        # append the node ID
        values.append(i[0])
        bins[key] = values
    numSeeds = 20
    final_seeds = set()
    for binNo in bins.keys():
        # final_seeds.add(random.sample(bins[binNo],1)[0])
        possibleSeeds = bins[binNo]
        if len(possibleSeeds) >= numSeeds:
            seeds = random.sample(possibleSeeds, numSeeds)
        else:
            seeds = possibleSeeds
            if binNo + 1 in bins.keys():
                remainder = numSeeds - len(possibleSeeds)
                if remainder > len(bins[binNo + 1]):
                    s = bins[binNo + 1]
                else:
                    s = random.sample(bins[binNo + 1], remainder)
                seeds.extend(s)
        # print seeds
        final_seeds.add(random.sample(seeds, 1)[0])
    # graph=community
    # final_seeds=community.nodes()
    sample_per_seed = 1
    if len(graph) == 1:
        # print("yes")
        deg_centra = {graph.nodes()[0]: 1}
    else:
        deg_centra = nx.degree_centrality(graph)
    between_centra = nx.betweenness_centrality(graph)
    load_centra = nx.load_centrality(graph)
    # eigen_centra2=nx.eigenvector_centrality(graph)
    avg_neigh_deg = nx.average_neighbor_degree(graph)
    harmonic_centra = nx.harmonic_centrality(graph)
    close_centra = nx.closeness_centrality(graph)
    feature = np.zeros([len(final_seeds) * sample_per_seed, 6])
    influence = []
    for i, seed in enumerate(list(final_seeds)):
        for n in xrange(0, sample_per_seed):
            inf = Evaluation.mc_method(graph, [seed], k=1, num_simu=100)
            feature[i * sample_per_seed + n, 0] = deg_centra[seed]
            feature[i * sample_per_seed + n, 1] = between_centra[seed]
            feature[i * sample_per_seed + n, 2] = load_centra[seed]
            # feature[i*20+n,3]=eigen_centra[seed]
            feature[i * sample_per_seed + n, 3] = avg_neigh_deg[seed]
            feature[i * sample_per_seed + n, 4] = harmonic_centra[seed]
            feature[i * sample_per_seed + n, 5] = close_centra[seed]
            influence.append(inf)
            # print feature[n,],inf
    model = LinearRegression(normalize=True)
    classifier = model.fit(feature, influence)
    return classifier

# if __name__ == '__main__':
#     G = nx.DiGraph()
#     with open('../weighted_directed_nets/network3.dat', 'r') as f:
#         for i, line in enumerate(f):
#             node1, node2, weight = line.strip().split('\t')
#             G.add_edge(int(node1) - 1, int(node2) - 1, weight=float(weight))
#     training(G)
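# Hedged usage sketch (kept commented out; `community` and the centrality feature
# values are assumptions, not from the original snippet): train the regressor once,
# then score a candidate node's expected influence from the same six features, in the
# same column order used when building `feature` above.
# clf = training(community)
# predicted_influence = clf.predict([[deg, betw, load, avg_nbr_deg, harmonic, close]])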
def GWMI2(G):
    node_num = nx.number_of_nodes(G)
    edge_num = nx.number_of_edges(G)
    edges = nx.edges(G)
    nodes = nx.nodes(G)
    beta = -math.log2(0.0001)
    sim_dict = {}
    g = nx.Graph()
    a = (node_num * (node_num - 1)) / 2
    weight_dic = {}
    for u, v in edges:
        s = len(list(nx.common_neighbors(G, u, v)))
        weight_dic[(u, v)] = s + 1
        weight_dic[(v, u)] = s + 1
        g.add_edge(u, v, weight=weight_dic[(u, v)])
    # print(weight_dic)
    all_weight = 0
    for u, v in edges:
        all_weight = all_weight + weight_dic[(u, v)]
    # print(all_weight)
    average_weight = all_weight / edge_num
    p_connect = (all_weight) / (average_weight * a)
    print(p_connect)
    m = -math.log2(p_connect)
    print(m)
    nodes_Weight_dict = {}
    # compute the "node weight" of every node: the sum of its incident edge weights
    for v in nodes:
        node_weight = 0
        v_neighbors = nx.neighbors(G, v)
        for u in v_neighbors:
            node_weight += weight_dic[(u, v)]
        nodes_Weight_dict[v] = node_weight
    self_Conditional_dict = {}
    for z in nodes:
        w_z = nodes_Weight_dict[z]
        # d_z = nx.degree(G, z)
        if w_z > 1:
            alpha = 2 / (w_z * (w_z - 1))
            cc_z = wc3.weight_clustering3(g, z)  # changed to the weighted clustering coefficient
            # cc_z = nx.clustering(G, z)
            if cc_z == 0:
                log_c = beta
            else:
                log_c = -math.log2(cc_z)
            s = 0
            neighbor_list = list(nx.neighbors(G, z))  # materialize: nx.neighbors returns an iterator
            size = len(neighbor_list)
            for i in range(size):
                m = neighbor_list[i]
                for j in range(i + 1, size):
                    n = neighbor_list[j]
                    (k_x, k_y) = pair(nodes_Weight_dict[m], nodes_Weight_dict[n])
                    if i != j:
                        s += (m - log_c)
            self_Conditional_dict[z] = alpha * s
    # print(self_Conditional_dict)
    sim_dict = {}  # dictionary holding the similarity scores
    ebunch = nx.non_edges(G)
    for x, y in ebunch:
        s = 0
        for z in nx.common_neighbors(G, x, y):
            s += self_Conditional_dict[z]
        sim_dict[(x, y)] = s
    return sim_dict
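# Hedged usage sketch (G, wc3 and pair are assumed to be provided as in the original
# snippet): GWMI2 returns a score for every non-existing edge, so candidate links can
# simply be ranked by it, highest first.
scores = GWMI2(G)
top_links = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:10]
print(top_links)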
# Find Minimum spanning tree

# In[48]:

# print(networkx.__version__)
X = nx.to_numpy_matrix(U_whole)
mst = minimum_spanning_tree(X)
mst_new = nx.from_scipy_sparse_matrix(mst)
mst_edges = nx.generate_edgelist(mst_new)
temp_tup = tuple(nx.nodes(U_whole))
temp_U = nx.parse_edgelist(mst_edges)
Z = nx.Graph()
Z.add_nodes_from(temp_tup)  # add every node of U_whole so isolated vertices are kept
edges = nx.edges(temp_U)
Z.add_edges_from(edges)
# print(list(mst_edges))
plt.axis('off')
nx.draw(Z, node_size=4)
plt.show()

# 1a. In a degree trimmed network look for one identifier

# In[49]:

# response to stress in the degree trimmed network, colored in blue
color_map = []