def expand_liquid_oncotree(onco_tree):
    """
    Expand the _LIQUID_ oncotree node to all of its children

    :param onco_tree: Digraph of the Oncotree
    :returns:
        liquid_children: All liquid tumor types in the Oncotree
        solid_children: All tumor types in the Oncotree minus "liquid_children"
    """
    # build the nodes for liquid.
    node1 = oncotreenx.lookup_text(onco_tree, "Lymph")
    node2 = oncotreenx.lookup_text(onco_tree, "Blood")

    nodes1 = list(nx.dfs_tree(onco_tree, node1))
    nodes2 = list(nx.dfs_tree(onco_tree, node2))
    nodes = list(set(nodes1).union(set(nodes2)))

    primary_tumors = get_primary_tumors()

    liquid_children_codes = []
    for n in nodes:
        liquid_children_codes.extend(list(nx.dfs_tree(onco_tree, n)))

    liquid_children = [onco_tree.node[nn]['text'] for nn in liquid_children_codes
                       if onco_tree.node[nn]['text'].strip() not in primary_tumors]

    # solid nodes are all other nodes
    all_nodes = set(list(onco_tree.nodes()))
    tmp_nodes = all_nodes - set(nodes)
    solid_children_codes = list(tmp_nodes)
    solid_children = [onco_tree.node[nn]['text'] for nn in solid_children_codes
                      if onco_tree.node[nn]['text'].strip() not in primary_tumors]

    return liquid_children, solid_children

def test_dfs_tree_isolates(self):
    G = nx.Graph()
    G.add_node(1)
    G.add_node(2)
    T = nx.dfs_tree(G, source=1)
    assert_equal(sorted(T.nodes()), [1])
    assert_equal(sorted(T.edges()), [])
    T = nx.dfs_tree(G, source=None)
    assert_equal(sorted(T.nodes()), [1, 2])
    assert_equal(sorted(T.edges()), [])

def test_dfs_tree(self):
    exp_nodes = sorted(self.G.nodes())
    exp_edges = [(0, 1), (1, 2), (2, 4), (4, 3)]
    # Search from first node
    T = nx.dfs_tree(self.G, source=0)
    assert_equal(sorted(T.nodes()), exp_nodes)
    assert_equal(sorted(T.edges()), exp_edges)
    # Check source=None
    T = nx.dfs_tree(self.G, source=None)
    assert_equal(sorted(T.nodes()), exp_nodes)
    assert_equal(sorted(T.edges()), exp_edges)
    # Check source=None is the default
    T = nx.dfs_tree(self.G)
    assert_equal(sorted(T.nodes()), exp_nodes)
    assert_equal(sorted(T.edges()), exp_edges)

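# The fixture graph `self.G` is not shown above; one graph consistent with the
# expected DFS tree edges (an assumption, mirroring NetworkX's own test setup):
import networkx as nx

G = nx.Graph([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
T = nx.dfs_tree(G, source=0)
print(sorted(T.edges()))  # [(0, 1), (1, 2), (2, 4), (4, 3)]
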
def all_dag_covers(_graph, condenseg, final_sccs, tree_type):
    initial_scc = _graph.node[_graph.graph["initial"]]["scc_index"]
    if tree_type == "bfs":
        condense_tree = networkx.bfs_tree(condenseg, initial_scc)
    elif tree_type == "dfs":
        condense_tree = networkx.dfs_tree(condenseg, initial_scc)
    rest_edges = [edge for edge in condenseg.edges()
                  if edge not in condense_tree.edges()]
    all_tree_branch(_graph, condenseg, final_sccs, tree_type, condense_tree)
    dag_paths = condenseg.graph["condense_paths"]
    for rest_edge in rest_edges:
        path = networkx.shortest_path(condense_tree, initial_scc, rest_edge[0])
        _node = rest_edge[1]
        while True:
            if condense_tree.out_degree(_node) == 0 and condense_tree.in_degree(_node) == 1:
                if "_final" in str(_node):
                    path.append(_node)
                else:
                    path = path + condense_tree.node[_node]["continue_path"]
                break
            else:
                path.append(_node)
                _node = condense_tree.edge[_node].keys()[0]
        dag_paths.append(path)
    condenseg.graph["condense_paths"] = dag_paths
    return dag_paths

def sanity_check_all_connected(tester, genome):
    tree = nx.dfs_tree(genome.graph, source=genome.starting_node)
    if len(tree) > 2:
        tester.assertEqual(len(tree), len(genome.graph))

def dfs_edges(G):
    """
    (source, target) for edges in the directed spanning tree
    resulting from a depth first search
    """
    DG = nx.dfs_tree(G)
    return [(src, targ) for targ in nx.dfs_postorder_nodes(DG)
            for src in DG.predecessors(targ)]

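# A quick sanity check of dfs_edges on a 4-node path; the printed order assumes
# NetworkX's insertion-ordered adjacency:
import networkx as nx

G = nx.path_graph(4)  # edges 0-1, 1-2, 2-3
print(dfs_edges(G))   # [(2, 3), (1, 2), (0, 1)] -- postorder targets, tree parents as sources
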
def get_independent_components(self):
    components = set()
    for node in self.G.nodes_iter():
        components.add(frozenset(nx.dfs_tree(self.G, node)))
    return components

def _check_for_extreme_cases(G, G_copy, capacity, s, t):
    """
    Pads the missing capacities and checks for infinite capacity paths.
    """
    sum_capacities = sum([G_copy[u][v][capacity] for u, v in G_copy.edges_iter()
                          if capacity in G_copy.get_edge_data(u, v)])
    len_G_copy_edges = len(G_copy.edges())
    if len_G_copy_edges > 1:
        for u, v in G_copy.edges_iter():
            if capacity not in G_copy.get_edge_data(u, v):
                # pad the missing capacities with sum of all capacities
                G_copy[u][v][capacity] = sum_capacities
        # get edges that have no capacity attribute
        H = nx.DiGraph(((u, v) for u, v, eattr in G.edges_iter(data=True)
                        if capacity not in eattr))
        H.add_nodes_from(G_copy)
        for v in nx.dfs_tree(H, s):
            if v is t:
                raise nx.NetworkXUnbounded("Infinite capacity path, "
                                           "flow unbounded above.")
    elif len_G_copy_edges == 1:
        for u, v in G_copy.edges_iter():
            if capacity not in G_copy.get_edge_data(u, v):
                # prune infinite capacities path
                raise nx.NetworkXUnbounded(
                    "Infinite capacity path, flow unbounded above.")

def chow_liu(data, mi_estimator=discrete_mutual_information):
    arguments = list(data.columns)
    g = nx.Graph()
    g.add_nodes_from(arguments)
    for src, dst in combinations(arguments, 2):
        g.add_edge(src, dst, weight=-mi_estimator(data[[src]], data[[dst]]))
    return DGM(nx.dfs_tree(nx.minimum_spanning_tree(g), arguments[0]))

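# `discrete_mutual_information` and `DGM` come from the surrounding package.
# For a self-contained experiment, a hypothetical plug-in estimator such as the
# sketch below (empirical mutual information of two discrete single-column
# frames) can be passed as `mi_estimator`; it is not the package's implementation.
import numpy as np
import pandas as pd

def empirical_mi(x: pd.DataFrame, y: pd.DataFrame) -> float:
    joint = pd.crosstab(x.iloc[:, 0], y.iloc[:, 0], normalize=True)
    p = joint.values
    px = p.sum(axis=1, keepdims=True)  # marginal of x
    py = p.sum(axis=0, keepdims=True)  # marginal of y
    mask = p > 0
    return float(np.sum(p[mask] * np.log(p[mask] / (px * py)[mask])))
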
def extract_cancer_types(self):
    """
    Returns all cancer types located in the match tree

    :param g: DiGraph match tree
    :return: List of cancer types
    """
    diagnoses = []
    cancer_types_expanded = []
    primary_cancer_types = []
    excluded_cancer_types = []
    onco_tree = oncotreenx.build_oncotree(file_path=TUMOR_TREE)
    liquid_children_txt, solid_children_txt = expand_liquid_oncotree(onco_tree)

    # iterate through the graph
    for node_id in list(nx.dfs_postorder_nodes(self.g, source=1)):
        node = self.g.node[node_id]
        if node['type'] == 'clinical':
            if 'oncotree_primary_diagnosis' in node['value']:
                diagnosis = node['value']['oncotree_primary_diagnosis']

                n = oncotreenx.lookup_text(onco_tree, diagnosis.replace('!', ''))
                children = list(nx.dfs_tree(onco_tree, n))

                if diagnosis == '_SOLID_':
                    children_txt = solid_children_txt
                    primary_parent = 'All Solid Tumors'
                    parents_txt = ['All Solid Tumors']
                elif diagnosis == '_LIQUID_':
                    children_txt = liquid_children_txt
                    primary_parent = 'All Liquid Tumors'
                    parents_txt = ['All Liquid Tumors']
                else:
                    children_txt = [onco_tree.node[nn]['text'] for nn in children]
                    if n is not None:
                        parents, parents_txt, primary_parent = get_parents(onco_tree, n)
                    else:
                        parents_txt = []
                        primary_parent = ''

                diagnoses.append(diagnosis)
                if diagnosis.startswith('!'):
                    excluded_cancer_types.append(diagnosis.replace('!', ''))
                    excluded_cancer_types.extend(children_txt)
                else:
                    primary_tumors = get_primary_tumors()
                    cancer_types_expanded.append(parse_diagnosis(diagnosis))
                    cancer_types_expanded.extend(children_txt)
                    cancer_types_expanded.extend([i for i in parents_txt
                                                  if i.split()[0] not in primary_tumors])
                    primary_cancer_types.append(primary_parent)

    return {
        'diagnoses': list(set(i for i in diagnoses if i.strip() != 'root')),
        'cancer_types_expanded': list(set(i for i in cancer_types_expanded if i.strip() != 'root')),
        'primary_cancer_types': list(set(i for i in primary_cancer_types if i.strip() != 'root')),
        'excluded_cancer_types': list(set(i for i in excluded_cancer_types if i.strip() != 'root'))
    }

def mincut(G, RG, s, t):
    dtree = nx.dfs_tree(RG, s)
    cut = []
    for u in dtree.nodes_iter():
        for v in G.edge[u].keys():
            if v not in dtree.node:
                cut.append((u, v))
    return cut

def DFSCode(self, t):
    vertices = t.nodes()
    tdfs = nx.dfs_tree(t)
    sorted_edges = sorted(tdfs.edges(), key=operator.itemgetter(0),
                          reverse=False)
    dfscode = ""
    for s in sorted_edges:
        dfscode = dfscode + "#" + str(s[0]) + "-" + str(s[1])
    return dfscode

def petersenGraph():
    print "petersenGraph()"
    # Let G = (Vg, Eg) be the original graph
    # Initial exploration vertex: v
    # Let H = (Vh, Eh) be the graph resulting from applying DFS
    # The graph H starts out empty
    G = nx.petersen_graph()
    print "number of edges: ", G.number_of_edges()
    print "number of nodes: ", G.number_of_nodes()
    print "edges: ", G.edges()
    print "nodes: ", G.nodes()
    print "adjacency: ", G.adj  # neighbors() needs a node argument; print the adjacency dict instead

    H = nx.dfs_tree(G, 0)
    Eh = {}
    Vh = {}
    visitats = {}  # 'visited' bookkeeping for a hand-rolled DFS; nx.dfs_tree already handles this
    # explora(v)  # leftover pseudo-code call: 'explora' (and 'v') is never defined
    print "number of edges: ", H.number_of_edges()
    print "number of nodes: ", H.number_of_nodes()
    print "edges: ", H.edges()
    print "nodes: ", H.nodes()
    print "adjacency: ", H.adj

    H = nx.dfs_tree(G, 5)
    print "number of edges: ", H.number_of_edges()
    print "number of nodes: ", H.number_of_nodes()
    print "edges: ", H.edges()
    print "nodes: ", H.nodes()
    print "adjacency: ", H.adj

    G.add_edge(1000, 1001)
    print "number of edges: ", G.number_of_edges()
    print "number of nodes: ", G.number_of_nodes()
    print "edges: ", G.edges()
    print "nodes: ", G.nodes()
    print "adjacency: ", G.adj

    H = nx.dfs_tree(G, 0)
    print "number of edges: ", H.number_of_edges()
    print "number of nodes: ", H.number_of_nodes()
    print "edges: ", H.edges()
    print "nodes: ", H.nodes()
    print "adjacency: ", H.adj

    H = nx.dfs_tree(G, 1000)
    print "number of edges: ", H.number_of_edges()
    print "number of nodes: ", H.number_of_nodes()
    print "edges: ", H.edges()
    print "nodes: ", H.nodes()
    print "adjacency: ", H.adj

def get(self, request, **kwargs):
    # kwargs contains the parameters captured from the URL path
    pk = kwargs['pk']
    NodeObj = ipran_node.objects.get(pk=pk)
    NodeName = NodeObj.NeName
    Ring = NodeObj.Ring
    linkList = ipran_link.objects.filter(Q(ring=Ring) & Q(isDelete=False)).values_list("source", "dest")
    linkList = list(linkList)
    nodeTuple = reduce(lambda x, y: x + y, linkList)  # [(1,2),(2,3)]-->(1,2,2,3)
    nodeTuple = tuple(set(nodeTuple))  # (1,2,2,3)-->{1,2,3}-->(1,2,3)
    rootNodeTuple = tuple(a for a in nodeTuple if re.match(r'^(HJ)|(HX)', a))  # names starting with HJ or HX are ASGs
    rootLinkList = zip(*[iter(rootNodeTuple[i:]) for i in range(2)])  # (1,2,3,4)-->[(1,2),(2,3),(3,4)] (1,)-->[] ()-->[]
    print u"ASG:"
    for i in rootNodeTuple:
        print i
    linkList.extend(rootLinkList)
    G = nx.Graph()
    G.add_edges_from(linkList)
    try:
        CycleNode = nx.cycle_basis(G)[0]  # derive the ring (cycle) from the graph
    except:
        CycleNode = []  # no cycle could be formed, so treat the ring as empty
    # print u"Ring nodes:"
    # for i in CycleNode:
    #     print i
    if NodeName in CycleNode:
        # if the queried node is on the ring, remove the other ring nodes (branch-chain nodes are kept)
        CycleNode.remove(NodeName)
        G.remove_nodes_from(CycleNode)
    else:
        # otherwise, remove the nodes on the shortest path from a ring node to the queried node
        ShortestNode = nx.dijkstra_path(G, rootNodeTuple[0], NodeName)
        ShortestNode.remove(NodeName)
        G.remove_nodes_from(ShortestNode)
    # print u"Nodes remaining after removal:"
    # for i in G.node:
    #     print i
    H = nx.dfs_tree(G, NodeName)  # finally, the spanning tree gives the nodes hanging below this node
    Nnode = H.number_of_nodes()
    # next, analyse the number and names of the services carried downstream
    BusinessDictList = {'ring': Ring, '2G': [], '3G': [], 'LTE': []}
    for node in H.nodes():
        print node
        NodeObj = ipran_node.objects.get(NeName=node)
        BusinessQuerySet = NodeObj.ipran_business_set.all()
        for BusinessObj in BusinessQuerySet:
            BusinessDictList.setdefault(BusinessObj.BusinessType, []).append(BusinessObj.TransStationName)
    return JsonResponse(BusinessDictList, safe=False)

def prune(G, min_weight, source):
    for vertex in G.nodes():
        if sum(G.get_edge_data(*edge)['weight'] for edge in G.edges(vertex)) < min_weight:
            G.remove_node(vertex)

    tree = nx.dfs_tree(G, source)
    connected_vertices = {node for node in tree.nodes()}

    # Remove any orphaned vertexes.
    for vertex in G.nodes():
        if vertex not in connected_vertices:
            G.remove_node(vertex)

def mutation(tree, graph):
    v1 = choice(tree.nodes())
    # choice could be made proportional to the weight of the removed edge
    # (the bigger the weight, the more likely it is to be removed)
    v2 = choice(tree.neighbors(v1))
    # removal of an edge splits the tree into 2 subtrees
    tree.remove_edge(v1, v2)
    # we obtain the nodes accessible from each vertex
    n1 = nx.dfs_tree(tree, v1).nodes()
    n2 = nx.dfs_tree(tree, v2).nodes()
    # we list possible candidates for the new connection
    possible_connections = [(u, v) for u in n1 for v in n2 if graph.has_edge(u, v)]
    if not possible_connections:
        tree.add_edge(v1, v2)  # no changes
    else:
        possible_connections.remove((v1, v2))  # remove previous connection
        if not possible_connections:
            tree.add_edge(v1, v2)  # the old edge was the only candidate; no changes
        else:
            edge = choice(possible_connections)
            tree.add_edge(edge[0], edge[1])  # new edge

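# Usage sketch for mutation(), assuming NetworkX 1.x, where nodes() and
# neighbors() return lists as the function above requires. The mutation swaps
# one tree edge for another graph edge and keeps the result a spanning tree:
import networkx as nx
from random import choice, seed

seed(0)
graph = nx.cycle_graph(4)               # 0-1-2-3-0
tree = nx.minimum_spanning_tree(graph)  # one spanning tree of the cycle
mutation(tree, graph)
assert nx.is_tree(tree) and set(tree.nodes()) == set(graph.nodes())
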
def new_simple_paths(graph):
    nodes = graph.in_degree_iter()
    for node, in_degree in nodes:
        if in_degree == 0:
            tmp_tree = nx.dfs_tree(graph, node)
            out_degree = tmp_tree.out_degree()
            for tree_node in tmp_tree.nodes_iter():
                if out_degree[tree_node] == 0:
                    path = nx.shortest_path(graph, node, tree_node)
                    if len(path) > 2:
                        yield path

def induce_tree(graph, node):
    """
    Given an undirected, acyclic graph, returns the tree
    induced by taking 'node' as the root node.
    Preserves edge and node metadata.
    """
    tree = nx.dfs_tree(graph, node)
    # preserve node metadata
    for u in tree.nodes_iter():
        tree.node[u] = graph.node[u]
    # preserve edge metadata
    for s in tree.edges_iter():
        tree.edge[s[0]][s[1]] = graph.edge[s[0]][s[1]]
    return tree

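# Minimal call, against the same NetworkX 1.x attribute API the function uses
# (tree.node / tree.edge); the labels here are made up for illustration:
import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b', weight=1.0)
g.add_edge('b', 'c', weight=2.0)
g.node['b']['depth'] = 0   # arbitrary node metadata

t = induce_tree(g, 'b')    # DiGraph rooted at 'b': b->a, b->c
assert t.node['b']['depth'] == 0
assert t.edge['b']['c']['weight'] == 2.0
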
def unilaterally_connected_components(G):
    graphs = sorted(nx.weakly_connected_component_subgraphs(G), key=len, reverse=True)
    weakly_connected_component_subgraphs_nodes = []
    weakly_connected_component_subgraphs_edges = []
    for gr in range(len(graphs)):
        graph = 'G' + str(gr + 1)
        if len(graphs[gr].nodes()) > 1 and len(graphs[gr].edges()) > 0:
            weakly_connected_component_subgraphs_nodes.append(graphs[gr].nodes())
            weakly_connected_component_subgraphs_edges.append(graphs[gr].edges())
    ucc = []
    ucce = []
    for i in range(len(weakly_connected_component_subgraphs_nodes)):
        j = weakly_connected_component_subgraphs_nodes[i]
        max_ccsn = []
        max_ccse = []
        for ki in j:
            Tki = nx.dfs_tree(G, ki)
            if len(Tki.nodes()) > len(max_ccsn):
                max_ccsn = Tki.nodes()
                max_ccse = Tki.edges()
                max_ki = ki
        rkwcc = list(set(j) - set(max_ccsn))
        if len(rkwcc) == 0:
            ucc.append(max_ccsn)
            ucce.append(max_ccse)
        else:
            ucc.append(max_ccsn)
            ucce.append(max_ccse)
            ccsn = set()
            ccse = set()
            for kki in rkwcc:
                Tkki = nx.dfs_tree(G, kki)
                if Tkki.nodes() not in ucc:
                    ucc.append(Tkki.nodes())
                    ucce.append(Tkki.edges())
    return ucc, ucce

def _sample_four_node_system(pzero_transition, pzero_other, fn_sample_data):
    nstates = 3
    G = nx.Graph()
    G.add_edge('a', 'b')
    G.add_edge('a', 'c')
    G.add_edge('a', 'd')
    nodes = set(G)
    root = random.choice(list(nodes))
    T = nx.dfs_tree(G, root)
    root_prior_distn = sample_distn1d(nstates, pzero_other)
    edge_to_P = {}
    for edge in T.edges():
        edge_to_P[edge] = sample_P(nstates, pzero_transition)
    node_to_data = fn_sample_data(nodes, nstates, pzero_other)
    return (T, edge_to_P, root, root_prior_distn, node_to_data)

def extract_trees_from_graph(self):
    #print "starting topological sort at: ", time.time()
    #ntop = nx.topological_sort(self.graph)
    #print "ending topological sort at: ", time.time()
    #print "nodes by topology: ", ntop[:10]
    #return
    tree_hash = {}
    root_nodes = [x for x in self.graph if not self.graph.predecessors(x)]
    self.nodes_to_trees_hash = {}
    print "num root nodes: ", len(root_nodes)
    for rn in root_nodes:
        dfs_tree = nx.dfs_tree(self.graph, rn)
        self.nodes_to_trees_hash[rn] = dfs_tree
    self.sorted_nodes_to_trees = sorted(self.nodes_to_trees_hash.items(),
                                        key=lambda x: len(x[1].nodes()),
                                        reverse=True)
    self.sorted_nodes_to_trees = [x for x in self.sorted_nodes_to_trees
                                  if len(x[1].nodes()) > 50]

def is_spanning_tree(tree, network=None, source=1, draw=True):
    S = nx.dfs_tree(tree, source).to_undirected()
    if network and tree.number_of_nodes() != network.number_of_nodes():
        print "Tree has %d of %d necessary nodes." % (tree.number_of_nodes(),
                                                      network.number_of_nodes())
        return False
    if len(nx.connected_components(tree)) > 1:
        print "Tree is actually a forest."
        return False
    if tree.number_of_edges() != (tree.number_of_nodes() - 1):
        print "Tree contains at least one cycle. |E|=|V|-1 does not hold."
        return False
    return True

def optimal_placement(tree, budget):
    """
    Place `budget` sensors on a tree in an optimal way.

    Parameters
    ----------
    tree : networkx.Graph
        A tree (undirected) on which to place the sensors.
    budget : int
        The sensor budget, i.e. the number of nodes that can be chosen
        as sensors

    Returns
    -------
    (perr, obs) : tuple
        `perr` is the error probability, and `obs` a tuple containing
        the sensors.
    """
    # one single sensor is useless
    assert budget >= 2
    assert nx.is_tree(tree)
    leaves = utilities.find_leaves(tree)
    if budget >= len(leaves):
        return (0, tuple(leaves))
    # define a non-leaf root arbitrarily
    root = random.choice(filter(lambda x: x not in leaves, tree.nodes()))
    directed = nx.dfs_tree(tree, source=root)  # dir DFS tree from source
    # compute the subtree sizes
    for x in directed:
        directed.node[x]['size'] = utilities.size_subtree(directed, x)
    utilities.size_subtree.cache_clear()
    # add the budget and the root to the tree as an attribute
    directed.graph['root'] = root
    directed.graph['budget'] = budget
    # place the sensors using the DP algorithm
    err, obs = _opt(directed, root, budget)
    _optc.cache_clear()
    _opt.cache_clear()
    return (float(err) / len(tree), obs)

def get_ontology(self, root_concept="Pharmaceutical / biologic product",
                 relation_types=["Is a"]):
    if self.ontology is not None:
        return self.ontology
    self.ontology = networkx.DiGraph()
    root = self.get_concept_ids(root_concept)[0]
    valid_relations = set([self.get_concept_ids(relation)[0]
                           for relation in relation_types])
    f = open(self.file_name_rel)
    header = f.readline().strip("\n")
    col_to_idx = dict((val.lower(), i)
                      for i, val in enumerate(header.split(self.delim)))
    for line in f:
        words = line.strip("\n").split(self.delim)
        if words[col_to_idx["typeid"]] in valid_relations:
            source_id = words[col_to_idx["sourceid"]]
            target_id = words[col_to_idx["destinationid"]]
            self.ontology.add_edge(target_id, source_id)
    self.ontology = networkx.dfs_tree(self.ontology, root)
    return self.ontology

def flow_errors(G, src, ud=None, stopnodes=None):
    """Returns the first edges that do not conform to the flow direction
    implicit in defined source node.

    G: target digraph
    src: source nodes
    ud: undirected graph (faster iteration with setdirection)
    stopnodes: break points in the network
    """
    if not ud:
        ud = G.to_undirected()
    badedges = []
    gnodes = trace(G, src, stopnodes)
    connected = G.edges(dfs_tree(ud, src).nodes())
    for edge in connected:
        start = edge[0]
        end = edge[1]
        if end in gnodes and start not in gnodes:
            badedges.append(G.edges(start)[0])
    return badedges

def any_conn(g, controller_node, link_fail_prob):
    # Store pairs of (probability, connectivity)
    uptime_dist = []
    for failed_edge in g.edges():
        lg.debug("------------------------")
        lg.debug("considering failed edge: %s" % str(failed_edge))
        # Check switch-to-controller connectivity.
        gcopy = g.copy()
        gcopy.remove_edge(failed_edge[0], failed_edge[1])
        reachable = nx.dfs_tree(gcopy, controller_node).nodes()
        if controller_node not in reachable:
            reachable += [controller_node]
        nodes = g.number_of_nodes()
        connectivity = float(len(reachable)) / nodes
        uptime_dist.append((link_fail_prob, connectivity))
    return uptime_dist

def subtree_asymmetries(tree, roots, Delta, attr='asymmetry-simple'):
    """
    Calculates the average asymmetry functions for the subtrees
    rooted at the nodes given in roots.
    """
    subtrees = [nx.DiGraph(tree.subgraph(nx.dfs_tree(tree, r).nodes_iter()))
                for r in roots]
    reslt = []
    for s, r in izip(subtrees, roots):
        s.graph['root'] = r
        degree = s.node[r]['subtree-degree']
        degrees = array(sorted(list(set([s.node[n]['subtree-degree']
                                         for n in s.nodes_iter()]))))
        reslt.append([degrees, [average_asymmetry(s, d, Delta, attr=attr)
                                for d in degrees]])
    return reslt

def termsBelow(self, name, filters=[]):
    """
    Returns a DFS-based generator of terms below a node.
    If filters (term -> boolean functions) are provided,
    yields only formulas satisfying all of them.

    Example of usage:
        chebi = ChebiOntology()
        filters = [containsElement('C')]
        for term in chebi.termsBelow('rat metabolite', filters):
            print(term['id'])
    """
    metabolite = self._ids_by_name.get(name, None)
    if metabolite is None:
        return
    for id in nx.dfs_tree(self._g, metabolite):
        node = self._nodes_by_id[id]
        if all(f(node) for f in filters):
            yield node

def all_tree_branch(_graph, condenseg, final_sccs, tree_type, ct=None):
    initial_scc = _graph.node[_graph.graph["initial"]]["scc_index"]
    if not ct:
        if tree_type == "bfs":
            condense_tree = networkx.bfs_tree(condenseg, initial_scc)
        elif tree_type == "dfs":
            condense_tree = networkx.dfs_tree(condenseg, initial_scc)
    else:
        condense_tree = ct
    dag_paths = []
    leaves = [node for node in condense_tree.nodes()
              if condense_tree.out_degree(node) == 0
              and condense_tree.in_degree(node) == 1]
    for leaf in leaves:
        if "_final" in str(leaf):
            path = networkx.shortest_path(condense_tree, initial_scc, leaf)
        else:
            find_continue_path(condenseg, condense_tree, leaf)
            path = networkx.shortest_path(condense_tree, initial_scc, leaf)
            path = path + condense_tree.node[leaf]["continue_path"][1:]
        dag_paths.append(path)
    condenseg.graph["condense_paths"] = dag_paths
    return dag_paths

def FindAttractors(Counts, folder='Data'):
    '''Identifying attractors and basins using NetworkX'''
    print 'Now identifying attractors, please wait...'
    results = {}
    TransNet = nx.DiGraph()
    for source, target in Counts:
        # add source and target node to network objects
        TransNet.add_edge(source, target)
    TransNet.remove_edges_from(TransNet.selfloop_edges())
    attractors = nx.attracting_components(TransNet)  # find attractors (SCCs with no out edge)
    ReTransNet = TransNet.reverse()  # reverse the directed graph to create a tree whose roots are the attractors
    try:
        os.mkdir(folder)
    except:
        pass
    print 'Now identifying basins for each attractor...'
    for attractor in attractors:
        # just need to find the sons of the first node in the attractor
        basin_tree = nx.dfs_tree(ReTransNet, list(attractor)[0])
        results[tuple(attractor)] = basin_tree.nodes()
        #results_origin[tuple(attractor)] = [leaf for leaf in basin if ReTransNet.out_degree(leaf) == 0]  # record initial states of attractors
        #AttNet = TransNet.subgraph(attractor)
        #nx.write_edgelist(AttNet, '%s/Attractor%s.txt' % (folder, attractors.index(attractor)), data=False)
    print 'Writing out transition graph in %s/TransGraph.txt' % folder
    nx.write_edgelist(TransNet, '%s/TransGraph.txt' % folder, data=False)
    return results

constraints = [decode_line(line) for line in input]

graph = networkx.DiGraph()
for constraint in constraints:
    graph.add_node(constraint.container_color)
    for number, contained_color in constraint.contained_colors:
        graph.add_node(contained_color)
        graph.add_edge(constraint.container_color, contained_color, count=number)

origin = 'shiny gold'
ancestors = len(
    networkx.dfs_tree(graph.reverse(), source=origin).reverse().nodes)
print(ancestors - 1)  # shiny gold is included in ancestors


def count_bags(origin: str, graph) -> int:
    contained_bags = graph.out_edges(origin)
    count = 1
    for _, contained_bag in contained_bags:
        count += graph[origin][contained_bag]['count'] * count_bags(
            contained_bag, graph)
    return count


print(count_bags(origin, graph) - 1)  # shiny gold bag is included in count_bags

def draw_cfg(p, recorder):
    cfg = None
    src_addr = set()
    executed_addr = set()
    lib_addr = set()
    try:
        signal.signal(signal.SIGALRM, handler)
        cfg = p.analyses.CFGEmulated()
    except:
        traceback.print_exc()
    if not cfg:
        try:
            cfg = p.analyses.CFGFast()
        except:
            traceback.print_exc()
            pass
    # a = set(map(lambda x: x._name.split("+")[0] if x._name else "", list(cfg.graph.nodes)))
    # f = list(map(lambda x: p.loader.find_symbol(x), a))
    # b = list(filter(lambda x: not x.is_extern if x else False, f))
    try:
        if cfg != None:
            g = cfg.graph
            main_obj = p.loader.main_object.get_symbol('main')
            if main_obj != None:
                own_addr = [main_obj.linked_addr]
            else:
                own_addr = [p.entry]
            own_node = [cfg.get_any_node(main_obj.linked_addr)]
            import networkx
            main_node = cfg.get_any_node(main_obj.linked_addr)
            print(networkx.dfs_tree(cfg.graph, main_node, depth_limit=10))
            try:
                with open("/home/lsc/lsc/core6_src_function.json", "r") as f:
                    data = f.read()
                fun_list = json.loads(data)
                own_node = list(
                    filter(
                        lambda x: x.name.split("+")[0] in fun_list['function_list']
                        if x.name else False, cfg.graph.nodes))
                own_addr = list(map(lambda x: x.addr, own_node))
                lib_addr = set(
                    map(lambda x: x.addr,
                        filter(lambda x: x not in own_node, cfg.graph.nodes)))
            except:
                own_node = list(
                    filter(
                        lambda x: x.name.split("+")[0] == "main"
                        if x.name else False, cfg.graph.nodes))
                own_addr = list(map(lambda x: x.addr, own_node))
            i = 0
            while (i < len(own_addr)):
                # new_node = cfg.get_any_node(own_addr[i])
                new_node = own_node[i]
                if new_node is not None:
                    if new_node._name:
                        if new_node._name in ["exit", "printf_parse", "quote"] \
                                or "quote" in new_node._name or "printf" in new_node._name:
                            # if new_node._name in ["version_etc", "exit", "version_etc_va", "printf_parse", "quote"] \
                            #         or "quote" in new_node._name or "printf" in new_node._name:
                            if new_node._name != "quote_name":
                                i += 1
                                continue
                    for succ_block in new_node.successors:
                        # if succ_block.addr not in own_addr and succ_block.addr > main_obj.rebased_addr and succ_block.addr < 0x700000:
                        # if succ_block.addr not in own_addr:
                        if succ_block not in own_node:
                            own_addr.append(succ_block.addr)
                            # print(succ_block)
                            own_node.append(succ_block)
                        else:
                            # print(succ_block.name)
                            pass
                i += 1
            lib_addr = set(
                map(lambda x: x.addr,
                    filter(lambda x: x not in own_node, cfg.graph.nodes)))
            # own_addr = set(map(lambda x: x.addr, cfg.graph.nodes))
            src_addr = set(own_addr)
            print(p.filename, len(src_addr))
        else:
            logger.error('cfg recover failed')
            src_addr.add(None)
    except:
        traceback.print_exc()
        pass
    try:
        query_list_num = 100
        recorder.engine.exe_time = 0
        recorder.successor.suc_time = 0
        recorder.backend_z3.query_record.sol_time = 0
        recorder.backend_z3.query_record.list_num = query_list_num
    except:
        pass
    try:
        sol_time_dir = cf.get("Path", "time")
        recorder.backend_z3.query_record.time_output_addr = os.path.join(
            sol_time_dir, "solver_time.log")
    except:
        pass
    return cfg, executed_addr, src_addr, lib_addr

def dfs(E, source):
    return nx.dfs_tree(E, source)


# print(nx.all_pairs_dijkstra_path(G, a, b))
# print(nx.all_shortest_paths(G, a, b))
print(list(nx.dfs_edges(G, a)))
print(list(nx.dfs_tree(G, a)))

def part1(G, node):
    H = nx.dfs_tree(G, node)
    #print(H.nodes())
    #print(H.edges())
    return len(H)

def random_ordered_tree(n, seed=None):
    tree = nx.dfs_tree(nx.random_tree(n, seed=seed))
    otree = nx.OrderedDiGraph()
    otree.add_edges_from(tree.edges)
    return otree

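# Example call; nx.random_tree and nx.OrderedDiGraph were removed in recent
# NetworkX releases, so this assumes an older (pre-3.x) install:
import networkx as nx

otree = random_ordered_tree(5, seed=42)
assert nx.is_arborescence(otree)  # directed tree: every node has at most one parent
print(list(otree.edges()))        # edge insertion order follows the DFS discovery order
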
def test_dls_tree(self):
    T = nx.dfs_tree(self.G, source=3, depth_limit=1)
    assert sorted(T.edges()) == [(3, 2), (3, 4)]

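# The fixture is implicit here as well; nx.path_graph(9) reproduces the expected
# result (an assumption consistent with NetworkX's depth-limited search tests).
# depth_limit=1 keeps only nodes one hop away from the source:
import networkx as nx

G = nx.path_graph(9)  # 0-1-2-...-8
T = nx.dfs_tree(G, source=3, depth_limit=1)
print(sorted(T.edges()))  # [(3, 2), (3, 4)]
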
def weighted_bridge_augmentation(G, avail, weight=None):
    """Finds an approximate min-weight 2-edge-augmentation of G.

    This is an implementation of the approximation algorithm detailed in [1]_.
    It chooses a set of edges from avail to add to G that renders it
    2-edge-connected if such a subset exists. This is done by finding a
    minimum spanning arborescence of a specially constructed metagraph.

    Parameters
    ----------
    G : NetworkX graph
       An undirected graph.

    avail : set of 2 or 3 tuples.
        candidate edges (with optional weights) to choose from

    weight : string
        key to use to find weights if avail is a set of 3-tuples where the
        third item in each tuple is a dictionary.

    Yields
    ------
    edge : tuple
        Edges in the subset of avail chosen to bridge augment G.

    Notes
    -----
    Finding a weighted 2-edge-augmentation is NP-hard.
    Any edge not in ``avail`` is considered to have a weight of infinity.
    The approximation factor is 2 if ``G`` is connected and 3 if it is not.
    Runs in :math:`O(m + n log(n))` time

    References
    ----------
    .. [1] Khuller, Samir, and Ramakrishna Thurimella. (1993) Approximation
        algorithms for graph augmentation.
        http://www.sciencedirect.com/science/article/pii/S0196677483710102

    See Also
    --------
    :func:`bridge_augmentation`
    :func:`k_edge_augmentation`

    Examples
    --------
    >>> G = nx.path_graph((1, 2, 3, 4))
    >>> # When the weights are equal, (1, 4) is the best
    >>> avail = [(1, 4, 1), (1, 3, 1), (2, 4, 1)]
    >>> sorted(weighted_bridge_augmentation(G, avail))
    [(1, 4)]
    >>> # Giving (1, 4) a high weight makes the two edge solution the best.
    >>> avail = [(1, 4, 1000), (1, 3, 1), (2, 4, 1)]
    >>> sorted(weighted_bridge_augmentation(G, avail))
    [(1, 3), (2, 4)]
    >>> # ------
    >>> G = nx.path_graph((1, 2, 3, 4))
    >>> G.add_node(5)
    >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 1)]
    >>> sorted(weighted_bridge_augmentation(G, avail=avail))
    [(1, 5), (4, 5)]
    >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]
    >>> sorted(weighted_bridge_augmentation(G, avail=avail))
    [(1, 5), (2, 5), (4, 5)]
    """
    if weight is None:
        weight = "weight"

    # If input G is not connected the approximation factor increases to 3
    if not nx.is_connected(G):
        H = G.copy()
        connectors = list(one_edge_augmentation(H, avail=avail, weight=weight))
        H.add_edges_from(connectors)

        yield from connectors
    else:
        connectors = []
        H = G

    if len(avail) == 0:
        if nx.has_bridges(H):
            raise nx.NetworkXUnfeasible("no augmentation possible")

    avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H)

    # Collapse input into a metagraph. Meta nodes are bridge-ccs
    bridge_ccs = nx.connectivity.bridge_components(H)
    C = collapse(H, bridge_ccs)

    # Use the meta graph to shrink avail to a small feasible subset
    mapping = C.graph["mapping"]
    # Choose the minimum weight feasible edge in each group
    meta_to_wuv = {
        (mu, mv): (w, uv)
        for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w)
    }

    # Mapping of terms from (Khuller and Thurimella):
    #     C         : G_0 = (V, E^0)
    #        This is the metagraph where each node is a 2-edge-cc in G.
    #        The edges in C represent bridges in the original graph.
    #     (mu, mv)  : E - E^0  # they group both avail and given edges in E
    #     T         : \Gamma
    #     D         : G^D = (V, E_D)
    # The paper uses ancestor because children point to parents, which is
    # contrary to networkx standards. So, we actually need to run
    # nx.least_common_ancestor on the reversed Tree.

    # Pick an arbitrary leaf from C as the root
    try:
        root = next(n for n, d in C.degree() if d == 1)
    except StopIteration:  # no nodes found with degree == 1
        return
    # Root C into a tree TR by directing all edges away from the root
    # Note in their paper T directs edges towards the root
    TR = nx.dfs_tree(C, root)

    # Add to D the directed edges of T and set their weight to zero
    # This indicates that it costs nothing to use edges that were given.
    D = nx.reverse(TR).copy()

    nx.set_edge_attributes(D, name="weight", values=0)

    # The LCA of mu and mv in T is the shared ancestor of mu and mv that is
    # located farthest from the root.
    lca_gen = nx.tree_all_pairs_lowest_common_ancestor(
        TR, root=root, pairs=meta_to_wuv.keys())

    for (mu, mv), lca in lca_gen:
        w, uv = meta_to_wuv[(mu, mv)]
        if lca == mu:
            # If u is an ancestor of v in TR, then add edge u->v to D
            D.add_edge(lca, mv, weight=w, generator=uv)
        elif lca == mv:
            # If v is an ancestor of u in TR, then add edge v->u to D
            D.add_edge(lca, mu, weight=w, generator=uv)
        else:
            # If neither u nor v is an ancestor of the other in TR
            # let t = lca(TR, u, v) and add edges t->u and t->v
            # Track the original edge that GENERATED these edges.
            D.add_edge(lca, mu, weight=w, generator=uv)
            D.add_edge(lca, mv, weight=w, generator=uv)

    # Then compute a minimum rooted branching
    try:
        # Note the original edges must be directed towards the root for the
        # branching to give us a bridge-augmentation.
        A = _minimum_rooted_branching(D, root)
    except nx.NetworkXException as e:
        # If there is no branching then augmentation is not possible
        raise nx.NetworkXUnfeasible("no 2-edge-augmentation possible") from e

    # For each edge e, in the branching that did not belong to the directed
    # tree T, add the corresponding edge that **GENERATED** it (this is not
    # necessarily e itself!)

    # ensure the third case does not generate edges twice
    bridge_connectors = set()
    for mu, mv in A.edges():
        data = D.get_edge_data(mu, mv)
        if "generator" in data:
            # Add the avail edge that generated the branching edge.
            edge = data["generator"]
            bridge_connectors.add(edge)

    yield from bridge_connectors

import networkx as nx
import matplotlib.pyplot as plt

with open('graph.txt') as f:
    lines = f.readlines()

edgeList = [line.strip().split() for line in lines]

g = nx.Graph()
g.add_edges_from(edgeList)
pos = nx.planar_layout(g)
nx.draw(g, pos, with_labels=True, node_color="#f86e00")

dfs = nx.dfs_tree(g, source="go")
nx.draw(dfs, pos, with_labels=True, node_color="#f86e00", edge_color="#dd2222")
plt.show()

def allpairslca(G, pairs=None):
    """ Compute the lowest common ancestor for pairs of nodes. """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("LCA only defined on directed acyclic graphs.")
    elif len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
    elif None in G:
        raise nx.NetworkXError("None is not a valid node.")

    if (not isinstance(pairs, (Mapping, Set)) and pairs is not None):
        pairs = set(pairs)

    sources = [n for n, deg in G.in_degree if deg == 0]
    if len(sources) == 1:
        root = sources[0]
        super_root = None
    else:
        G = G.copy()
        super_root = root = generate_unique_node()
        for source in sources:
            G.add_edge(root, source)

    spanning_tree = nx.dfs_tree(G, root)
    dag = nx.DiGraph((u, v) for u, v in G.edges
                     if u not in spanning_tree or v not in spanning_tree[u])

    spanning_tree.add_nodes_from(G)
    dag.add_nodes_from(G)

    counter = count()

    root_distance = {root: next(counter)}

    for edge in nx.bfs_edges(spanning_tree, root):
        for node in edge:
            if node not in root_distance:
                root_distance[node] = next(counter)

    euler_tour_pos = {}
    for node in nx.depth_first_search.dfs_preorder_nodes(G, root):
        if node not in euler_tour_pos:
            euler_tour_pos[node] = next(counter)

    pairset = set()
    if pairs is not None:
        pairset = set(chain.from_iterable(pairs))

    for n in pairset:
        if n not in G:
            msg = "The node %s is not in the digraph." % str(n)
            raise nx.NodeNotFound(msg)

    ancestors = {}
    for v in dag:
        if pairs is None or v in pairset:
            my_ancestors = nx.dag.ancestors(dag, v)
            my_ancestors.add(v)
            ancestors[v] = sorted(my_ancestors, key=euler_tour_pos.get)

    def computedag(tree_lca, dry_run):
        """ Iterate through the in-order merge for each pair of interest.

        We do this to answer the user's query, but it is also used to
        avoid generating unnecessary tree entries when the user only
        needs some pairs.
        """
        for (node1, node2) in pairs if pairs is not None else tree_lca:
            best_root_distance = None
            best = None

            indices = [0, 0]
            ancestors_by_index = [ancestors[node1], ancestors[node2]]

            def get_next_in_merged_lists(indices):
                """ Returns index of the list containing the next item

                Next order refers to the merged order.
                Index can be 0 or 1 (or None if exhausted).
                """
                index1, index2 = indices
                if (index1 >= len(ancestors[node1]) and
                        index2 >= len(ancestors[node2])):
                    return None
                elif index1 >= len(ancestors[node1]):
                    return 1
                elif index2 >= len(ancestors[node2]):
                    return 0
                elif (euler_tour_pos[ancestors[node1][index1]] <
                      euler_tour_pos[ancestors[node2][index2]]):
                    return 0
                else:
                    return 1

            # note: the original flattened snippet defined this helper as
            # `getnext` but called it as `get_next_in_merged_lists`; the
            # names are unified here so the function actually runs.
            i = get_next_in_merged_lists(indices)
            cur = ancestors_by_index[i][indices[i]], i
            while i is not None:
                prev = cur
                indices[i] += 1
                i = get_next_in_merged_lists(indices)
                if i is not None:
                    cur = ancestors_by_index[i][indices[i]], i

                    # Two adjacent entries must not be from the same list
                    # in order for their tree LCA to be considered.
                    if cur[1] != prev[1]:
                        tree_node1, tree_node2 = prev[0], cur[0]
                        if (tree_node1, tree_node2) in tree_lca:
                            ans = tree_lca[tree_node1, tree_node2]
                        else:
                            ans = tree_lca[tree_node2, tree_node1]
                        if not dry_run and (best is None or
                                            root_distance[ans] > best_root_distance):
                            best_root_distance = root_distance[ans]
                            best = ans

            # If the LCA is super_root, there is no LCA in the user's graph.
            if not dry_run and (super_root is None or best != super_root):
                yield (node1, node2), best

    if pairs is None:
        # We want all pairs so we'll need the entire tree.
        tree_lca = dict(tarjan(spanning_tree, root))
    else:
        # We only need the merged adjacent pairs by seeing which queries the
        # algorithm needs then generating them in a single pass.
        tree_lca = defaultdict(int)
        for _ in computedag(tree_lca, True):
            pass

        # Replace the bogus default tree values with the real ones.
        for (pair, lca) in tarjan(spanning_tree, root, tree_lca):
            tree_lca[pair] = lca

    return computedag(tree_lca, False)

def all_pairs_lowest_common_ancestor(G, pairs=None):
    """Compute the lowest common ancestor for pairs of nodes.

    Parameters
    ----------
    G : NetworkX directed graph

    pairs : iterable of pairs of nodes, optional (default: all pairs)
        The pairs of nodes of interest.
        If None, will find the LCA of all pairs of nodes.

    Returns
    -------
    An iterator over ((node1, node2), lca) where (node1, node2) are
    the pairs specified and lca is a lowest common ancestor of the pair.
    Note that for the default of all pairs in G, we consider
    unordered pairs, e.g. you will not get both (b, a) and (a, b).

    Notes
    -----
    Only defined on non-null directed acyclic graphs.

    Uses the $O(n^3)$ ancestor-list algorithm from:
    M. A. Bender, M. Farach-Colton, G. Pemmasani, S. Skiena, P. Sumazin.
    "Lowest common ancestors in trees and directed acyclic graphs."
    Journal of Algorithms, 57(2): 75-94, 2005.

    See Also
    --------
    tree_all_pairs_lowest_common_ancestor
    lowest_common_ancestor
    """
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("LCA only defined on directed acyclic graphs.")
    elif len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
    elif None in G:
        raise nx.NetworkXError("None is not a valid node.")

    # The copy isn't ideal, neither is the switch-on-type, but without it users
    # passing an iterable will encounter confusing errors, and itertools.tee
    # does not appear to handle builtin types efficiently (IE, it materializes
    # another buffer rather than just creating list iterators at the same
    # offset). The Python documentation notes use of tee is unadvised when one
    # is consumed before the other.
    #
    # This will always produce correct results and avoid unnecessary
    # copies in many common cases.
    #
    if (not isinstance(pairs, (Mapping, Set)) and pairs is not None):
        pairs = set(pairs)

    # Convert G into a dag with a single root by adding a node with edges to
    # all sources iff necessary.
    sources = [n for n, deg in G.in_degree if deg == 0]
    if len(sources) == 1:
        root = sources[0]
        super_root = None
    else:
        G = G.copy()
        super_root = root = generate_unique_node()
        for source in sources:
            G.add_edge(root, source)

    # Start by computing a spanning tree, and the DAG of all edges not in it.
    # We will then use the tree lca algorithm on the spanning tree, and use
    # the DAG to figure out the set of tree queries necessary.
    spanning_tree = nx.dfs_tree(G, root)
    dag = nx.DiGraph((u, v) for u, v in G.edges
                     if u not in spanning_tree or v not in spanning_tree[u])

    # Ensure that both the dag and the spanning tree contains all nodes in G,
    # even nodes that are disconnected in the dag.
    spanning_tree.add_nodes_from(G)
    dag.add_nodes_from(G)

    counter = count()

    # Necessary to handle graphs consisting of a single node and no edges.
    root_distance = {root: next(counter)}

    for edge in nx.bfs_edges(spanning_tree, root):
        for node in edge:
            if node not in root_distance:
                root_distance[node] = next(counter)

    # Index the position of all nodes in the Euler tour so we can efficiently
    # sort lists and merge in tour order.
    euler_tour_pos = {}
    for node in nx.depth_first_search.dfs_preorder_nodes(G, root):
        if node not in euler_tour_pos:
            euler_tour_pos[node] = next(counter)

    # Generate the set of all nodes of interest in the pairs.
    pairset = set()
    if pairs is not None:
        pairset = set(chain.from_iterable(pairs))

    for n in pairset:
        if n not in G:
            msg = "The node %s is not in the digraph." % str(n)
            raise nx.NodeNotFound(msg)

    # Generate the transitive closure over the dag (not G) of all nodes, and
    # sort each node's closure set by order of first appearance in the Euler
    # tour.
    ancestors = {}
    for v in dag:
        if pairs is None or v in pairset:
            my_ancestors = nx.dag.ancestors(dag, v)
            my_ancestors.add(v)
            ancestors[v] = sorted(my_ancestors, key=euler_tour_pos.get)

    def _compute_dag_lca_from_tree_values(tree_lca, dry_run):
        """Iterate through the in-order merge for each pair of interest.

        We do this to answer the user's query, but it is also used to
        avoid generating unnecessary tree entries when the user only
        needs some pairs.
        """
        for (node1, node2) in pairs if pairs is not None else tree_lca:
            best_root_distance = None
            best = None

            indices = [0, 0]
            ancestors_by_index = [ancestors[node1], ancestors[node2]]

            def get_next_in_merged_lists(indices):
                """Returns index of the list containing the next item

                Next order refers to the merged order.
                Index can be 0 or 1 (or None if exhausted).
                """
                index1, index2 = indices
                if (index1 >= len(ancestors[node1]) and
                        index2 >= len(ancestors[node2])):
                    return None
                elif index1 >= len(ancestors[node1]):
                    return 1
                elif index2 >= len(ancestors[node2]):
                    return 0
                elif (euler_tour_pos[ancestors[node1][index1]] <
                      euler_tour_pos[ancestors[node2][index2]]):
                    return 0
                else:
                    return 1

            # Find the LCA by iterating through the in-order merge of the two
            # nodes of interests' ancestor sets. In principle, we need to
            # consider all pairs in the Cartesian product of the ancestor sets,
            # but by the restricted min range query reduction we are guaranteed
            # that one of the pairs of interest is adjacent in the merged list
            # iff one came from each list.
            i = get_next_in_merged_lists(indices)
            cur = ancestors_by_index[i][indices[i]], i
            while i is not None:
                prev = cur
                indices[i] += 1
                i = get_next_in_merged_lists(indices)
                if i is not None:
                    cur = ancestors_by_index[i][indices[i]], i

                    # Two adjacent entries must not be from the same list
                    # in order for their tree LCA to be considered.
                    if cur[1] != prev[1]:
                        tree_node1, tree_node2 = prev[0], cur[0]
                        if (tree_node1, tree_node2) in tree_lca:
                            ans = tree_lca[tree_node1, tree_node2]
                        else:
                            ans = tree_lca[tree_node2, tree_node1]
                        if not dry_run and (best is None or
                                            root_distance[ans] > best_root_distance):
                            best_root_distance = root_distance[ans]
                            best = ans

            # If the LCA is super_root, there is no LCA in the user's graph.
            if not dry_run and (super_root is None or best != super_root):
                yield (node1, node2), best

    # Generate the spanning tree lca for all pairs. This doesn't make sense to
    # do incrementally since we are using a linear time offline algorithm for
    # tree lca.
    if pairs is None:
        # We want all pairs so we'll need the entire tree.
        tree_lca = dict(tree_all_pairs_lowest_common_ancestor(spanning_tree,
                                                              root))
    else:
        # We only need the merged adjacent pairs by seeing which queries the
        # algorithm needs then generating them in a single pass.
        tree_lca = defaultdict(int)
        for _ in _compute_dag_lca_from_tree_values(tree_lca, True):
            pass

        # Replace the bogus default tree values with the real ones.
        for (pair, lca) in tree_all_pairs_lowest_common_ancestor(spanning_tree,
                                                                 root,
                                                                 tree_lca):
            tree_lca[pair] = lca

    # All precomputations complete. Now we just need to give the user the
    # pairs they asked for, or all pairs if they want them all.
    return _compute_dag_lca_from_tree_values(tree_lca, False)

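# The function above mirrors NetworkX's all_pairs_lowest_common_ancestor; a
# small behavioural check on a diamond-shaped DAG:
import networkx as nx

G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])
print(dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (1, 3)])))
# {(1, 2): 0, (1, 3): 1}
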
def locations_builder():
    template_name = 'frontend/location_builder.html'
    page_title = _('Administrative Divisions')

    if request.method == 'POST' and request.form.get('divisions_graph'):
        nx_graph = nx.DiGraph()
        divisions_graph = json.loads(request.form.get('divisions_graph'))

        nodes = filter(
            lambda cell: cell.get('type') == 'basic.Rect',
            divisions_graph.get('cells'))
        links = filter(
            lambda cell: cell.get('type') == 'link',
            divisions_graph.get('cells'))

        valid_node_ids = map(
            lambda cell: cell.get('id'),
            filter(
                lambda cell: helpers.is_objectid(cell.get('id')), nodes))

        # 1. Delete non-referenced location types
        services.location_types.find(id__nin=valid_node_ids).delete()

        # 2. Create location types and update ids
        for i, node in enumerate(nodes):
            try:
                lt = services.location_types.find().get(id=node.get('id'))
                lt.is_administrative = node.get('is_administrative')
                lt.is_political = node.get('is_political')
                lt.name = node.get('label')
                lt.ancestors_ref = []
                lt.save()
            except (models.LocationType.DoesNotExist, ValidationError):
                lt = services.location_types.create(
                    name=node.get('label'),
                    is_administrative=node.get('is_administrative'),
                    is_political=node.get('is_political')
                )

                # update graph node and link ids
                for j, link in enumerate(links):
                    if link['source'].get('id', None) == node.get('id'):
                        links[j]['source']['id'] = str(lt.id)
                    if link['target'].get('id', None) == node.get('id'):
                        links[j]['target']['id'] = str(lt.id)
                nodes[i]['id'] = str(lt.id)

        # 3. Build graph
        for node in nodes:
            nx_graph.add_node(node.get('id'))
        for link in links:
            if link['source'].get('id') and link['target'].get('id'):
                nx_graph.add_edge(
                    link['source'].get('id'), link['target'].get('id'))

        # 4. Update ancestor relationships
        for link in links:
            if link['source'].get('id') and link['target'].get('id'):
                ancestors = nx.topological_sort(
                    nx_graph.reverse(),
                    nx_graph.reverse().subgraph(
                        nx.dfs_tree(
                            nx_graph.reverse(),
                            link['target'].get('id')).nodes()).nodes())
                services.location_types.find().get(
                    id=link['target'].get('id')).update(
                    add_to_set__ancestors_ref=filter(
                        lambda ancestor: ancestor != link['target'].get('id'),
                        ancestors))

        # 5. Update ancestor count
        for location_type in services.location_types.find():
            location_type.save()

        divisions_graph['cells'] = nodes + links

        g.deployment.administrative_divisions_graph = json.dumps(
            divisions_graph)
        g.deployment.save()
        g.deployment.reload()
        flash(
            _('Your changes have been saved.'),
            category='locations_builder'
        )

    return render_template(template_name, page_title=page_title)

def split_into_branchless_segments(undirected_graph):
    """
    Break the (possibly disjoint) nx.Graph into 'segments'.

    Here, a 'segment' is a string of connected nodes that don't include
    branch points of the original graph, except for the first/last node
    of the segment.

    Note: The graph must not contain cycles.

    For example:

        a - b - c - d
                 \
                  e - f - g

        h - i - j

    The above graph would be split into four segments:

        ['a', 'b', 'c'],
        ['c', 'd'],
        ['c', 'e', 'f', 'g'],
        ['h', 'i', 'j']
    """
    # Choose an arbitrary tip to use as the tree root
    degrees = list(undirected_graph.degree_iter())
    tip_degrees = [node_degree for node_degree in degrees
                   if node_degree[1] == 1]
    tip_nodes = [node_degree1[0] for node_degree1 in tip_degrees]

    tips_already_processed = set()
    segments = []
    for tip_node in tip_nodes:
        if tip_node in tips_already_processed:
            continue
        tips_already_processed.add(tip_node)

        # Construct a tree (DiGraph) from the tip
        tree = nx.dfs_tree(undirected_graph, tip_node)

        def grow_segments(current_node, current_segments):
            """
            Helper function.
            Add the current node to the current segment (the last item in
            'current_segments'), and if the current node is a branch point,
            start two new segments. Continue recursively until the leaf
            nodes are found.
            """
            # Append the current node to the current segment
            current_segments[-1].append(current_node)
            children = tree.neighbors(current_node)
            if len(children) == 0:
                tips_already_processed.add(current_node)
            elif len(children) == 1:
                grow_segments(children[0], current_segments)
            else:
                # We've found a branch point; the current segment is complete.
                # Start a new segment for each child branch.
                for child in children:
                    current_segments.append([current_node])
                    grow_segments(child, current_segments)

        segments.append([])
        grow_segments(tip_node, segments)
    return segments

def _get_graph_data(self, G):
    node_degree_list = [(n, d) for n, d in G.degree()]

    nodes = G.nodes(data=True)
    adj_0 = np.array(nx.to_numpy_matrix(G))
    node_label_0 = np.array([nodes[x[0]]['label'] for x in nodes])

    ### Degree descent ranking
    # N.B.: largest-degree node may not be unique
    degree_sequence = sorted(
        node_degree_list, key=lambda tt: tt[1], reverse=True)
    adj_1 = np.array(
        nx.to_numpy_matrix(G, nodelist=[dd[0] for dd in degree_sequence]))
    node_label_1 = np.array([nodes[dd[0]]['label'] for dd in degree_sequence])

    ### Degree ascent ranking
    degree_sequence = sorted(node_degree_list, key=lambda tt: tt[1])
    adj_2 = np.array(
        nx.to_numpy_matrix(G, nodelist=[dd[0] for dd in degree_sequence]))
    node_label_2 = np.array([nodes[dd[0]]['label'] for dd in degree_sequence])

    ### BFS & DFS from largest-degree node
    CGs = [G.subgraph(c) for c in nx.connected_components(G)]

    # rank connected components from large to small size
    CGs = sorted(CGs, key=lambda x: x.number_of_nodes(), reverse=True)

    node_list_bfs = []
    node_list_dfs = []
    for ii in range(len(CGs)):
        node_degree_list = [(n, d) for n, d in CGs[ii].degree()]
        degree_sequence = sorted(
            node_degree_list, key=lambda tt: tt[1], reverse=True)

        bfs_tree = nx.bfs_tree(CGs[ii], source=degree_sequence[0][0])
        dfs_tree = nx.dfs_tree(CGs[ii], source=degree_sequence[0][0])

        node_list_bfs += list(bfs_tree.nodes())
        node_list_dfs += list(dfs_tree.nodes())

    adj_3 = np.array(nx.to_numpy_matrix(G, nodelist=node_list_bfs))
    node_label_3 = np.array([nodes[x]['label'] for x in node_list_bfs])

    adj_4 = np.array(nx.to_numpy_matrix(G, nodelist=node_list_dfs))
    node_label_4 = np.array([nodes[x]['label'] for x in node_list_dfs])

    ### k-core
    num_core = nx.core_number(G)
    core_order_list = sorted(list(set(num_core.values())), reverse=True)
    degree_dict = dict(G.degree())
    core_to_node = defaultdict(list)
    for nn, kk in num_core.items():
        core_to_node[kk] += [nn]

    node_list = []
    for kk in core_order_list:
        sort_node_tuple = sorted(
            [(nn, degree_dict[nn]) for nn in core_to_node[kk]],
            key=lambda tt: tt[1],
            reverse=True)
        node_list += [nn for nn, dd in sort_node_tuple]

    adj_5 = np.array(nx.to_numpy_matrix(G, nodelist=node_list))
    node_label_5 = np.array([nodes[x]['label'] for x in node_list])

    if self.num_canonical_order == 5:
        adj_list = [adj_0, adj_1, adj_3, adj_4, adj_5]
        node_label_list = [node_label_0, node_label_1, node_label_3,
                           node_label_4, node_label_5]
    else:
        if self.node_order == 'degree_decent':
            adj_list = [adj_1]
            node_label_list = [node_label_1]
        elif self.node_order == 'degree_accent':
            adj_list = [adj_2]
            node_label_list = [node_label_2]
        elif self.node_order == 'BFS':
            adj_list = [adj_3]
            node_label_list = [node_label_3]
        elif self.node_order == 'DFS':
            adj_list = [adj_4]
            node_label_list = [node_label_4]
        elif self.node_order == 'k_core':
            adj_list = [adj_5]
            node_label_list = [node_label_5]
        elif self.node_order == 'DFS+BFS':
            adj_list = [adj_4, adj_3]
            node_label_list = [node_label_4, node_label_3]
        elif self.node_order == 'DFS+BFS+k_core':
            adj_list = [adj_4, adj_3, adj_5]
            node_label_list = [node_label_4, node_label_3, node_label_5]
        elif self.node_order == 'DFS+BFS+k_core+degree_decent':
            adj_list = [adj_4, adj_3, adj_5, adj_1]
            node_label_list = [node_label_4, node_label_3, node_label_5,
                               node_label_1]
        elif self.node_order == 'all':
            adj_list = [adj_4, adj_3, adj_5, adj_1, adj_0]
            node_label_list = [node_label_4, node_label_3, node_label_5,
                               node_label_1, node_label_0]
        else:
            adj_list = [adj_0]
            node_label_list = [node_label_0]

    # print('number of nodes = {}'.format(adj_0.shape[0]))
    return adj_list, node_label_list, G.graph['label'] - 1

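# The BFS and DFS canonical orderings above differ in how they fan out from the
# chosen source; on a small balanced tree the contrast is easy to see:
import networkx as nx

G = nx.balanced_tree(2, 2)      # binary tree on nodes 0..6
print(list(nx.bfs_tree(G, 0)))  # [0, 1, 2, 3, 4, 5, 6] -- level by level
print(list(nx.dfs_tree(G, 0)))  # [0, 1, 3, 4, 2, 5, 6] -- one branch at a time
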
def process(self):
    p_idx = 0
    for raw_path in self.raw_paths:
        # Read data from `raw_path`.
        if raw_path.endswith(".json"):
            with open(raw_path, "r") as fin:
                replay_json = json.load(fin)
        else:
            with gzip.GzipFile(raw_path, "r") as fin:
                replay_json = json.load(fin)
        if replay_json["rawReward"] <= 0 or not replay_json["utterance"]:
            continue
        try:
            fields = fields_factory(self.task_name, replay_json["utterance"])
        except ValueError as e:
            print(e)
            continue
        fields_e = encode_fields(fields)
        fields_matrix = from_networkx(fields_e)
        frames = []
        states = replay_json["states"]
        current_string = ""
        last_dom_nx = None
        for j, state in enumerate(states):
            dom_json = state["dom"]
            dom_nx = miniwob_to_graph(dom_json)
            assert len(dom_nx)
            action_str = "wait"
            action = state["action"]
            current_node = None
            field_id = None
            # skip unrecognized actions
            if action and action["type"] not in ("click", "keypress"):
                continue
            if (action and action["timing"] != 1):
                # timing indicates that the same action has multiple phases
                if action["type"] == "click":
                    action_str = "click"
                if action["type"] == "keypress":
                    code = action["keyCode"] or action["charCode"]
                    char = chr(code)
                    current_string += char
                    # look at next event
                    if (len(states) > j + 2 and
                            states[j + 1]["action"] == "keypress"):
                        # goto next action
                        continue
                    action_str = "paste_field"
                    current_string = apply_backspaces(current_string)
                    # identify field
                    for i, (key, value) in enumerate(fields._d.items()):
                        if value == current_string:
                            field_id = i
                            break
                    current_string = ""
                elif current_string:
                    # TODO identify field
                    print("Warning: uncaught transition from keypress")
                    current_string = ""
                # TODO offset by scroll
                if "x" in action and last_dom_nx:
                    # find current node
                    x, y = action["x"], action["y"]
                    # returns a path for depth first search,
                    # reverse order to get leaves first
                    t = list(nx.dfs_tree(dom_nx))
                    t.reverse()
                    for node_id in t:
                        node = dom_nx.nodes[node_id]
                        # print("Node", node_id, node)
                        if not node or node_id not in last_dom_nx:
                            continue
                        if (x > node["top"] and y > node["left"] and
                                x < node["top"] + node["height"] and
                                y < node["left"] + node["width"]):
                            current_node = node
                            break
                    if current_node is not None and field_id is None:
                        t = current_node.get("text")
                        for i, (key, value) in enumerate(fields._d.items()):
                            if value == t:
                                field_id = i
                                break
            data = encode_dom_graph(dom_nx, current_node)
            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                data = self.pre_transform(data)
            frame = {
                "dom": from_networkx(data),
                "fields": fields_matrix,
                "action": action_str,
                "field_id": field_id,
                "node_id": list(last_dom_nx).index(current_node["ref"])
                if current_node and last_dom_nx else None,
                "time": state["time"],
                "reward": 0.0,
            }
            frames.append(frame)
            last_dom_nx = dom_nx
        if len(frames) > 1:
            frames[-1]["reward"] = replay_json["rawReward"]
            torch.save(
                frames,
                osp.join(self.processed_dir, "data_{}.pt".format(p_idx)))
            p_idx += 1

#Graf.add_weighted_edges_from([(i,j,matrizadd[i,j])])
Graf.add_edges_from([(i, j)])

cont = 0
lista_tiempos = []
lista_tiempos_completos = {}
tiempo_de_paro = 0
tiempo_inicial = 0
tiempo_final = 0
tiempo_ejecucion = 0

for r in range(30):
    lista_tiempos_completos[r + 1] = []
    tiempo_de_paro = 0
    while tiempo_de_paro < 1:
        tiempo_inicial = time()
        T = nx.dfs_tree(Graf, source=0, depth_limit=None)
        tiempo_final = time()
        tiempo_ejecucion = tiempo_final - tiempo_inicial
        if tiempo_ejecucion > 0.0:
            lista_tiempos_completos[r + 1].append(tiempo_ejecucion * 10000)
            tiempo_de_paro += tiempo_ejecucion

#diccionario_inst[rog]=[]
diccionario_inst_dfs_tree[rog] = []
for i in lista_tiempos_completos.keys():
    #desviacion=statistics.stdev(lista_tiempos_completos[i])
    media = np.mean(lista_tiempos_completos[i])

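# The loop above discards 0.0 readings because time() is too coarse for a
# single dfs_tree call; timeit gives a mean per-call figure directly
# (sketch with a hypothetical 200-node random graph):
import timeit
import networkx as nx

Graf = nx.gnp_random_graph(200, 0.05, seed=1)
mean_s = timeit.timeit(lambda: nx.dfs_tree(Graf, source=0), number=100) / 100
print("mean dfs_tree time: %.1f us" % (mean_s * 1e6))
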
def test_dfs_tree(self):
    T = nx.dfs_tree(self.G, source=0)
    assert_equal(sorted(T.nodes()), sorted(self.G.nodes()))
    assert_equal(sorted(T.edges()), [(0, 1), (1, 2), (2, 4), (4, 3)])

def lukes_partitioning(G, max_size: int, node_weight=None, edge_weight=None) -> list: """Optimal partitioning of a weighted tree using the Lukes algorithm. This algorithm partitions a connected, acyclic graph featuring integer node weights and float edge weights. The resulting clusters are such that the total weight of the nodes in each cluster does not exceed max_size and that the weight of the edges that are cut by the partition is minimum. The algorithm is based on LUKES[1]. Parameters ---------- G : graph max_size : int Maximum weight a partition can have in terms of sum of node_weight for all nodes in the partition edge_weight : key Edge data key to use as weight. If None, the weights are all set to one. node_weight : key Node data key to use as weight. If None, the weights are all set to one. The data must be int. Returns ------- partition : list A list of sets of nodes representing the clusters of the partition. Raises ------- NotATree If G is not a tree. TypeError If any of the values of node_weight is not int. References ---------- .. Lukes, J. A. (1974). "Efficient Algorithm for the Partitioning of Trees." IBM Journal of Research and Development, 18(3), 217–224. """ # First sanity check and tree preparation if not nx.is_tree(G): raise nx.NotATree('lukes_partitioning works only on trees') else: if nx.is_directed(G): root = [n for n, d in G.in_degree() if d == 0] assert len(root) == 1 root = root[0] t_G = deepcopy(G) else: root = choice(list(G.nodes)) # this has the desirable side effect of not inheriting attributes t_G = nx.dfs_tree(G, root) # Since we do not want to screw up the original graph, # if we have a blank attribute, we make a deepcopy if edge_weight is None or node_weight is None: safe_G = deepcopy(G) if edge_weight is None: nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) edge_weight = D_EDGE_W if node_weight is None: nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) node_weight = D_NODE_W else: safe_G = G # Second sanity check # The values of node_weight MUST BE int. # I cannot see any room for duck typing without incurring serious # danger of subtle bugs. 
all_n_attr = nx.get_node_attributes(safe_G, node_weight).values()
for x in all_n_attr:
    if not isinstance(x, int):
        raise TypeError('lukes_partitioning needs integer '
                        'values for node_weight ({})'.format(node_weight))

# SUBROUTINES -----------------------
# these functions are defined here for two reasons:
# - brevity: we can leverage global "safe_G"
# - caching: signatures are hashable

@not_implemented_for('undirected')
# this is intended to be called only on t_G
def _leaves(gr):
    for x in gr.nodes:
        if not nx.descendants(gr, x):
            yield x

@not_implemented_for('undirected')
def _a_parent_of_leaves_only(gr):
    tleaves = set(_leaves(gr))
    for n in set(gr.nodes) - tleaves:
        if all([x in tleaves for x in nx.descendants(gr, n)]):
            return n

@lru_cache(CLUSTER_EVAL_CACHE_SIZE)
def _value_of_cluster(cluster: frozenset):
    valid_edges = [e for e in safe_G.edges
                   if e[0] in cluster and e[1] in cluster]
    return sum([safe_G.edges[e][edge_weight] for e in valid_edges])

def _value_of_partition(partition: list):
    return sum([_value_of_cluster(frozenset(c)) for c in partition])

@lru_cache(CLUSTER_EVAL_CACHE_SIZE)
def _weight_of_cluster(cluster: frozenset):
    return sum([safe_G.nodes[n][node_weight] for n in cluster])

def _pivot(partition: list, node):
    ccx = [c for c in partition if node in c]
    assert len(ccx) == 1
    return ccx[0]

def _concatenate_or_merge(partition_1: list, partition_2: list,
                          x, i, ref_weight):
    ccx = _pivot(partition_1, x)
    cci = _pivot(partition_2, i)
    merged_xi = ccx.union(cci)

    # We first check if we can do the merge.
    # If so, we do the actual calculations, otherwise we concatenate
    if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight:
        cp1 = list(filter(lambda x: x != ccx, partition_1))
        cp2 = list(filter(lambda x: x != cci, partition_2))

        option_2 = [merged_xi] + cp1 + cp2
        return option_2, _value_of_partition(option_2)
    else:
        option_1 = partition_1 + partition_2
        return option_1, _value_of_partition(option_1)

# INITIALIZATION -----------------------
leaves = set(_leaves(t_G))
for lv in leaves:
    t_G.nodes[lv][PKEY] = dict()
    slot = safe_G.nodes[lv][node_weight]
    t_G.nodes[lv][PKEY][slot] = [{lv}]
    t_G.nodes[lv][PKEY][0] = [{lv}]

for inner in [x for x in t_G.nodes if x not in leaves]:
    t_G.nodes[inner][PKEY] = dict()
    slot = safe_G.nodes[inner][node_weight]
    t_G.nodes[inner][PKEY][slot] = [{inner}]

# CORE ALGORITHM -----------------------
while True:
    x_node = _a_parent_of_leaves_only(t_G)
    weight_of_x = safe_G.nodes[x_node][node_weight]
    best_value = 0
    best_partition = None
    bp_buffer = dict()
    x_descendants = nx.descendants(t_G, x_node)
    for i_node in x_descendants:
        for j in range(weight_of_x, max_size + 1):
            for a, b in _split_n_from(j, weight_of_x):
                if a not in t_G.nodes[x_node][PKEY].keys() \
                        or b not in t_G.nodes[i_node][PKEY].keys():
                    # it's not possible to form this particular weight sum
                    continue

                part1 = t_G.nodes[x_node][PKEY][a]
                part2 = t_G.nodes[i_node][PKEY][b]
                part, value = _concatenate_or_merge(part1, part2,
                                                    x_node, i_node, j)

                if j not in bp_buffer.keys() or bp_buffer[j][1] < value:
                    # we annotate in the buffer the best partition for j
                    bp_buffer[j] = part, value

                # we also keep track of the overall best partition
                if best_value <= value:
                    best_value = value
                    best_partition = part

        # as illustrated in Lukes, once we finished a child, we can
        # discharge the partitions we found into the graph
        # (the key phrase is make all x == x')
        # so that they are used by the subsequent children
        for w, (best_part_for_vl, vl) in bp_buffer.items():
            t_G.nodes[x_node][PKEY][w] = best_part_for_vl
        bp_buffer.clear()

    # the absolute best partition for this node
    # across all weights has to be stored at 0
    t_G.nodes[x_node][PKEY][0] = best_partition
    t_G.remove_nodes_from(x_descendants)

    if x_node == root:
        # the 0-labeled partition of root
        # is the optimal one for the whole tree
        return t_G.nodes[root][PKEY][0]
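# The fragment above is the core of NetworkX's Lukes tree-partitioning
# algorithm. A minimal usage sketch of its public entry point (assuming
# networkx >= 2.4, where lukes_partitioning ships in
# networkx.algorithms.community; the attribute names below are arbitrary):
import networkx as nx
from networkx.algorithms.community import lukes_partitioning

# a small path-shaped tree; every node weighs 1, every edge costs 1
T = nx.path_graph(6)
nx.set_node_attributes(T, 1, "weight")
nx.set_edge_attributes(T, 1, "cost")

# split the tree into clusters whose total node weight is at most 3
clusters = lukes_partitioning(T, max_size=3,
                              node_weight="weight", edge_weight="cost")
print(clusters)  # e.g. [{0, 1, 2}, {3, 4, 5}]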
def dfs_postorder(self, root):
    G = nx.Graph(self.E)
    tree_graph = nx.dfs_tree(G, root)
    clique_ordering = list(nx.dfs_postorder_nodes(tree_graph, root))
    return clique_ordering
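# A toy run of the same pattern as dfs_postorder above: the postorder of a
# DFS tree yields a leaf-to-root ordering (the graph below is illustrative):
import networkx as nx

G = nx.Graph([(1, 2), (1, 3), (3, 4)])
tree = nx.dfs_tree(G, 1)                       # orient the graph away from the root
order = list(nx.dfs_postorder_nodes(tree, 1))  # children always precede their parent
print(order)  # e.g. [2, 4, 3, 1]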
def bowtie_analysis(G):
    # reverse all edge directions of the graph
    GT = nx.reverse(G, copy=True)

    # compute the strongly connected components
    scc = list(nx.strongly_connected_components(G))
    if len(scc) == 0:
        return {}

    SSC = max(scc, key=len)

    # take any node n from SSC and do a depth first search
    # through directed graph beginning from node n
    v_any = list(SSC)[0]
    DFS_G = set(nx.dfs_tree(G, v_any).nodes())
    DFS_GT = set(nx.dfs_tree(GT, v_any).nodes())
    OUT = DFS_G - SSC
    IN = DFS_GT - SSC
    V_rest = set(G.nodes()) - SSC - OUT - IN

    TUBES = set()
    INTENDRILS = set()
    OUTTENDRILS = set()
    OTHER = set()
    for v in V_rest:
        # irv => IN reaches node v
        irv = len(IN & set(nx.dfs_tree(GT, v).nodes())) != 0
        # vro => node v reaches OUT
        vro = len(OUT & set(nx.dfs_tree(G, v).nodes())) != 0
        if irv and vro:
            TUBES.add(v)
        elif irv and not vro:
            INTENDRILS.add(v)
        elif not irv and vro:
            OUTTENDRILS.add(v)
        elif not irv and not vro:
            OTHER.add(v)

    FRINGE = set()
    DISCONNECTED = set()
    for o in OTHER:
        # orIT => node o reaches INTENDRILS
        orIT = len(INTENDRILS & set(nx.dfs_tree(G, o))) != 0
        # OTro => OUTTENDRILS reaches node o
        OTro = len(OUTTENDRILS & set(nx.dfs_tree(GT, o))) != 0
        if orIT or OTro:
            FRINGE.add(o)
        else:
            DISCONNECTED.add(o)

    TENDRILS = INTENDRILS.union(OUTTENDRILS)

    def component_result(name, graph_nodes):
        return {name: len(graph_nodes)}

    result_dict = dict()
    result_dict.update(component_result("nodes", G.nodes()))
    result_dict.update(component_result("ssc", SSC))
    result_dict.update(component_result("in", IN))
    result_dict.update(component_result("out", OUT))
    result_dict.update(component_result("tubes", TUBES))
    result_dict.update(component_result("tendrils", TENDRILS))
    result_dict.update(component_result("fringe", FRINGE))
    result_dict.update(component_result("disconnected", DISCONNECTED))

    return result_dict
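# A quick check of bowtie_analysis on a hand-built bow-tie: a 3-cycle core
# with one upstream and one downstream node. The expected counts assume
# exactly this construction:
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 1)])  # strongly connected core
G.add_edge(0, 1)                            # node 0 forms the IN component
G.add_edge(3, 4)                            # node 4 forms the OUT component
print(bowtie_analysis(G))
# e.g. {'nodes': 5, 'ssc': 3, 'in': 1, 'out': 1, 'tubes': 0,
#       'tendrils': 0, 'fringe': 0, 'disconnected': 0}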
def sanity_check_all_connected(genome):
    tree = nx.dfs_tree(genome.graph, source=genome.starting_node)
    if len(tree) > 2:
        return len(tree) == len(genome.graph)
def recover_reaching_conditions(self, region, with_successors=False, jump_tables=None):

    def _strictly_postdominates(inv_idoms, node_a, node_b):
        """
        Does node A strictly post-dominate node B on the graph?
        """
        return dominates(inv_idoms, node_a, node_b)

    edge_conditions = {}
    predicate_mapping = {}
    # traverse the graph to recover the condition for each edge
    for src in region.graph.nodes():
        nodes = list(region.graph[src])
        if len(nodes) >= 1:
            for dst in nodes:
                edge = src, dst
                edge_data = region.graph.get_edge_data(*edge)
                edge_type = edge_data.get('type', 'transition')
                try:
                    predicate = self._extract_predicate(src, dst, edge_type)
                except EmptyBlockNotice:
                    # catch empty block notice - although this should not really happen
                    predicate = claripy.true
                edge_conditions[edge] = predicate
                predicate_mapping[predicate] = dst

    if jump_tables:
        self.recover_reaching_conditions_for_jumptables(region, jump_tables, edge_conditions)

    if with_successors and region.graph_with_successors is not None:
        _g = region.graph_with_successors
    else:
        _g = region.graph
    end_nodes = {n for n in _g.nodes() if _g.out_degree(n) == 0}
    inverted_graph: networkx.DiGraph = shallow_reverse(_g)

    if end_nodes:
        if len(end_nodes) > 1:
            # make sure there is only one end node
            dummy_node = "DUMMY_NODE"
            for end_node in end_nodes:
                inverted_graph.add_edge(dummy_node, end_node)
            endnode = dummy_node
        else:
            endnode = next(iter(end_nodes))  # pick the end node

        idoms = networkx.immediate_dominators(inverted_graph, endnode)
    else:
        idoms = None

    reaching_conditions = {}
    # recover the reaching condition for each node
    sorted_nodes = CFGUtils.quasi_topological_sort_nodes(_g)
    terminating_nodes = []
    for node in sorted_nodes:
        preds = _g.predecessors(node)
        reaching_condition = None

        out_degree = _g.out_degree(node)
        if out_degree == 0:
            terminating_nodes.append(node)

        if node is region.head:
            # the head is always reachable
            reaching_condition = claripy.true
        elif idoms is not None and _strictly_postdominates(idoms, node, region.head):
            # the node that post-dominates the head is always reachable
            reaching_conditions[node] = claripy.true
        else:
            for pred in preds:
                edge = (pred, node)
                pred_condition = reaching_conditions.get(pred, claripy.true)
                edge_condition = edge_conditions.get(edge, claripy.true)

                if reaching_condition is None:
                    reaching_condition = claripy.And(pred_condition, edge_condition)
                else:
                    reaching_condition = claripy.Or(
                        claripy.And(pred_condition, edge_condition),
                        reaching_condition,
                    )

        if reaching_condition is not None:
            reaching_conditions[node] = self.simplify_condition(reaching_condition)

    # My hypothesis to be proved: in any regioned graph, there must be a node with a 0 out-degree whose reaching
    # conditions can be marked as True. In other words, if all 0 out-degree nodes have non-trivial reaching
    # conditions, we can always pick one of them and change its reaching condition to True, without changing the
    # semantics of the regioned graph.
    if terminating_nodes and all(not reaching_conditions[node].is_true()
                                 for node in terminating_nodes
                                 if node in reaching_conditions):
        # pick the node with the greatest in-degree
        terminating_nodes = sorted(terminating_nodes, key=_g.in_degree)
        node_with_greatest_indegree = terminating_nodes[-1]
        if _g.in_degree(node_with_greatest_indegree) > 1:
            # forcing the in-degree to be greater than 1 allows us to skip the case blocks in switch-cases
            # otherwise structurer will fail to structure the control flow
            reaching_conditions[node_with_greatest_indegree] = claripy.true
            l.warning("Marking node %r as trivially reachable. Disable this optimization in "
                      "condition_processor.py if it leads to incorrect decompilation result.",
                      node_with_greatest_indegree)

    # Another hypothesis: for nodes where two paths come together *and* those that cannot be further structured into
    # another if-else construct (we take the short-cut by testing if the operator is an "Or" after running our
    # condition simplifiers previously), we are better off using their "guarding conditions" instead of their
    # reaching conditions for if-else. see my super long chatlog with rhelmot on 5/14/2021.
    guarding_conditions = {}
    for the_node in sorted_nodes:

        preds = list(_g.predecessors(the_node))
        if len(preds) != 2:
            continue

        # generate a graph slice that goes from the region head to this node
        slice_nodes = list(networkx.dfs_tree(inverted_graph, the_node))
        subgraph = networkx.subgraph(_g, slice_nodes)

        # figure out which paths cause the divergence from this node
        nodes_do_not_reach_the_node = set()
        for node_ in subgraph:
            if node_ is the_node:
                continue
            for succ in _g.successors(node_):
                if not networkx.has_path(_g, succ, the_node):
                    nodes_do_not_reach_the_node.add(succ)

        diverging_conditions = []
        for node_ in nodes_do_not_reach_the_node:
            preds_ = list(_g.predecessors(node_))
            for pred_ in preds_:
                if pred_ in nodes_do_not_reach_the_node:
                    continue
                # this predecessor is the diverging node!
                edge_ = pred_, node_
                edge_condition = edge_conditions.get(edge_, None)
                if edge_condition is not None:
                    diverging_conditions.append(edge_condition)

        if diverging_conditions:
            # the negation of the union of diverging conditions is the guarding condition for this node
            cond = claripy.Or(*map(claripy.Not, diverging_conditions))
            guarding_conditions[the_node] = cond

    self.reaching_conditions = reaching_conditions
    self.guarding_conditions = guarding_conditions
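# The dominator step above uses a standard trick: post-dominators of a graph
# are the dominators of its reverse, rooted at a unique (possibly artificial)
# exit node. A self-contained sketch with made-up node names:
import networkx as nx

g = nx.DiGraph([("head", "a"), ("head", "b"), ("a", "exit"), ("b", "exit")])
rev = g.reverse(copy=True)

# immediate post-dominators of g == immediate dominators of the reversed graph
ipostdoms = nx.immediate_dominators(rev, "exit")
print(ipostdoms)  # {'exit': 'exit', 'a': 'exit', 'b': 'exit', 'head': 'exit'}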
def aggregate(self, t, state):
    acc = nx.DiGraph()
    ustate = state.to_undirected()

    def debug(g, ref=None):
        pos = nx.spring_layout(ref if ref else g)
        if ref:
            nx.draw(ref, pos, node_size=200, node_color="green", arrows=True)
        labels = nx.get_node_attributes(g, "arrivalTime")
        nx.draw(g, pos, labels=labels, node_size=150, node_color="red", arrows=True)
        plt.show()

    # extract all subgraphs
    active_transports = [
        state.subgraph(c) for c in nx.connected_components(ustate)
    ]

    def get_segment(_transport, _t):
        # Assumptions
        if not all([
            not (_transport.has_edge(u, v) and _transport.has_edge(v, u))
            for u, v in _transport.edges
        ]):
            # This can happen if a transport uses a segment twice
            pass

        # Find segment of current timestep t
        current_segment = None
        for u, v, data in _transport.edges(data=True):
            src = _transport.nodes[u].get("arrivalTime")
            dest = _transport.nodes[v].get("arrivalTime")
            if src <= _t < dest:
                current_segment = (u, v)
        return current_segment

    # Filter only transports that fit current timeframe
    first_time = None
    active_transports = [
        tp for tp in active_transports if get_segment(tp, t) is not None
    ]
    for transport in active_transports:
        for _, data in transport.nodes(data=True):
            candidates = [
                data.get("plannedArrivalTime"),
                data.get("arrivalTime"),
                data.get("plannedDepartureTime"),
                data.get("departureTime"),
            ]
            candidate = min(candidates)
            if first_time is None:
                first_time = candidate
            first_time = min(candidate, first_time)

    encoder = self.encoder(
        first_time=first_time,
        seq_len=self.seq_len,
        pred_seq_len=self.pred_seq_len,
        max_transports=len(active_transports),
        net=(self.net, self.net_mapping),
    )

    for transport in active_transports:
        current_segment = get_segment(transport, t)
        if current_segment is None:
            # Does not fit current timeframe
            continue
        cu, cv = current_segment

        # Include some ancestors and descendants
        desc = list(nx.nodes(nx.dfs_tree(transport, cv)))[:self.pred_seq_len]
        anc = list(nx.nodes(nx.dfs_tree(transport.reverse(copy=True), cu)))[:self.seq_len]
        assert len(set(desc + anc)) <= self.pred_seq_len + self.seq_len + 1
        debug_sg = desc + anc + [cu, cv]
        if False:
            debug(state.subgraph(debug_sg), ref=transport)

        encoder.add(transport, center_edge=(cu, cv), anc=anc, desc=desc)

    return encoder.encode()
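# The ancestor/descendant trick in aggregate generalizes: a DFS tree on the
# reversed graph enumerates everything that can reach a node, just as a DFS
# tree on the graph itself enumerates everything reachable from it:
import networkx as nx

chain = nx.DiGraph([("a", "b"), ("b", "c"), ("c", "d")])
reachable_from_c = list(nx.dfs_tree(chain, "c"))               # ['c', 'd']
reaching_c = list(nx.dfs_tree(chain.reverse(copy=True), "c"))  # ['c', 'b', 'a']
print(reachable_from_c, reaching_c)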
def twotrees(urlShpFile):
    # roadGraphd = nx.read_shp(urlShpFile)
    # roadGraph = roadGraphd.to_undirected()
    roadGraphd = nx.read_shp(urlShpFile)
    roadGraphud = roadGraphd.to_undirected()
    roadGraph = nx.minimum_spanning_tree(roadGraphud)
    nodeList = roadGraph.nodes(data=True)
    _nodeList = roadGraph.nodes(data=False)
    _nodeLst = []
    for node in _nodeList:
        if len(nx.neighbors(roadGraph, node)) > 3:
            _nodeLst.append(node)
    _node1 = random.choice(_nodeLst)
    mstsucc = nx.bfs_successors(roadGraph, source=_node1)
    for i, key in enumerate(mstsucc.keys()):
        print i, key, mstsucc[key]
    dfsT = nx.dfs_tree(roadGraph, source=_node1)
    bfsT = nx.bfs_tree(roadGraph, source=_node1)
    print bfsT.nodes()
    print bfsT.adjacency_list()
    print nx.bfs_successors(bfsT, source=_node1)
    plt.figure(1, figsize=(12, 12))
    nNode = len(nodeList)
    pos = []
    for i in xrange(nNode):
        pos.append(nodeList[i][0])
    shpLayout = dict(zip(roadGraph, pos))
    print "number of nodes: " + str(nx.number_of_nodes(roadGraph))
    print "number of edges: " + str(nx.number_of_edges(roadGraph))
    nx.draw_networkx_edges(roadGraph, pos=shpLayout, edgelist=None,
                           width=1, edge_color='b')
    nx.draw_networkx_nodes(dfsT, pos=dict(zip(dfsT, dfsT.nodes())), nodelist=[_node1],
                           node_size=60, node_color='k', node_shape='o')
    nx.draw_networkx(dfsT, pos=dict(zip(dfsT, dfsT.nodes())), nodelist=dfsT.nodes(),
                     node_size=40, node_color='r', node_shape='d',
                     edgewidth=10, edge_color='r', with_labels=False)
    plt.figure(2, figsize=(12, 12))
    nx.draw_networkx(bfsT, pos=dict(zip(bfsT, bfsT.nodes())), nodelist=bfsT.nodes(),
                     node_size=40, node_color='r', node_shape='d',
                     edgewidth=10, edge_color='r', with_labels=False)
    nx.draw_networkx_nodes(bfsT, pos=dict(zip(bfsT, bfsT.nodes())), nodelist=[_node1],
                           node_size=60, node_color='k', node_shape='o')
    plt.show()
print("") bfsShortHop = input("Enter two nodes to get the shortest hop path using BFS (e.g. shirt tie) (e.g. A B): ") print("") bfsShortHop = bfsShortHop.split() try: x = bidirectional_shortest_path(G, bfsShortHop[0], bfsShortHop[1]) print("The shortest hop path from " + str(bfsShortHop[0]) + " to " + str(bfsShortHop[1])+" is ", end='') print(x) except NetworkXNoPath: print("The is no possible path from "+str(bfsShortHop[0])+" to "+str(bfsShortHop[1])) # DFS Tree print("") dfsTree = input("Enter a source node for a DFS Tree (e.g. A) (e.g. shirt): ") print("The DFS Tree is: ", end='') T = nx.dfs_tree(G, source=dfsTree, depth_limit=99) # Print as deep as possible for case tests print(list(T.edges)) print("") # DFS Topological Sort choice = input("Would you like to see a topological sort tree of the graph? (Y/n): ") if choice == 'Y' or 'y': print("") print("The Topological sort based off of this graph is:") try: print(list(nx.topological_sort(G))) except NetworkXError: print("Error: Can't do Topological sort on undirected graphs") except NetworkXUnfeasable: print("Error: Can't get topological sort of a graph w/ a cycle changed during iteration")
def main():
    # 1st row: Number of agents;Number of meetings;Number of variables
    useAgents = True

    # Open file
    # inputFilename = 'constraint_graphs/dcop_constraint_graph'
    # inputFilename = 'constraint_graphs/dcop_simple'
    inputFilename = 'constraint_graphs/DCOP_Problem_300'
    input = open(inputFilename, 'r')

    # Read first line
    [nrAgents, nrMeetings, nrVars] = uv2.readLine(input)
    print("Number of agents:%d \nNumber of meetings:%d \nNumber of variables:%d" % (nrAgents, nrMeetings, nrVars))

    # Read variables
    global agentsList
    varList, agentsList = uv2.readVariables(input, nrVars)

    # Read preference
    agentsList = uv2.readPrerefence(input, agentsList)

    # Create internal/node matrix per meeting
    agentsList = uv2.buildPrefMatrixInternal(agentsList)

    print('-----------Variables Graph--------------')
    graphVariables = {}
    for v in varList:
        graphVariables[v.varId] = ptree.getAllVarsWithSameMeeting(varList, v.meetingId, v.varId)
    print(graphVariables)

    print('-----------Agents Graph--------------')
    graphAgents = {}
    for id, attr in agentsList.items():
        graphAgents[id] = ptree.getAllAgentsWithSameMeeting(agentsList, attr.meetings, id)
    print(graphAgents)

    graph = graphVariables
    if useAgents == True:
        graph = graphAgents

    # Add all edges to graph
    graph_edges = []
    for k, l in graph.items():
        for v in l:
            graph_edges.append((k, v))
    graph_edges = [list(tpl) for tpl in list(set([tuple(sorted(pair)) for pair in graph_edges]))]
    print(graph_edges)

    # Create graph
    G = nx.Graph()

    # Constraint graph
    for e in graph_edges:
        G.add_edge(*e, color='black')

    layout = graphviz_layout(G, prog="dot")
    nx.draw(G, layout, with_labels=True, node_color='#efedf2', arrowsize=1)  # , connectionstyle="arc3,rad=0.1"
    output = "root_" + str(root_node) + ".png"
    plt.savefig(output, format="PNG")

    # Create dfs tree with specified root node
    TreeDfs = nx.dfs_tree(G, root_node)

    print("----------------")
    back_edges = []
    for node, connected in graph.items():
        e = set(TreeDfs.edges([node]))
        shouldBe = []
        for con in connected:
            if (node, con) in e:
                continue
            if (con, node) in e:
                continue
            if TreeDfs.has_edge(node, con):
                continue
            if TreeDfs.has_edge(con, node):
                continue
            shouldBe.append((node, con))
        back = set(shouldBe) - e
        back_edges.append(back)

    back_edges = [item for sublist in back_edges for item in sublist]
    back_edges = [list(tpl) for tpl in list(set([tuple(sorted(pair)) for pair in back_edges]))]
    print(back_edges)

    for e in back_edges:
        TreeDfs.add_edge(*e, color='blue', style='dashed')

    # create relations based on tree edges
    create_relations(TreeDfs, agentsList)

    # find leaves in order to start compute util process
    leaves = find_leave_nodes(TreeDfs)

    edges = TreeDfs.edges.data('color', default='black')
    colors = []
    for _, _, c in edges:
        colors.append(c)

    # layout = graphviz_layout(TreeDfs, prog="dot")
    # nx.draw(TreeDfs, layout, edge_color=colors, with_labels=True, node_color='#efedf2', arrowsize=1)
    # output = "root_"+str(root_node)+".png"
    # plt.savefig(output, format="PNG")
    # print(nx.shortest_path_length(TreeDfs,root_node))

    print("Leaves are:", leaves)
    send_util_msg(TreeDfs, leaves, msgCounter, msgSizePerCycleCounter, cycleCounter)
    send_value_msg()

    # Constraints = Number of edges + Inequality constraints
    EQConstraints = TreeDfs.number_of_edges()

    # iterate through every agent and count number of inequality constraints
    NEQConstraints = 0
    for id, attr in agentsList.items():
        i = len(attr.meetings) - 1
        count = 0
        while i > 0:
            count += i
            i -= 1
        NEQConstraints += count

    # Print table results, as in paper
    print("Number of agents:%d \nNumber of meetings:%d \nNumber of variables:%d" % (nrAgents, nrMeetings, nrVars))
    print("Total Constraints", EQConstraints + NEQConstraints)
    print("\tEquality constraints", EQConstraints)
    print("\tInequality constraints", NEQConstraints)
    print("Total number of messages:%d" % (len(msgCountPerIteration) * 2))
    print("Max message size:%d" % (max(MESSAGES_SIZE)))
    print("Cycles:%d" % (len(cyclePerLevel) * 2))
def updateNodeBusinessAmountByRing(ringName):
    import networkx as nx
    import copy
    import json
    import re
    from django.db.models import Q, F

    linkList = hw_ipran_link.objects.filter(Q(ring=ringName)&Q(isDelete=False)).values_list('source','dest')
    # 'ValuesListQuerySet' object has no attribute 'extend', so convert it to a list
    linkList = list(linkList)
    nodeTuple = reduce(lambda x,y:x+y, linkList)  # [(1,2),(2,3)] --> (1,2,2,3)
    nodeTuple = tuple(set(nodeTuple))  # (1,2,2,3) --> {1,2,3} --> (1,2,3)
    # filter the ring-attachment (root) nodes; without tuple() this is a generator whose length cannot be taken
    rootNodeTuple = tuple(a for a in nodeTuple if re.search(r'J\d{3,4}',a))
    rootLinkList = zip(*[iter(rootNodeTuple[i:]) for i in range(2)])  # (1,2,3,4) --> [(1,2),(2,3),(3,4)]; (1,) --> []; () --> []
    if len(rootNodeTuple)==0 or len(rootNodeTuple)>2:
        # ToDo: raise error
        print u"Ring {0} has more than 2 or zero attachment nodes".format(ringName)
        # return None
    linkList.extend(rootLinkList)
    G = nx.Graph()
    G.add_edges_from(linkList)
    try:
        CycleNode = nx.cycle_basis(G)[0]  # all nodes on the ring
        CycleNodeNum = len(CycleNode)-len(rootNodeTuple)
        ChainNodeNum = len(nodeTuple)-len(CycleNode)
    except:
        CycleNode = []  # no cycle could be built, so treat the ring as an empty list
        CycleNodeNum = 0
        ChainNodeNum = len(nodeTuple)-len(rootNodeTuple)
    ring.objects.update_or_create(
        name = ringName,
        defaults = {
            'topoStruct': json.dumps(linkList, ensure_ascii=False),  # keeps Chinese characters readable in the dump
            'ringNodeNum': CycleNodeNum,
            'chainNodeNum': ChainNodeNum
        }
    )
    for NodeName in nodeTuple:
        I = G.copy()  # work on a copy so each iteration does not mutate the original graph G
        CycleNodeCopy = copy.deepcopy(CycleNode)  # copy the list, otherwise remove() would mutate the master list
        if NodeName in rootNodeTuple:
            # attachment nodes themselves are skipped
            continue
        elif NodeName in CycleNodeCopy:
            # the node of interest is on the ring: remove the other ring nodes (branch-chain nodes excluded)
            CycleNodeCopy.remove(NodeName)
            I.remove_nodes_from(CycleNodeCopy)
        else:
            # otherwise remove the nodes on the shortest path from the attachment node to this node
            try:
                ShortestNode = nx.dijkstra_path(I, rootNodeTuple[0], NodeName)
            except:
                print u'Cannot compute the shortest path from node {0} to the attachment node; the attachment node may be wrong'.format(NodeName)
                continue
            ShortestNode.remove(NodeName)
            I.remove_nodes_from(ShortestNode)
        H = nx.dfs_tree(I, NodeName)  # the DFS tree now yields the node plus everything hanging below it
        TreeNodeList = H.nodes()  # all downstream nodes
        # TreeNodeList.remove(NodeName)  # why would the node itself need removing?
        # print NodeName
        try:
            NodeObj = hw_ipran_node.objects.get(name=NodeName)
        except:
            # print NodeName
            # a node named in the link table was missing from the node table (a Huawei data issue to follow up on)
            print u'Cannot update the downstream business amount of node "{1}" on ring {0}: the node is missing from the node table'.format(ring, NodeName)
            continue  # NodeObj is undefined here, so skip this node
        BusinessAmount = 0
        for TreeNodeName in TreeNodeList:
            TreeNodeObj = hw_ipran_node.objects.get(name=TreeNodeName)
            BusinessAmount += TreeNodeObj.SelfBusinessAmount
        NodeObj.BusinessAmount = BusinessAmount
        NodeObj.save()
def get_subtrees(tree, roots, mode='all', area=0, degree=0):
    """ Extracts the subtrees rooted at roots from tree.
    If a mode is given, further restricts to a sub-subtree
    which has some desired property.

    Parameters:
        tree: The hierarchical, marked tree we are interested in.

        roots: The root node ids of the subtrees we are interested in

        mode: 'all': extract full subtree
              'area': extract subtree whose loop area is closest to area
              'degree': extract subtree whose degree is closest to degree

        area: The area for the 'area' mode

        degree: The degree for the 'degree' mode

    Returns:
        subtrees: List of requested subtrees

        roots: List of roots of the requested subtrees
    """
    # Obtain subtrees as subgraphs and properly set root nodes
    subtrees = [
        nx.DiGraph(tree.subgraph(nx.dfs_tree(tree, r).nodes_iter()))
        for r in roots
    ]

    if mode == 'area':
        roots = []
        for st in subtrees:
            # Find node with area closest to area
            ar, root = min([(abs(data['cycle_area'] - area), r)
                            for r, data in st.nodes_iter(data=True)])
            ar = st.node[root]['cycle_area']
            roots.append(root)
            print("Subtree closest to {} has area {}, degree {}, root {}".format(
                area, ar, st.node[root]['subtree-degree'], root))

        # Recalculate subtrees
        subtrees = [
            nx.DiGraph(tree.subgraph(nx.dfs_tree(tree, r).nodes_iter()))
            for r in roots
        ]
    elif mode == 'degree':
        roots = []
        for st in subtrees:
            # Find node with degree closest to degree
            de, root = min([(abs(data['subtree-degree'] - degree), r)
                            for r, data in st.nodes_iter(data=True)])
            de = st.node[root]['subtree-degree']
            roots.append(root)
            print("Subtree closest to {} has degree {}, area {}, root {}".format(
                degree, de, st.node[root]['cycle_area'], root))

        # Recalculate subtrees
        subtrees = [
            nx.DiGraph(tree.subgraph(nx.dfs_tree(tree, r).nodes_iter()))
            for r in roots
        ]

    # Save subtree roots in tree attributes
    for s, r in zip(subtrees, roots):
        s.graph['root'] = r
        s.node[r]['order'] = 0

    return subtrees, roots
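# The core move in get_subtrees - materializing the subtree hanging below a
# root as its own graph - is a subgraph over a DFS tree's node set. In
# networkx 2.x (where nodes_iter is gone) the same step reads:
import networkx as nx

tree = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4)])
subtree = nx.DiGraph(tree.subgraph(nx.dfs_tree(tree, 2).nodes()))
print(sorted(subtree.edges()))  # [(2, 3), (2, 4)]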
def trajectory_tree(self, adata, root_node=None, tree=None, groupby="leiden",
                    method="euclid", dimension="pca", copy=False):
    # if adata.X is a scipy sparse matrix, adata.X is converted to a numpy array.
    if not isinstance(adata.X, np.ndarray):
        adata_tmp = adata.copy()
        adata_tmp.X = adata.X.toarray()
    else:
        adata_tmp = adata.copy()

    # add numpy array: cluster centroid matrix to adata.uns["cluster_centroid"]
    # index of cluster_centroid is adata.obs["leiden"].cat.categories
    cluster_centroid_data = self.calculate_cluster_centroid(
        adata_tmp, dimension=dimension, groupby=groupby)
    adata.uns["cluster_centroid"] = cluster_centroid_data

    # add networkx.classes.digraph.DiGraph of trajectory to adata.uns["tree"]["tree"]
    # add postorder list to adata.uns["tree"]["postorder"]
    # add successors dictionary to adata.uns["tree"]["successors"]
    # add root node str to adata.uns["tree"]["root_node"]
    adata.uns["capital"] = {}
    adata.uns["capital"]["tree"] = {}
    tree_dict = adata.uns["capital"]["tree"]
    tree_dict["annotation"] = groupby

    if tree is None:
        if root_node is not None:
            tree_dict["root_node"] = root_node
        else:
            root_node = self.calculate_root_node(adata_tmp, threshold=0.5, groupby=groupby)
            tree_dict["root_node"] = root_node
            print("{} is set as a root node".format(root_node))

        tree = self._convert_to_tree(
            adata_tmp, root_node, cluster_centroid_data, method, groupby=groupby)

        postorder, successors = self._set_postorder_successors(tree, root_node)

        tree_dict["tree"] = nx.convert_matrix.to_pandas_adjacency(tree, nodelist=tree.nodes())
        tree_dict["postorder"] = postorder
        tree_dict["successors"] = successors
    else:
        if type(tree) is nx.DiGraph:
            root_node_from_tree = list(nx.topological_sort(tree))[0]
            if root_node is None:
                root_node = root_node_from_tree
                postorder, successors = self._set_postorder_successors(tree, root_node)
            elif root_node is not None:
                if root_node_from_tree == root_node:
                    postorder, successors = self._set_postorder_successors(tree, root_node)
                else:
                    raise ValueError(
                        "root node of tree and passed argument 'root_node' did not match.")
        elif type(tree) is nx.Graph and root_node is not None:
            tree = nx.dfs_tree(tree, root_node)
            postorder, successors = self._set_postorder_successors(tree, root_node)
        else:
            raise ValueError(
                "Argument 'tree' must be nx.DiGraph or nx.Graph with 'root_node' argument passed")

        tree_dict["root_node"] = root_node
        tree_dict["tree"] = nx.convert_matrix.to_pandas_adjacency(tree, nodelist=tree.nodes())
        tree_dict["postorder"] = postorder
        tree_dict["successors"] = successors

    return adata if copy else None
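# The nx.Graph branch above isolates a common idiom: dfs_tree turns an
# undirected tree into a directed one rooted at a chosen node, with every
# edge pointing away from the root:
import networkx as nx

undirected = nx.Graph([("root", "a"), ("a", "b"), ("root", "c")])
directed = nx.dfs_tree(undirected, "root")
print(list(directed.edges()))
# e.g. [('root', 'a'), ('a', 'b'), ('root', 'c')]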
def _search_oncotree_diagnosis(onco_tree, c):
    """Add all the oncotree nodes"""
    nodes = []
    tmpc = {'ONCOTREE_PRIMARY_DIAGNOSIS_NAME': {}}
    for key in c['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'].keys():

        # loop through all diagnoses
        diagnoses = c['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'][key]
        if not isinstance(diagnoses, list):
            diagnoses = [diagnoses]

        for txt in diagnoses:
            if txt.endswith("_LIQUID_") or txt.endswith("_SOLID_"):

                # build the nodes for liquid.
                node1 = oncotreenx.lookup_text(onco_tree, "Lymph")
                node2 = oncotreenx.lookup_text(onco_tree, "Blood")

                nodes1 = list(nx.dfs_tree(onco_tree, node1))
                nodes2 = list(nx.dfs_tree(onco_tree, node2))
                nodes = list(set(nodes1).union(set(nodes2)))

                # if it's really solid, take the inverse.
                if txt == "_SOLID_":
                    all_nodes = set(list(onco_tree.nodes()))
                    tmp_nodes = all_nodes - set(nodes)
                    nodes = list(tmp_nodes)

            else:
                # get tree node.
                node = oncotreenx.lookup_text(onco_tree, txt)

                # get its children.
                if onco_tree.has_node(node):
                    # list of nodes.
                    nodes = list(nx.dfs_tree(onco_tree, node))

            # replace it with free text.
            nodes_txt = [onco_tree.node[n]['text'] for n in nodes]

            if key == '$eq':
                key = '$in'
                tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'][key] = nodes_txt
            elif key == '$ne':
                key = '$nin'
                tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'][key] = nodes_txt
            elif key == '$in':
                if '$in' in tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']:
                    tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']['$in'] += nodes_txt
                else:
                    tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']['$in'] = nodes_txt
            elif key == '$nin':
                if '$nin' in tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']:
                    tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']['$nin'] += nodes_txt
                else:
                    tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']['$nin'] = nodes_txt

    # remove duplicates
    for k in tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']:
        tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'][k] = list(
            set(tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME'][k]))

    return tmpc['ONCOTREE_PRIMARY_DIAGNOSIS_NAME']
# print(classDict.keys(), '\n', classDict.items())
# print(len(classDict))
# # print(dataFilePath)
G = nx.read_edgelist(dataFilePath, create_using=nx.MultiDiGraph())
# print(edges.number_of_edges())
print("Nnodes: ", G.number_of_nodes(), "\n")
print("Nedges: ", G.number_of_edges(), "\n")
deg3 = sorted(G.degree_iter(), key=itemgetter(1), reverse=True)
dd = deg3[0]
tree = nx.dfs_tree(G, dd[0])

def hierarchy_pos(G, root, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5,
                  pos=None, parent=None):
    '''If there is a cycle that is reachable from root, then this will see
    infinite recursion.
    G: the graph
    root: the root node of current branch
    width: horizontal space allocated for this branch - avoids overlap with other branches
    vert_gap: gap between levels of hierarchy
    vert_loc: vertical location of root
    xcenter: horizontal location of root
    pos: a dict saying where all nodes go if they have been assigned
    parent: parent of this branch.'''
    if pos is None:
        pos = {root: (xcenter, vert_loc)}
    else:
        pos[root] = (xcenter, vert_loc)