def partial_figure(self, including_nodes=None, source=None, *, n=None, n_at_level=3, n_expand=1):
    """
    Generate a partial figure of the graph.

    Parameters
    ----------
    including_nodes : iterable or None
        An iterable containing node codes or names (or a mix).
    source : nodecode, optional
        All paths from this node to everything in `including_nodes` will be
        represented.  Defaults to `root_id`.
    n : int, optional
        If `including_nodes` is None, select this number of nodes randomly.
    n_at_level : int, optional
        If `including_nodes` and `n` are both None, take up to this many
        successors of each expanded node.
    n_expand : int, optional
        Number of those successors that are themselves expanded further.

    Returns
    -------
    Elem
    """
    if source is None:
        source = self.root_id
    from networkx.algorithms.simple_paths import all_simple_paths
    shows = set()
    if including_nodes is None and n is not None:
        # NodeView is not positionally indexable, so materialize it first
        including_nodes = sorted(
            numpy.random.choice(list(self.nodes), n, replace=False))
    if including_nodes is None and n_at_level is not None:
        from collections import deque
        import itertools
        q = deque([self.root_id])
        including_nodes = []
        while q:
            i = q.popleft()
            take = tuple(itertools.islice(self.successors(i), n_at_level))
            q.extend(take[:n_expand])
            including_nodes.extend(take)
    # Add every node in every path from `source` to each of `including_nodes`
    for each_node in including_nodes:
        if each_node in self.nodes:
            for i in all_simple_paths(self, source, each_node):
                for j in i:
                    shows.add(j)
        else:
            for each_node_ in self.get_nodes_by_name(each_node):
                for i in all_simple_paths(self, source, each_node_):
                    for j in i:
                        shows.add(j)
    s = self.subgraph(shows)
    return graph_to_figure(s)

import networkx as nx
from networkx.algorithms.simple_paths import all_simple_paths


def get_number_of_paths(intervals, adapters):
    for (start, stop) in intervals:
        # get adapters in interval
        interval_adapters = adapters[adapters.index(start):adapters.index(stop) + 1]
        print("adapters for interval ({},{}): {}".format(start, stop, interval_adapters))
        # create graph with all possible connections
        G = nx.DiGraph()
        for a in interval_adapters:
            G.add_node(a)
            # find adapters with value a+1, a+2, a+3
            if a + 1 in adapters:
                G.add_node(a + 1)
                G.add_edge(a, a + 1, weight=1)
            if a + 2 in adapters:
                G.add_node(a + 2)
                G.add_edge(a, a + 2, weight=2)
            if a + 3 in adapters:
                G.add_node(a + 3)
                G.add_edge(a, a + 3, weight=3)
        # calculate number of paths
        path_count = 0
        print("paths:")
        for path in all_simple_paths(G, start, stop):
            print(path)
            path_count += 1
        print("path count for interval ({},{}): {}".format(start, stop, path_count))
        intervals[(start, stop)] = path_count
    return intervals

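# Hedged usage sketch for get_number_of_paths, with made-up adapter values
# (not the original puzzle input), just to show the shape of the result:
demo_adapters = [0, 1, 2, 3]
demo_intervals = {(0, 3): None}
print(get_number_of_paths(demo_intervals, demo_adapters))
# -> {(0, 3): 4}   (paths 0-3, 0-1-3, 0-2-3, 0-1-2-3)
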
def _updatePaths(self):
    for i in range(len(self._hints)):
        source = self._hints[i]
        for j in range(i + 1, len(self._hints)):
            target = self._hints[j]
            # L is the path-length cutoff, presumably a constant defined
            # elsewhere in the original module
            path = all_simple_paths(self._graph, source, target, L)
            self._paths[(source, target)] = sorted(list(path), key=len)

def find_dependents(zip, dependency_regex, entry_points, targets):
    from networkx import DiGraph
    from networkx.algorithms.simple_paths import all_simple_paths

    nodes = zip.namelist()
    dependency_graph = DiGraph()
    dependency_graph.add_nodes_from(nodes)
    for node in nodes:
        with zip.open(node, 'r') as zipped_file:
            content = zipped_file.read()
        for match in dependency_regex.finditer(content):
            dependency_graph.add_edge(node, match.group())
            dependency_graph.add_edge(node, match.group() + 'bin')

    dependents = set()
    actual_targets = set()
    for source in entry_points:
        for target in targets:
            for path in all_simple_paths(dependency_graph, source, target):
                actual_targets.add(path[-1])
                for dependent in path[0:-1]:
                    dependents.add(dependent)
    return list(dependents), list(actual_targets)

def solve_out_tips(graph, ending_nodes):
    '''removes unwanted out tips'''
    new_graph = nx.DiGraph(graph)
    new_ending_nodes = ending_nodes
    while True:
        # each time we change the graph
        # we start a new iteration with the new list of nodes and ending_nodes
        nodes = list(new_graph.nodes)
        new_ending_nodes = [
            ending_node for ending_node in new_ending_nodes
            if ending_node in nodes
        ]
        done = True
        for node in nodes:
            if len(list(new_graph.successors(node))) >= 2:
                path_list = []
                for ending_node in new_ending_nodes:
                    path_list.extend(
                        all_simple_paths(new_graph, node, ending_node))
                path_length = [len(path) for path in path_list]
                weight_avg_list = [
                    path_average_weight(graph, path) for path in path_list
                ]
                new_graph = select_best_path(new_graph, path_list, path_length,
                                             weight_avg_list,
                                             delete_sink_node=True)
                done = False
                break
        if done:
            # done remains True if no node has 2 or more successors
            break
    return new_graph

def __init__(self, G: nx.DiGraph, shared: List[str]):
    super().__init__()
    self.orig_graph = G
    self.shared_nodes = shared
    orig_inputs = utils.get_input_nodes(self.orig_graph)
    self.shared_inputs = list(
        set(orig_inputs).intersection(set(self.shared_nodes)))
    self.outputs = utils.get_output_nodes(self.orig_graph)

    # Get all paths from shared inputs to shared outputs
    new_inputs = list(set(self.shared_nodes) - set(self.outputs))
    self.paths = [
        pth for (inp, out) in product(new_inputs, self.outputs)
        for pth in all_simple_paths(self.orig_graph, inp, out)
    ]

    # Get all edges needed to blanket the included nodes
    main_nodes = list(set([node for path in self.paths for node in path]))
    self.cover_edges = [[pred, node] for node in main_nodes
                        for pred in self.orig_graph.predecessors(node)
                        if pred not in main_nodes]

    # Need to include children and parents of children for markov blanket
    # successors = [[node, succ] for node in main_nodes
    #               for succ in self.orig_graph.successors(node)
    #               if succ not in main_nodes]
    # succ_preds = [[pred, node] for node in successors
    #               for pred in self.orig_graph.predecessors(node)
    #               if pred not in main_nodes]
    # self.cover_edges.extend(successors)
    # self.cover_edges.extend(succ_preds)

    self.cover_nodes = [node for node, _ in self.cover_edges]

    for path in self.paths:
        self.add_edges_from(list(zip(path, path[1:])))
    self.add_edges_from(self.cover_edges)

    for node_name in self.cover_nodes:
        self.nodes[node_name]["color"] = forestgreen
        self.nodes[node_name]["fontcolor"] = forestgreen

    for node_name in self.shared_nodes:
        self.nodes[node_name]["color"] = dodgerblue3
        self.nodes[node_name]["fontcolor"] = dodgerblue3
        for dest in self.successors(node_name):
            self[node_name][dest]["color"] = dodgerblue3
            self[node_name][dest]["fontcolor"] = dodgerblue3

    for source, dest in self.cover_edges:
        self[source][dest]["color"] = forestgreen

    for node in self.nodes(data=True):
        node[1]["fontname"] = FONT

    for node_name in self.shared_inputs:
        self.nodes[node_name]["penwidth"] = 3.0

from networkx.algorithms.simple_paths import all_simple_paths


def get_contigs(graph, starting_nodes, ending_nodes):
    '''returns a list of tuples containing each contig and its length'''
    contigs = []
    for starting_node in starting_nodes:
        for ending_node in ending_nodes:
            paths = list(all_simple_paths(graph, starting_node, ending_node))
            for path in paths:
                contig = path[0]
                for node in path[1:]:
                    contig += node[-1]
                contigs.append((contig, len(contig)))
    return contigs

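# Hedged usage sketch on a tiny, made-up 3-mer graph (ATG -> TGC -> GCA);
# nodes are assumed to be k-mers, so each step appends its last character:
import networkx as nx

demo_graph = nx.DiGraph([("ATG", "TGC"), ("TGC", "GCA")])
print(get_contigs(demo_graph, ["ATG"], ["GCA"]))
# -> [('ATGCA', 5)]
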
def get_contigs(graph, inputs, outputs):
    contigs = []
    for start_node in inputs:
        for sink_node in outputs:
            all_paths = all_simple_paths(graph, start_node, sink_node)
            for one_path in all_paths:
                contig = one_path[0]
                for cont in range(1, len(one_path)):
                    contig = contig + one_path[cont][-1]
                # store contig in tuple, one entry per path,
                # with the length taken from the assembled contig
                tuple_temp = (contig, len(contig))
                contigs.append(tuple_temp)
    return contigs

from networkx.algorithms.simple_paths import all_simple_paths


def hamilton(self, source):
    starting_vertex = [
        vertex for vertex in self.G.edges if source in vertex
    ]
    paths = []
    for vertex in starting_vertex:
        # index of the edge endpoint that is not `source`
        offset_source = (vertex.index(source) + 1) % 2
        print(offset_source, ", ", vertex[offset_source])
        # "cycle" from the offset endpoint back to `source`
        offset_paths = all_simple_paths(self.G, vertex[offset_source], source)
        print(offset_paths)
        paths.extend([[source, *path] for path in offset_paths])
    print([
        path for path in paths
        if len(path) == self.G.number_of_nodes() + 1
    ])

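# Minimal sketch of how hamilton() above might be exercised; the wrapper class
# is hypothetical and only supplies the self.G attribute the method expects.
import networkx as nx


class HamiltonDemo:
    def __init__(self, G):
        self.G = G


HamiltonDemo.hamilton = hamilton  # reuse the function above as a method

HamiltonDemo(nx.cycle_graph(4)).hamilton(0)
# the final print lists the two orientations of the square's Hamiltonian
# cycle through node 0: [0, 1, 2, 3, 0] and [0, 3, 2, 1, 0]
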
def all_paths(pedigrees, joins, src_type, dst_type):
    """Generates a set of all paths with associated link info, sorted."""
    G = nx.MultiDiGraph()
    G.add_nodes_from(pedigrees.keys())
    # https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.MultiGraph.add_edges_from.html?highlight=add_edges_from#networkx.MultiGraph.add_edges_from
    G.add_edges_from([(j['child_id'], j['id'], j) for j in joins])
    # print('dag_longest_path', dag_longest_path(G))
    # print('local_node_connectivity', local_node_connectivity(G, src_type, dst_type))
    # print('dijkstra', dijkstra_path(G, src_type, dst_type))
    # paths = set([p for p in dijkstra_path(G, src_type, dst_type)])
    # print(paths)

    # make a serialized string, so set can unique
    paths = set([
        '->'.join(p)
        for p in [p for p in all_simple_paths(G, src_type, dst_type)]
    ])
    paths = [p.split('->') for p in sorted(paths)]
    for path in paths:
        enriched_path = []
        it = iter(path)
        dst = None
        for src in it:
            if dst:
                edge_data = G.get_edge_data(src, dst)
                if edge_data:
                    edge_data = edge_data[0]
                enriched_path.append({
                    'src_type': dst,
                    'dst_type': src,
                    'link': edge_data
                })
            dst = next(it, None)
            edge_data = G.get_edge_data(src, dst)
            if edge_data:
                edge_data = edge_data[0]
            enriched_path.append({
                'src_type': src,
                'dst_type': dst,
                'link': edge_data
            })
        yield enriched_path

def find_best_prob_graph(self, env, source, target, time, evidences):
    # get all simple paths
    edges_path_lists = []
    nodes_paths_gen = all_simple_paths(env.graph, source, target)
    for nodes_path in nodes_paths_gen:
        # convert to a list of edges
        new_path = []
        for i in range(len(nodes_path) - 1):
            new_path.append((nodes_path[i], nodes_path[i + 1]))
        edges_path_lists.append(new_path)

    path_probs = []
    for path in edges_path_lists:
        path_probs.append(self.prob_path_not_blocked(path, time, evidences))
    max_prob = max(path_probs)
    return edges_path_lists[path_probs.index(max_prob)]

def _test_graph(g, gold, ys):
    print("Starting to Calculate Statistics")
    true_pos = 0
    false_pos = 0
    true_neg = 0
    false_neg = 0  # will always be zero, because we always consider ones bigger than the estimate
    print("RESULTS")
    for start in g.nodes():
        for end in g.nodes():
            if start == end:
                continue
            paths = simple_paths.all_simple_paths(g, start, end, cutoff=2)
            paths = list(paths)
            if not paths:
                continue
            midways = []
            for path in paths:
                midways.append(path[1])
            if len(midways) >= _MID_WAY_LOW_BOUND:
                if gold.get(start) and gold[start].get(end):
                    if gold[start][end] > 0:
                        gold[start][end] -= 1
                        true_pos += 1
                        ys[len(midways) - _MID_WAY_LOW_BOUND] += 1
                    else:
                        false_pos -= 1
                else:
                    false_pos += 1
            elif gold.get(start) and gold[start].get(end) and gold[start][end] > 0:
                false_neg += 1
            else:
                true_neg += 1
    print("True Positives: %s\nTrue Negatives: %s, False Positives: %s, False Negatives: %s" % (
        true_pos, true_neg, false_pos, false_neg))
    print("Precision: %s%%" % (true_pos / float(true_pos + false_pos) * 100))
    print("Recall: %s%%" % (true_pos / float(true_pos + false_neg) * 100))
    print("Accuracy: %s%%" % ((true_pos + true_neg) /
                              (true_pos + true_neg + false_pos + false_neg) * 100))

def weightme(bagfrom, bagto):
    sum_paths = 0
    added_edges = set()
    for path in all_simple_paths(better_rules, bagfrom, bagto):
        p = 1
        q = 0
        for m in range(0, len(path) - 1):
            print(path[m], path[m + 1], ":",
                  better_rules.get_edge_data(path[m], path[m + 1])["weight"])
            current_weight = int(
                better_rules.get_edge_data(path[m], path[m + 1])["weight"])
            print(q, p)
            p *= current_weight
            if m != len(path) - 2 and ((path[m], path[m + 1]) not in added_edges):
                added_edges.add((path[m], path[m + 1]))
                q += current_weight
        print(path, q, p)
        print(added_edges)
        sum_paths = sum_paths + p + q
    return sum_paths

def path_contradiction_free_merge(graph_clusts, trace=False, trace_lemma=None):
    def filter_merge(weight):
        def filtered_graphs():
            for g in graph_clusts:
                # bind g per iteration: the views are consumed lazily by
                # reduce/compose, so a late-binding closure would otherwise
                # make every filter see only the last graph
                def filter_edge(n1, n2, g=g):
                    try:
                        return g.edges[n1, n2]['weight'] == weight
                    except KeyError:
                        return False
                yield subgraph_view(g, filter_edge=filter_edge)
        return reduce(compose, filtered_graphs())

    same_merged = filter_merge(1)
    diff_merged = filter_merge(-1)
    rm = set()
    # Iterate through all diff edges
    for u, v in diff_merged.edges:
        # Find all same paths between them
        found_contradiction = False
        for path in all_simple_paths(same_merged, u, v):
            # Found some? Okay delete the negative edge and all positive paths
            found_contradiction = True
            rm.update(pairwise(path))
            if trace:
                print(f"Contradiction in clustering for {trace_lemma}", file=sys.stderr)
                print((u, v), file=sys.stderr)
                print(path, file=sys.stderr)
        if found_contradiction:
            rm.add((u, v))
    # Put them back together and remove all the invalid edges
    merged = compose(same_merged, diff_merged)
    merged.remove_edges_from(rm)
    return merged

def labels(self, these_leaves):
    """ finds the label of each of 'these_leaves' """
    data = [
        np.array(list(all_simple_paths(self.value_tree, 0, n))).squeeze()
        for n in these_leaves
    ]
    idx = np.array([
        self.value_tree.nodes(data='var')[n] - 1
        for d in data for n in d[1:]
    ])
    val = np.array([
        self.value_tree.nodes(data='val')[n]
        for d in data for n in d[1:]
    ])
    var = np.concatenate(
        [np.ones(len(d) - 1, dtype=int) * i for i, d in enumerate(data)], -1)
    labels = np.zeros((self.num_vars, var.max() + 1)) * np.nan
    labels[idx, var] = val
    return labels

def all_paths(self, from_node, to_node):
    paths = all_simple_paths(self.reachability, from_node, to_node,
                             cutoff=len(self.parentage.nodes()))
    return paths

def get_all_paths(self, v_a, v_b):
    """
    Return all the paths (causal and non-causal alike) between `v_a` and `v_b`.
    """
    return list(simple_paths.all_simple_paths(self.G, v_a, v_b))

def set_routing_direction(edge_type_mat_allNodes, num_vert, pseudo_vert,
                          faces, vert_to_face):
    """
    Sets routing direction for traversing the scaffold route path.

    Parameters
    ----------
    edge_type_mat_allNodes : networkx.classes.digraph.DiGraph
        Network representation including link types.  Link types have the
        following possible values:
            -1 is half of a non-spanning tree edge (one side of scaffold crossover)
             2 is spanning tree edge: DX edge with 0 scaffold crossovers
    num_vert : int
        number of vertices, V
    pseudo_vert : list
        row vector where value j at index i indicates that vertex i
        corresponds to vertex j, one of the V real vertices
    faces : list
        List of lists.  The first dimension represents the face.  The second
        dimension holds the indices of all nodes creating that face.
    vert_to_face : list
        List of lists.  The first dimension represents the node.  The second
        dimension holds the indices of all faces that node is a part of.

    Returns
    -------
    route_real
        row vector of vertices listed in visitation order (only real vertex IDs)
    route_vals
        row vector of edge types, where the value at index j in route_vals is
        the edge type of the edge between the vertices route_real(j:j+1),
        wrapping around at the end
    """
    # Choose a starting connected node (arbitrary start position).
    # Vertices #1-#V (V = number of vertices) are no longer connected in the
    # graph network
    start_node = 2 * num_vert + 2  # this node is a pseudo-node at Vertex 1
    next_nodes = edge_type_mat_allNodes.neighbors(start_node)  # you'll have 2
    # TODO: Convert to generator, since you only need to make the second
    # path if the first is the wrong direction?
    for next_node in next_nodes:
        paths = all_simple_paths(
            edge_type_mat_allNodes, start_node, next_node)
        # you'll have one 1-length path because they're neighbors.  You want
        # the other path that includes all the other nodes:
        path = pick_longest_path(paths)
        route_real = path
        temp_route_real = route_real + [route_real[0]]
        route_vals = [edge_type_mat_allNodes
                      [temp_route_real[i]][temp_route_real[i + 1]]['type']
                      for i in range(len(route_real))]
        dereferenced_path = dereference_pseudonodes_in_path(path, pseudo_vert)
        route_real = dereferenced_path

        # TODO: Review that this code is really no longer needed given how I'm
        # picking start node / generating the paths.
        # Does this differ from previous 'magic' number that picks first vert?
        # # For consistency, start routing at a tree edge (route_vals = 2)
        # # with Vertex 1 upstream (route_real = 1)
        # start_route = (i for i in range(len(route_real))
        #                if route_real[i] == 1 and route_vals[i] == 2)
        # start_route = intersect(find(route_real == 1), find(route_vals == 2))
        # start_route = start_route(1)  # in case there are multiple choices
        #
        # # Shift route_real and route_vals to start at start_route
        # route_real = [route_real[start_route:end], route_real[0:start_route]]
        # route_vals = [route_vals(start_route:end), route_vals(1:start_route-1)]

        if check_direction(route_real, vert_to_face, faces):
            return [route_real, route_vals]
    raise Exception("one of the two above should have returned")

def all_simple_paths(self, source, target):
    return simple_paths.all_simple_paths(self.graph._g, source=source, target=target)

route('Fred Meyer', 'University Bridge', 'B-G Trail', 3.2, RouteTypes.MULTI_USE)
route('University Bridge', 'Lake Union Park', 'Eastlake Ave', 2.2, RouteTypes.MINOR_SEP)
route('Fred Meyer', 'Montlake Bridge', 'B-G Trail', 4.0, RouteTypes.MULTI_USE)
route('Montlake Bridge', 'Lake Union Park', 'Delmar Dr', 3.3, RouteTypes.MINOR_SEP)
route('Home', 'Greenlake', '77th St', 2.8, RouteTypes.ROAD)
route('Greenlake', 'University Bridge', 'Roosevelt Way', 2.9, RouteTypes.MINOR_SEP)
route('Greenlake', 'Fremont Bridge', 'Stone Way', 3.3, RouteTypes.MINOR_SEP)

num_paths = len(list(all_simple_paths(graph, 'Home', 'Work')))
print("Calculated {0} paths".format(num_paths))


def dfs(node, target, path=[]):
    if node == target:
        yield path
    for s, e, data in graph.edges(node, data=True):
        new_path = list(path)
        new_path.append((s, e, data))
        yield from dfs(e, target, new_path)


for path in dfs('Home', 'Work'):
    label = 'Home'
    distance = 0.

from networkx import DiGraph
from networkx.algorithms.simple_paths import all_simple_paths

with open("input.txt") as input_file:
    # with open("test_input.txt") as input_file:
    # with open("test_input2.txt") as input_file:
    adapters = [int(n.strip()) for n in input_file.readlines()]

builtin = max(adapters) + 3
adapters = [0] + sorted(adapters) + [builtin]

g = DiGraph()
for index, adapter in enumerate(adapters):
    for next_adapter in adapters[index + 1:index + 4]:
        try:
            if next_adapter - adapter <= 3:
                g.add_edge(adapter, next_adapter)
            else:
                break
        except IndexError:
            break

path_count = 0
for _ in all_simple_paths(g, min(adapters), max(adapters)):
    path_count += 1
print(path_count)

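# Alternative sketch (not part of the original solution): the same count can be
# computed without enumerating paths, via a dynamic-programming pass over the
# sorted adapters (assumes distinct ratings, as the puzzle guarantees); the
# all_simple_paths enumeration above becomes very slow as the count grows.
ways = {0: 1}
for adapter in adapters[1:]:
    ways[adapter] = (ways.get(adapter - 1, 0)
                     + ways.get(adapter - 2, 0)
                     + ways.get(adapter - 3, 0))
print(ways[adapters[-1]])  # same value as path_count above
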
# Build the graph with networkx to obtain the information and make the
# necessary changes
import networkx as nx
import json
from networkx.algorithms.simple_paths import all_simple_paths

with open("./data/disciplinas.json", 'r') as f:
    line = f.readline()
    disciplinas = json.loads(line)

G = nx.DiGraph()
G.add_nodes_from(list(disciplinas.keys()))
for key in list(disciplinas.keys()):
    for req in disciplinas[key]['requisitos']:
        G.add_edge(req, key)
    if (len(disciplinas[key]['requisitos']) == 0):
        G.add_edge('START', key)

# Get the longest chain of prerequisite courses for each course.
# Used to determine the vertex position when building the visualization in the app.
for key in list(disciplinas.keys()):
    max_path = [len(p) for p in all_simple_paths(G, 'START', key)]
    if (max_path):
        disciplinas[key]['maxpath'] = max(max_path) - 2
    else:
        disciplinas[key]['maxpath'] = 0

with open("./public/assets/data/disciplinas.json", 'w+') as f:
    json.dump(disciplinas, f)

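# Alternative sketch (assumes the prerequisite graph G above is acyclic): the
# longest chain from 'START' to every course can be computed in a single
# topological pass instead of enumerating every simple path.
longest = {'START': 0}
for node in nx.topological_sort(G):
    if node not in longest:  # not reachable from 'START'
        continue
    for succ in G.successors(node):
        longest[succ] = max(longest.get(succ, 0), longest[node] + 1)
# for reachable courses, longest[key] - 1 should match disciplinas[key]['maxpath']
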
g = DiGraph()
# add weighted edges
for line in lines:
    for entry in line[1].split(','):
        if "no other bags" not in entry:
            source_node = re.search(r'\w*\s\w*', line[0]).group(0)
            dest_node = re.search(r'(?<=\d\s)\w*\s\w*', entry).group(0)
            weight = re.search(r'(?<=\s)\d*', entry).group(0)
            g.add_edge(source_node, dest_node, weight=weight)

sources = []
# get all paths to shiny gold
for source in list(g.nodes):
    for path in all_simple_paths(g, source=source, target='shiny gold'):
        sources.append(path[0])
# remove duplicate sources and count
print(f'Task1: {len(set(sources))}')

# get all paths from shiny gold
res = []
for dest in list(g.nodes):
    for path in all_simple_paths(g, source='shiny gold', target=dest):
        res.append(path)

final_total = 0
# loop through each path from shiny gold
for path in res:
    total = 1

def bagpath(bagfrom, bagto):
    p = []
    for path in all_simple_paths(better_rules, bagfrom, bagto):
        p.append(path)
    # print(p)
    return p

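# Hedged usage sketch; better_rules is assumed to be a module-level DiGraph of
# bag-containment rules, so a tiny stand-in is built here purely for illustration.
import networkx as nx
from networkx.algorithms.simple_paths import all_simple_paths

better_rules = nx.DiGraph()
better_rules.add_edge("shiny gold", "dark olive", weight=1)
better_rules.add_edge("dark olive", "faded blue", weight=3)
print(bagpath("shiny gold", "faded blue"))
# -> [['shiny gold', 'dark olive', 'faded blue']]
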
def __init__(self, G: GroundedFunctionNetwork, shared_nodes: Set[str]):
    super().__init__()
    self.output_node = G.output_node
    self.inputs = set(G.inputs).intersection(shared_nodes)

    # Get all paths from shared inputs to shared outputs
    path_inputs = shared_nodes - {self.output_node}
    io_pairs = [(inp, G.output_node) for inp in path_inputs]
    paths = [p for (i, o) in io_pairs for p in all_simple_paths(G, i, o)]

    # Get all edges needed to blanket the included nodes
    main_nodes = {node for path in paths for node in path}
    main_edges = {(n1, n2) for path in paths
                  for n1, n2 in zip(path, path[1:])}
    self.cover_nodes = set()
    add_nodes, add_edges = list(), list()

    def place_var_node(var_node):
        prev_funcs = list(G.predecessors(var_node))
        if (len(prev_funcs) > 0
                and G.nodes[prev_funcs[0]]["label"] == "L"):
            prev_func = prev_funcs[0]
            add_nodes.extend([var_node, prev_func])
            add_edges.append((prev_func, var_node))
        else:
            self.cover_nodes.add(var_node)

    for node in main_nodes:
        if G.nodes[node]["type"] == "function":
            for var_node in G.predecessors(node):
                if var_node not in main_nodes:
                    add_edges.append((var_node, node))
                    if "::IF_" in var_node:
                        if_func = list(G.predecessors(var_node))[0]
                        add_nodes.extend([if_func, var_node])
                        add_edges.append((if_func, var_node))
                        for new_var_node in G.predecessors(if_func):
                            add_edges.append((new_var_node, if_func))
                            place_var_node(new_var_node)
                    else:
                        place_var_node(var_node)

    main_nodes |= set(add_nodes)
    main_edges |= set(add_edges)
    main_nodes = main_nodes - self.inputs - {self.output_node}

    orig_nodes = G.nodes(data=True)

    self.add_nodes_from([(n, d) for n, d in orig_nodes if n in self.inputs])
    for node in self.inputs:
        self.nodes[node]["color"] = dodgerblue3
        self.nodes[node]["fontcolor"] = dodgerblue3
        self.nodes[node]["penwidth"] = 3.0
        self.nodes[node]["fontname"] = FONT
    self.inputs = list(self.inputs)

    self.add_nodes_from([(n, d) for n, d in orig_nodes if n in self.cover_nodes])
    for node in self.cover_nodes:
        self.nodes[node]["fontname"] = FONT
        self.nodes[node]["color"] = forestgreen
        self.nodes[node]["fontcolor"] = forestgreen

    self.add_nodes_from([(n, d) for n, d in orig_nodes if n in main_nodes])
    for node in main_nodes:
        self.nodes[node]["fontname"] = FONT

    self.add_node(self.output_node, **G.nodes[self.output_node])
    self.nodes[self.output_node]["color"] = dodgerblue3
    self.nodes[self.output_node]["fontcolor"] = dodgerblue3

    self.add_edges_from(main_edges)

    self.call_graph = self.build_call_graph()
    self.function_sets = self.build_function_sets()

def solve_bubble(graph, ancestor_node, descendant_node):
    '''find the best path between two nodes and remove the remaining'''
    path_list = list(all_simple_paths(graph, ancestor_node, descendant_node))
    path_length = [len(path) for path in path_list]
    weight_avg_list = [path_average_weight(graph, path) for path in path_list]
    return select_best_path(graph, path_list, path_length, weight_avg_list)

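# Minimal sketch (hypothetical 4-node "bubble") of the paths solve_bubble
# compares; path_average_weight and select_best_path are assumed to be defined
# elsewhere in the original module, so only the path enumeration is shown here.
import networkx as nx
from networkx.algorithms.simple_paths import all_simple_paths

demo_bubble = nx.DiGraph([("A", "B"), ("A", "C"), ("B", "D"), ("C", "D")])
print(list(all_simple_paths(demo_bubble, "A", "D")))
# the two branches of the bubble: ['A', 'B', 'D'] and ['A', 'C', 'D']
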