Ejemplo n.º 1
0
 def test_single_target_shortest_path(self):
     """Shortest paths into node 0 on directed and undirected cycles."""
     # Directed cycle: the path must follow edge direction all the way around.
     paths = nx.single_target_shortest_path(self.directed_cycle, 0)
     assert_equal(paths[3], [3, 4, 5, 6, 0])
     # Undirected cycle: the short way around is taken.
     paths = nx.single_target_shortest_path(self.cycle, 0)
     assert_equal(paths[3], [3, 2, 1, 0])
     # cutoff=0 keeps only the trivial path from the target to itself.
     paths = nx.single_target_shortest_path(self.cycle, 0, cutoff=0)
     assert_equal(paths, {0: [0]})
Ejemplo n.º 2
0
 def test_single_target_shortest_path(self):
     """single_target_shortest_path returns, per source, the path to the target."""
     directed = nx.single_target_shortest_path(self.directed_cycle, 0)
     assert_equal(directed[3], [3, 4, 5, 6, 0])
     undirected = nx.single_target_shortest_path(self.cycle, 0)
     assert_equal(undirected[3], [3, 2, 1, 0])
     # With cutoff=0 only the target's own zero-length path survives.
     trivial = nx.single_target_shortest_path(self.cycle, 0, cutoff=0)
     assert_equal(trivial, {0: [0]})
Ejemplo n.º 3
0
 def test_single_target_shortest_path(self):
     """Check paths to target 0, with and without a cutoff."""
     result = nx.single_target_shortest_path(self.directed_cycle, 0)
     assert result[3] == [3, 4, 5, 6, 0]
     result = nx.single_target_shortest_path(self.cycle, 0)
     assert result[3] == [3, 2, 1, 0]
     # Zero cutoff restricts the answer to the target node alone.
     result = nx.single_target_shortest_path(self.cycle, 0, cutoff=0)
     assert result == {0: [0]}
Ejemplo n.º 4
0
 def blanket_subgraph(self, alist_id, ancestor_length=1, descendant_length=1):
     """Return the subgraph around `alist_id` spanning its nearby ancestors
     and descendants.

     Ancestors are nodes that can reach `alist_id` within `ancestor_length`
     hops; descendants are nodes reachable from it within
     `descendant_length` hops.
     """
     # Nodes with a path *into* alist_id (bounded by ancestor_length).
     up = nx.single_target_shortest_path(self, alist_id, cutoff=ancestor_length)
     # Nodes with a path *from* alist_id (bounded by descendant_length).
     down = nx.single_source_shortest_path(self, alist_id, cutoff=descendant_length)
     return self.subgraph(up.keys() | down.keys())
Ejemplo n.º 5
0
 def apply_topography(self, graph):
     """Assign a "height" attribute to every node of `graph`.

     River nodes (those with a non-None "river" attribute) get height 1;
     every other node gets min(3, length of its shortest path entry to the
     nearest river node).
     """
     # Use a set: membership is tested once per node inside the loop.
     river_nodes = {
         node for node, data in graph.nodes(data=True)
         if data.get("river") is not None
     }
     for node in graph.nodes():
         if node in river_nodes:
             graph.node[node]["height"] = 1
         else:
             # All shortest paths that end at `node`, keyed by source.
             shortest_paths = nx.single_target_shortest_path(graph, node)
             # Fixed: the comprehension variable was also named `node`,
             # shadowing the outer loop variable.
             height = min(len(shortest_paths[river]) for river in river_nodes)
             # NOTE(review): `graph.node` was removed in networkx 2.4; on a
             # newer networkx this must become `graph.nodes` — confirm the
             # project's pinned version before changing it.
             graph.node[node]["height"] = min(height, 3)
Ejemplo n.º 6
0
    def _branches_ends_paths(self):
        """Return (branch node ids, end node ids, paths to the root).

        Branches are nodes with in-degree > 1, ends are nodes with
        in-degree 0, and paths map every node to its shortest path
        toward ``self.root`` in the disto-proximal graph.
        """
        branch_ids, end_ids = set(), set()
        for node_id, in_degree in self._disto_proximal.in_degree:
            if in_degree == 0:
                end_ids.add(node_id)
            elif in_degree > 1:
                branch_ids.add(node_id)

        root_paths = nx.single_target_shortest_path(self._disto_proximal,
                                                    self.root)

        return branch_ids, end_ids, root_paths
Ejemplo n.º 7
0
 def ReverseSaerch(self, target, nums=5):
     """Reverse-search the starting compounds for a given target compound.

     Parameters
     ----------
     target : str
         CID string of the target compound.
     nums : int
         Positive path-length cutoff for the search.

     Returns
     -------
     list of (name, path) tuples — at most 20 entries, where `path` is the
     list of compound names along the route to the target.
     """
     ret = []
     for k, v in nx.single_target_shortest_path(self.G, target,
                                                cutoff=nums).items():
         # Heuristic filter: compound ids contain the letter 'C'.
         if 'C' in k:
             pth = [self.id2name_dict[i] for i in v]
             ret.append((self.id2name_dict[k], pth))
     # Slicing already clamps to the list length, so the original
     # explicit `if len(ret) >= 20` length check was redundant.
     return ret[:20]
Ejemplo n.º 8
0
def get_path_funcs():
    """Collect every function on short call paths around `func`.

    Gathers callers within `upstream_cutoffs` hops and callees within
    `downstream_cutoff` hops of `func` in `call_graph`.

    NOTE(review): relies on `call_graph`, `func`, `upstream_cutoffs` and
    `downstream_cutoff` from an enclosing scope — presumably this is a
    nested function; confirm against the caller.
    """
    upstream_paths = {}
    downstream_paths = {}
    if upstream_cutoffs > 0:
        # Paths from callers *to* func.
        upstream_paths = nx.single_target_shortest_path(
            call_graph, func, cutoff=upstream_cutoffs)
    if downstream_cutoff > 0:
        # Paths from func to its callees.
        downstream_paths = nx.single_source_shortest_path(
            call_graph, func, cutoff=downstream_cutoff)

    path_funcs = set()
    # Only the path node lists (dict values) matter, not the endpoints
    # (the original iterated .items() and indexed the tuple).
    for path in upstream_paths.values():
        path_funcs.update(path)
    for path in downstream_paths.values():
        path_funcs.update(path)
    return path_funcs
Ejemplo n.º 9
0
def partition_tree(g):
    """Yield leaf-to-root paths of tree `g`, truncated at visited nodes.

    One endpoint (degree-1 node) is picked as the root; for every other
    endpoint the shortest path toward the root is yielded, cut short at the
    first node already covered by a previous path.

    Raises
    ------
    ValueError
        If the graph has fewer than two degree-1 endpoints (e.g. it is a
        cycle), so no root/leaf partition exists.
    """
    ends = [coord for coord, deg in g.degree if deg == 1]
    # Fixed: the original guard tested `len(g) < 2` (node count), which
    # neither detects cycles nor matches the error message; a cyclic graph
    # has no degree-1 endpoints, so test `ends` instead.  The exception
    # type is unchanged (unpacking an empty `ends` also raised ValueError).
    if len(ends) < 2:
        raise ValueError("Graph is cyclic")

    root, *leaves = ends

    # Shortest path from every node to the chosen root.
    paths = nx.single_target_shortest_path(g, root)
    visited = set()
    for leaf in leaves:
        path = []
        for node in paths[leaf]:
            path.append(node)
            if node in visited:
                break
        yield path
        visited.update(path)
Ejemplo n.º 10
0
    def get_files(self, mode, meta_provided, overwrite):
        """Return the set of output files that still need to be produced.

        Selects the mode's "end files", walks the dependency graph backwards
        from each of them, and collects every file on those paths that does
        not already exist (or, when `overwrite` is set, everything that is
        not an existing directory outside self.tag_dirs).
        """

        # If mode provided, specify output files.
        if mode:
            if mode == 'preprocess':
                # Fixed: copy before removing — the original aliased
                # self.preprocessing_files and mutated the instance
                # attribute in place.
                end_files = set(self.preprocessing_files)
                if not meta_provided:
                    end_files.remove(self.comparisons_file)
                    end_files.remove(self.meta_file)
            elif mode == 'readthrough':
                end_files = self.readthrough_files
            elif mode == 'get_dogs':
                end_files = self.dogs_files
            elif mode == 'diff_exp_read_in':
                end_files = self.diff_exp_read_in_files
            else:
                end_files = self.diff_exp_dogs_files

        # No mode but meta provided.
        elif meta_provided:
            end_files = self.preprocessing_files | self.readthrough_files | self.dogs_files | \
                        self.diff_exp_read_in_files | self.diff_exp_dogs_files

        # No mode with no meta.
        else:
            end_files = self.preprocessing_files | self.readthrough_files | self.dogs_files

        # Go through paths and get non-existent out files.
        out_files = set()
        for f in end_files:
            # Every dependency path ending at f, keyed by source file.
            paths = nx.single_target_shortest_path(self.dependency, f)
            # Flatten the path lists into the set of files they mention
            # (replaces the original's lambda-assigned `flatten` helper).
            for out_file in {name for path in paths.values() for name in path}:
                if overwrite:
                    if not os.path.isdir(
                            out_file) or out_file in self.tag_dirs:
                        out_files.add(out_file)
                else:
                    if not os.path.isfile(out_file) and not os.path.isdir(
                            out_file):
                        out_files.add(out_file)

        return out_files
def obtenerMenorPrecioParticular(nodo_destino, grafo):
    """Return (cheapest cost, path) among paths from any initial node to
    `nodo_destino`, where a path's cost is the sum of its node weights
    (missing weights default to 1)."""
    nodos_iniciales = generarNodosIniciales(grafo)
    # Weight per node, defaulting to 1 when the attribute is missing.
    pesos = dict(grafo.nodes(data='weight', default=1))
    # Shortest path from every node to the destination.
    conexiones = nx.single_target_shortest_path(grafo, nodo_destino)

    # Keep only the paths that start at an initial node.
    candidatos = {
        clave: camino
        for clave, camino in conexiones.items()
        if clave in nodos_iniciales
    }

    # Index each candidate path by its total cost (ties overwrite,
    # matching the original behaviour).
    candidatos_valor = {}
    for camino in candidatos.values():
        costo = sum(pesos[nodo] for nodo in camino)
        candidatos_valor[costo] = camino

    mejor = min(candidatos_valor)
    return mejor, candidatos_valor[mejor]
Ejemplo n.º 12
0
Archivo: d7alt.py Proyecto: bj0/aoc
# Build a directed containment graph: edge outer -> inner colour,
# weighted by how many inner bags fit inside the outer one.
g = nx.DiGraph()
rule_re = re.compile(r'(.+) bags contain (.+)')
inner_re = re.compile(r'(\d+) (.+) bags?')
for line in data.splitlines():
    bag, contents = rule_re.match(line).groups()
    for clause in contents.strip('.').split(','):
        clause = clause.strip()
        if clause == 'no other bags':
            # Sentinel edge so leaf bags still appear in the graph.
            g.add_edge(bag, 'empty', weight=0)
        else:
            n, col = inner_re.match(clause).groups()
            g.add_edge(bag, col, weight=int(n))

# tot = sum(nx.has_path(g, bag, "shiny gold") for bag in g.nodes if bag != "shiny gold")
tot = len(nx.dfs_tree(g.reverse(), "shiny gold").nodes) - 1
# 115
print(f'part1: {tot}')
# or
tot = len(nx.single_target_shortest_path(g, "shiny gold")) - 1
print(f'part1.1: {tot}')


def get(bag):
    """Recursive bag-count helper for part 2 over the global graph `g`."""
    # A bag with no outgoing edges contributes only itself.
    if g.out_degree(bag) == 0:
        return 1
    total = 0
    for inner in g.neighbors(bag):
        total += g[bag][inner]['weight'] * (get(inner) + 1)
    return total


# Part 2: total number of bags contained inside one shiny gold bag.
tot = get('shiny gold')
# 1250
print(f'part2: {tot}')
Ejemplo n.º 13
0
 def estimate_targeting_paths(self, intermediate_target):
     """Look up the first direct (one-hop) path into `intermediate_target`.

     Returns the result of self.lookup on the two-node path, or None when
     no direct predecessor exists.
     """
     paths = nx.single_target_shortest_path(self.G, intermediate_target)
     for path in paths.values():
         # A two-node path is a direct predecessor -> target edge.
         if len(path) == 2:
             return self.lookup(*path)
Ejemplo n.º 14
0
def greedyBFSAlgoritam(G, source, target):
    """Print the shortest path from `source` to `target` and return the
    full mapping of every node to its shortest path toward `target`."""
    # Shortest paths from all nodes to `target` (no depth limit).
    paths_to_target = nx.single_target_shortest_path(G, target, cutoff=None)
    print(list(paths_to_target[source]))
    return paths_to_target
Ejemplo n.º 15
0
def remove_very_indirect_dependencies(
    g: nx.DiGraph, attrs: List[Attr], modified_files: List[str]
) -> "Tuple[nx.DiGraph, nx.DiGraph]":
    """This is fairly subtle, and it requires a bit of tuning and
    just looking at the results on multiple PRs to see if it's
    reasonable.

    It's supposed to do a few things:

    1. `g` is the build graph. it's a DAG. each edge points from an
       more leaf-like package to a more core package that the leaf-like
       package depends on. For example, there's an edge from a numpy
       derivation to a python derivation.
    2. the graph `g` is over .drvs. many of these .drvs correspond to named
       attributes in nixpkgs, like python38Packages.numpy. Many of them do
       not. .drvs that are not attrs include `src`s and other stuff that's
       not addressable as a nixpkgs derivation. generally this is stuff that
       users do not care about.
    3. for each attr, with some exceptions, we have the position filename and
       line number where it was created. We also know the set of modified files
       in this PR. From that, we can form a reasonable _guess_ of which attrs
       were actually _directly modified_ in the pr. the other attrs are ones that
       are downstream from the modification.
    4. The identification in (3) is both under- and over- inclusive. it's over-
       inclusive because we only look for a match at the level of the filename, so
       if a file contains many attrs then all of them will be flagged. it's under-
       inclusive because the PR may have changed a file that's used in attrs, but is
       not itself where any attr is declared. for example if someone changes a shell
       hook function and only edits a `.sh` file.
    5. so, because of (3) and (4), we have two schemes for identifying the "root"
       attrs that were actually changed by the PR. the first is simply which attrs
       are in files that were changed. the second is more graph theoretical. we look
       for the set of all *longest paths* in the DAG of drvs to be built, measuring
       the length of the path by the number of edges that include at least 1 attr
       in the edge. so edges that purely include non-attr .drvs don't count. and then
       we look at the deepest attr in each of these longest paths. these seem to capture
       the roots of the DAG pretty well.
    6. then, armed with these roots, we look for all nodes that are within 2 hops of
       a root, and throw away everything else. so we keep the roots themselves. we keep
       packages that directly link against these roots. and we keep edges that directly
       link against the packages that link to the roots. dependencies that are further
       away than that get discarded.
    7. for each of these packages that are going to be retained, we record the number
       of ancestors they have. that is, how many attrs in the build graph depend directly
       or indirectly on them. note that this includes attrs in the *full graph* -- do
       this calculation before discarding the set that need to be discarded as described
       in (6). the purpose of this calculation is so that we can assign a relative
       importance to every package that we're keeping. things that are depended on by more
       packages are more important.

    So finally, the result is a *pair* of graphs: the pruned build graph and
    the matching pruned transitive closure (the original annotation claimed a
    single DiGraph; two subgraph views are actually returned).
    """

    non_autogenerated_modified_files = {
        m for m in modified_files if m not in BIG_AUTOGENERATED_FILES
    }

    #
    # Determine which attrs were modified directly by the git commit, rather than modified
    # indirectly because their inputs changed
    #
    drv_to_attr = {
        a.drv_path: a
        for a in attrs
        if a.drv_path is not None and a.position is not None
    }

    # Record if an edge connects to Attrs. There are some really long un-interesting
    # paths that relate to multi-lib or cross-compilation or something.
    for e in g.edges():
        g.edges[e]["is_attr"] = int(e[0] in drv_to_attr or e[1] in drv_to_attr)

    # Scheme 1 from the docstring: attrs whose declaring file was touched.
    build_roots = {
        n: drv_to_attr[n].name
        for n in g.nodes
        if (
            n in drv_to_attr
            and drv_to_attr[n].filename() in non_autogenerated_modified_files
        )
    }
    log.info("Directly modified attrs", roots=json.dumps(sorted(build_roots.values())))

    #
    # Sometimes the scheme above might give no build roots, if for example
    # the only file edited was a hook or something and not where the attrs are
    # named. We want to have at least 1 build root, so if this happens lets
    # find the longest path in the dag counting only attrs, so like attr a depends
    # on b depends on c depends on d, and then let's call attr d the build root.
    #
    longest_path_build_roots = {}
    for long_path in dag_longest_paths(g, weight="is_attr"):
        # Deepest attr on the path: last node that maps to a known attr.
        *_, long_path_end = (n for n in long_path if n in drv_to_attr)
        long_path_end_name = drv_to_attr[long_path_end].name
        # log.info("Longest path", name=long_path_end_name)
        build_roots[long_path_end] = long_path_end_name
        longest_path_build_roots[long_path_end] = long_path_end_name

    log.info(
        "Longest-path build roots", roots=sorted(longest_path_build_roots.values())
    )
    log.info('Consensus build "roots"', roots=sorted(build_roots.values()))
    assert len(build_roots) > 0 or len(g) == 0

    # Step 6: keep everything within 2 hops upstream of any root.
    path_length_counts: Counter_t[int] = Counter()
    to_keep = set()
    for root in build_roots:
        g.nodes[root]["is_root"] = True
        for node, path in nx.single_target_shortest_path(g, root, 2).items():
            # If node == root, len(path) == 1. Keep that, of course
            # If node is a direct dependency of root, len(path) == 2. Keep that too.
            # If node is a 1-step indirect dependency of root, len(path) == 3. Keep that.
            # Otherwise, throw away.
            to_keep.add(node)
            path_length_counts[len(path) - 1] += 1

    #
    # Compute the closure of everything in to_keep, because we've gotta keep
    # those too.
    #
    before_ancestor = time.time()
    transitive_closure = nx.transitive_closure_dag(g)
    if time.time() - before_ancestor > 1:
        log.info(
            f"Computing transitive closure of build graph: {time.time() - before_ancestor:.2f} sec"
        )

    #
    # Since the changed attrs were not necessarily actually roots of the build graph
    # (this happens if we're on staging and some unchanged dependencies of the changed
    # attrs actually haven't been built yet), we need to keep them in the graph. this
    # confused me at first, but if we don't keep these things in the build graph,
    # then the precedence-constrained knapsack problem won't be working with the right
    # information, because it won't know that these packages need to be built in order
    # to build the stuff we actually care about. if the "build roots" are really roots,
    # then these should be empty.
    #
    to_keep_closure = set()
    for k in to_keep:
        to_keep_closure.update(transitive_closure.succ[k])
    to_keep.update(to_keep_closure)

    if len(g) < 500:
        # Ehhhh, just keep everything if the number of nodes is small enough
        to_keep.update(g.nodes())

    # Step 7: relative importance — how many attr nodes depend (directly or
    # indirectly) on each kept node, computed on the *full* graph's closure.
    for n in to_keep:
        n_ancestors = sum(nn in drv_to_attr for nn in transitive_closure.pred[n])
        g.nodes[n]["n_ancestors"] = n_ancestors

    log.info(
        "Removing remote dependencies",
        kept=len(to_keep),
        removed=g.number_of_nodes() - len(to_keep),
        total=g.number_of_nodes(),
    )

    if DEBUG:
        print("To remove")
        import IPython

        IPython.embed()

    # Returns subgraph *views* over the kept node set — a (build graph,
    # transitive closure) pair, matching the annotation above.
    return g.subgraph(to_keep), transitive_closure.subgraph(to_keep)
Ejemplo n.º 16
0
def part_1():
    """Count the bags that can (transitively) contain a shiny gold bag."""
    graph = get_graph()
    # single_target_shortest_path includes the target itself; subtract it.
    answer = len(nx.single_target_shortest_path(graph, 'shiny gold')) - 1
    print(answer)
Ejemplo n.º 17
0
def createRoutesToGraph(G, node):
    """Return the subgraph of G induced by all nodes that can reach `node`."""
    reachable = nx.single_target_shortest_path(G, node).keys()
    return G.subgraph(reachable)
Ejemplo n.º 18
0
def main():
    """Build option policies from shortest paths to termination states and
    pickle the resulting graph and generated skills for each environment.

    For each generated environment (loops 100-119) and each state variable,
    this finds the shortest path from every state to every termination
    state, keeps only paths that are shortest across termination states and
    do not cross multiple termination sets, and turns the first action of
    each kept path into a one-hot policy entry.
    """
    for i in range(100, 120):
        print(f'Loop {i}')
        G, no_actions = generate_graph(i)
        term_states = get_term_states(G)
        print(f'Term states: {term_states}')

        pos = [3, 4, 5, 6]
        option_set = []

        # For each state variable get the set of termination states
        for p in pos:
            terms = [k for k, v in term_states.items() if v == p]
            sp_list = []

            # Get the set of shortest paths to each termination state for a single state variable
            for t in terms:
                sp = nx.single_target_shortest_path(G, t)
                sp_list.append(sp)

            policy_dict = {}
            for idx, path in enumerate(sp_list):
                keys = path.keys()
                for key in keys:
                    shortest = True
                    nums = np.arange(len(sp_list))
                    num2 = np.delete(nums, idx)
                    # Fixed: this loop variable was named `pos`, shadowing
                    # the outer `pos` list of state variables.
                    for other_idx in num2:
                        keys2 = sp_list[other_idx].keys()
                        if key in keys2:
                            if len(sp_list[other_idx][key]) < len(path[key]):
                                shortest = False

                    # Check how many terminal states the path goes through
                    counter = 0
                    curr_path = path[key]
                    for ts in term_states:
                        if ts in curr_path:
                            counter += 1

                    # if the current path doesn't go through multiple termination sets and is the shortest then add
                    # the policy
                    if counter < 2 and len(curr_path) > 1 and shortest:
                        best_action = G[curr_path[0]][curr_path[1]]['action']

                        policy = list(np.zeros(no_actions, dtype=int))
                        policy[best_action] = 1
                        policy_dict[curr_path[0]] = policy
            init_set = list(policy_dict.keys())
            if policy_dict:
                option = option_holder.OptionHolder(policy_dict, init_set,
                                                    terms)
                option_set.append(option)
            # print(policy_dict)

        skill_folder_path = RESULTS_PATH + str(i) + '/'
        if not (os.path.isdir(skill_folder_path)):
            os.makedirs(skill_folder_path)
        # Fixed: the original passed bare open(...) handles to pickle.dump,
        # leaking the file objects; `with` closes them deterministically.
        with open(skill_folder_path + 'graph.pickle', "wb") as fh:
            pickle.dump(G, fh)
        with open(skill_folder_path + 'generated_skills.pickle', "wb") as fh:
            pickle.dump(option_set, fh)
Ejemplo n.º 19
0
def neighbors_first_order(G, basenode, direction=None):
    '''
    Returns the first order neighbors of the basenode. G must be a directed graph.
    From the first order neighbors the basenode must be reachable with directed edges.
    The neighbors can point at each other, but cannot point at any node, which cannot reach the basenode.

    Parameters:
    ----------
    G : networkx object

    basenode : name of a node in the graph

    direction: None or string

            The default is undirected.

            If direction = in : We use the number of incoming degrees, as degree.

            If direction = out : We use the number of outgoing degrees, as degree.

    Returns:
    ------
    star_nodes : A list of the first order neighbors and the basenode.
        Returns [] (after printing a message) when G is not directed.

    '''

    if G.is_directed():
        pass
    else:
        print('Graph must be directed.')
        return []

    # Work on a copy so the caller's graph is never mutated.
    G = G.copy()

    # remove out edges of basenode
    G.remove_edges_from(list(G.edges(basenode)))

    # Fixed: np.infty was removed in NumPy 2.0; np.inf is the same value.
    star_nodes = neighbors_within_n_step(G,
                                         node=basenode,
                                         cutoff=np.inf,
                                         direction=direction)

    # filter out: nodes with a neighbor that can't reach basenode through out edges.
    # Fixed: use a set — membership is tested once per out-neighbor below.
    nodes_reach_basenode = set(
        nx.single_target_shortest_path(G, basenode, cutoff=None))

    for node in star_nodes:

        neighs = list(G.neighbors(node))  # by out edges

        for n in neighs:
            # if basenode can't be reached from node's out-neighbors,
            # disconnect node entirely
            if n not in nodes_reach_basenode:
                G.remove_edges_from(list(G.out_edges(node)))
                G.remove_edges_from(list(G.in_edges(node)))
        # update reachable nodes after any removals
        nodes_reach_basenode = set(
            nx.single_target_shortest_path(G, basenode, cutoff=None))

    # only valid nodes remained:
    star_nodes = neighbors_within_n_step(G,
                                         node=basenode,
                                         cutoff=np.inf,
                                         direction=direction)

    return star_nodes