Code example #1
    def add_edges_from(self, ebunch, attr_dict=None, **attr):
        """Add all the edges in ebunch.
        Parameters
        ----------
        ebunch : container of edges
            Each edge given in the container will be added to the
            graph. The edges must be given as 2-tuples (u,v) or
            3-tuples (u,v,d) where d is a dictionary containing edge
            data.
        attr_dict : dictionary, optional (default= no attributes)
            Dictionary of edge attributes.  Key/value pairs will
            update existing data associated with each edge.
        attr : keyword arguments, optional
            Edge data (or labels or objects) can be assigned using
            keyword arguments.

        Ensures that no cycles are introduced into this directed graph (DAG).
      
        """
        # set up attribute dict
        if attr_dict is None:
            attr_dict = attr
        else:
            try:
                attr_dict.update(attr)
            except AttributeError:
                raise NetworkXError("The attr_dict argument must be a dict.")
        # process ebunch
        for e in ebunch:
            ne = len(e)
            if ne == 3:
                u, v, dd = e
                assert hasattr(dd, "update")
            elif ne == 2:
                u, v = e
                dd = {}
            else:
                raise NetworkXError("Edge tuple %s must be a 2-tuple or 3-tuple." % (e,))
            if u not in self.succ:
                self.succ[u] = {}
                self.pred[u] = {}
                self.node[u] = {}
            if v not in self.succ:
                self.succ[v] = {}
                self.pred[v] = {}
                self.node[v] = {}
            datadict = self.adj[u].get(v, {})
            datadict.update(attr_dict)
            datadict.update(dd)
            self.succ[u][v] = datadict
            self.pred[v][u] = datadict

            # make sure it's a DAG
            try:
                networkx.topological_sort(self)
            except networkx.NetworkXUnfeasible:
                self.remove_edge(u, v)
                raise networkx.NetworkXUnfeasible(
                    "Adding edge %s-%s would create a cycle in this DAG." % (u, v)
                )
Code example #2
 def evaluate(self, x):
     # XXX: this is a massive hack
     x = x[::-1]
     assert self.circuit
     g = self.circuit.copy()
     for node in nx.topological_sort(g):
         if "gate" not in g.node[node]:
             if g.node[node]["label"][0].startswith("x"):
                 g.add_node(node, value=int(x[node]))
             else:
                 g.add_node(node, value=int(g.node[node]["value"]))
         elif g.node[node]["gate"] in ("ADD", "MUL"):
             keys = g.pred[node].keys()
             if len(keys) == 1:
                 idx1 = idx2 = keys[0]
             else:
                 assert len(keys) == 2
                 idx1 = g.pred[node].keys()[0]
                 idx2 = g.pred[node].keys()[1]
             if g.node[node]["gate"] == "ADD":
                 value = g.node[idx1]["value"] + g.node[idx2]["value"]
             elif g.node[node]["gate"] == "MUL":
                 value = g.node[idx1]["value"] * g.node[idx2]["value"]
             g.add_node(node, value=value)
         else:
             raise Exception("Unable to evaluate")
     idx = nx.topological_sort(g)[-1]
     return g.node[idx]["value"] != 0
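This snippet (like the next one) assumes Python 2 and NetworkX 1.x. Under Python 3 / NetworkX 2.x three of its idioms break; a sketch of the equivalents:

import networkx as nx

order = list(nx.topological_sort(g))  # 2.x returns a generator, so materialize it
idx = order[-1]
value = g.nodes[idx]["value"]         # g.node was renamed to g.nodes in 2.x
preds = list(g.pred[idx])             # dict views are not indexable in Python 3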
Code example #3
File: z_obfuscator.py  Project: dmwit/obfuscation
 def evaluate(self, x):
     # XXX: this is a massive hack
     x = x[::-1]
     assert self.circuit
     g = self.circuit.copy()
     for node in nx.topological_sort(g):
         if 'gate' not in g.node[node]:
             if g.node[node]['label'][0].startswith('x'):
                 g.add_node(node, value=int(x[node]))
             else:
                 g.add_node(node, value=int(g.node[node]['value']))
         elif g.node[node]['gate'] in ('ADD', 'MUL', 'SUB'):
             keys = g.pred[node].keys()
             if len(keys) == 1:
                 idx1 = idx2 = keys[0]
             else:
                 assert(len(keys) == 2)
                 idx1 = g.pred[node].keys()[0]
                 idx2 = g.pred[node].keys()[1]
             if g.node[node]['gate'] == 'ADD':
                 value = g.node[idx1]['value'] + g.node[idx2]['value']
             elif g.node[node]['gate'] == 'SUB':
                 value = g.node[idx1]['value'] - g.node[idx2]['value']
             elif g.node[node]['gate'] == 'MUL':
                 value = g.node[idx1]['value'] * g.node[idx2]['value']
             g.add_node(node, value=value)
         else:
             raise Exception('Unable to evaluate')
     idx = nx.topological_sort(g)[-1]
     return g.node[idx]['value'] != 0
Code example #4
File: base.py  Project: Erotemic/dtool
    def ensure_dependencies(request):
        r"""
        CommandLine:
            python -m dtool.base --exec-BaseRequest.ensure_dependencies

        Example:
            >>> # ENABLE_DOCTEST
            >>> from dtool.base import *  # NOQA
            >>> from dtool.example_depcache import testdata_depc
            >>> depc = testdata_depc()
            >>> request = depc.new_request('vsmany', [1, 2], [2, 3, 4])
            >>> request.ensure_dependencies()
        """
        import networkx as nx
        depc = request.depc
        if False:
            dependencies = nx.ancestors(depc.graph, request.tablename)
            subgraph = depc.graph.subgraph(set.union(dependencies, {request.tablename}))
            dependency_order = nx.topological_sort(subgraph)
            root = dependency_order[0]
            [nx.algorithms.dijkstra_path(subgraph, root, start)[:-1] +
             nx.algorithms.dijkstra_path(subgraph, start, request.tablename)
             for start in dependency_order]
        graph = depc.graph
        root = list(nx.topological_sort(graph))[0]
        edges = graph.edges()
        #parent_to_children = ut.edges_to_adjacency_list(edges)
        child_to_parents = ut.edges_to_adjacency_list([t[::-1] for t in edges])
        to_root = {request.tablename:
                   ut.paths_to_root(request.tablename, root, child_to_parents)}
        from_root = ut.reverse_path(to_root, root, child_to_parents)
        dependency_levels_ = ut.get_levels(from_root)
        dependency_levels = ut.longest_levels(dependency_levels_)

        true_order = ut.flatten(dependency_levels)[1:-1]
        #print('[req] Ensuring %s request dependencies: %r' % (request, true_order,))
        ut.colorprint(
            '[req] Ensuring request %s dependencies: %r' % (request, true_order,), 'yellow')
        for tablename in true_order:
            table = depc[tablename]
            if table.ismulti:
                pass
            else:
                # HACK FOR IBEIS
                all_aids = ut.flat_unique(request.qaids, request.daids)
                depc.get_rowids(tablename, all_aids)
                pass
            pass

        #zip(depc.get_implicit_edges())
        #zip(depc.get_implicit_edges())

        #raise NotImplementedError('todo')
        #depc = request.depc
        #parent_rowids = request.parent_rowids
        #config = request.config
        #rowid_dict = depc.get_all_descendant_rowids(
        #    request.tablename, root_rowids, config=config)
        pass
Code example #5
File: circuit.py  Project: breuleux/colonel
 def attempt_edge(graph, a, b):
     a_gate, a_port = a
     b_gate, b_port = b
     graph.add_edge(a_gate, b_gate)
     try:
         nx.topological_sort(graph)
     except nx.NetworkXUnfeasible:
         graph.remove_edge(a_gate, b_gate)
         broken_up_links.append((a, b))
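A hedged sketch of the scaffolding this helper assumes: graph and the broken_up_links list live in the enclosing scope, endpoints are (gate, port) pairs, and NetworkX 1.x raises eagerly from topological_sort:

import networkx as nx

graph = nx.DiGraph()
broken_up_links = []                          # collects rejected (a, b) links
attempt_edge(graph, ("and1", 0), ("or1", 1))  # added: the graph stays acyclic
attempt_edge(graph, ("or1", 0), ("and1", 0))  # removed again: it would form a cycle
assert broken_up_links == [(("or1", 0), ("and1", 0))]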
Code example #6
File: test_dag.py  Project: datachomper/googleants
 def test_nbunch_argument(self):
     G = nx.DiGraph()
     G.add_edges_from([(1, 2), (2, 3), (1, 4), (1, 5), (2, 6)])
     assert_equal(nx.topological_sort(G), [1, 2, 3, 6, 4, 5])
     assert_equal(nx.topological_sort_recursive(G), [1, 5, 4, 2, 6, 3])
     assert_equal(nx.topological_sort(G, [1]), [1, 2, 3, 6, 4, 5])
     assert_equal(nx.topological_sort_recursive(G, [1]), [1, 5, 4, 2, 6, 3])
     assert_equal(nx.topological_sort(G, [5]), [5])
     assert_equal(nx.topological_sort_recursive(G, [5]), [5])
Code example #7
File: pid.py  Project: Autoplectic/dit
    def _compute_lattice_monotonicity(self, reds, pis):
        """
        Infer the redundancy and partial information of lattice elements via lattice monotonicity.

        Parameters
        ----------
        reds : dict
            Currently known redundancy values.
        pis : dict
            Currently known partial information values.

        Returns
        -------
        reds : dict
            Updated redundancy values.
        pis : dict
            Updated partial information values.
        """
        # everything below a node with redundancy 0 also has redundancy 0
        nodes = list(nx.topological_sort(self._lattice))
        while nodes:
            node = nodes.pop(0)
            if node in reds and np.isclose(0, reds[node]):
                for n in descendants(self._lattice, node):
                    if n not in reds:
                        reds[n] = 0
                        nodes.remove(n)

        # everything above a node with redundancy I(inputs, output) also has redundancy I(inputs, output)
        nodes = list(reversed(list(nx.topological_sort(self._lattice))))
        while nodes:
            node = nodes.pop(0)
            if node in reds and np.isclose(reds[node], self._total):
                for n in ascendants(self._lattice, node):
                    if n not in reds:
                        reds[n] = self._total
                        nodes.remove(n)

        # if redundancy of A == redundancy of B, then for all A -> C -> B, redundancy of C = redundancy of A, B
        tops = [node for node in self._lattice if node in reds and any((n not in reds) for n in self._lattice[node])]
        bottoms = [node for node in self._lattice if
                   node in reds and any((n not in reds) for n in self._lattice.reverse()[node])]
        for top, bottom in product(tops, bottoms):
            if np.isclose(reds[top], reds[bottom], atol=1e-5, rtol=1e-5):
                for path in nx.all_simple_paths(self._lattice, top, bottom):
                    for node in path[1:-1]:
                        if node not in reds:
                            reds[node] = reds[top]

        # if the redundancy of A is equal to the redundancy of any of A's children, then pi(A) = 0
        for node in self._lattice:
            if node not in pis:
                if node in reds and all(n in reds for n in self._lattice[node]) and self._lattice[node]:
                    if any(np.isclose(reds[n], reds[node], atol=1e-5, rtol=1e-5) for n in self._lattice[node]):
                        pis[node] = 0

        return reds, pis
Code example #8
def main():
  log = logging.getLogger(__name__)
  logging.basicConfig(level = logging.INFO)

  backend = foradage.RECAST_Backend(2)

  g = adage.mk_dag()

  global_context = {
    'workdir':'/Users/lukas/Code/code-snippets/cap-schema-drafts/steer',
    'dataset':'user15.lheinric.p123/'
  }

  steps_graph = nx.DiGraph()
  workflow = json.load(open('capdata/workflow.json'))

  for step in workflow:
    steps_graph.add_node(step['name'],step)
    for x in step['dependencies']:
      steps_graph.add_edge(x,step['name'])

  rules = {}
  for stepname in nx.topological_sort(steps_graph):
      stepinfo = steps_graph.node[stepname]
      rule = foradage.RECAST_Rule(stepinfo,workflow,rules,global_context)
      rules[stepname] = rule

  adage.rundag(g,rules.values(), track = True, backend = backend)

  provgraph = nx.DiGraph()
  for x in nx.topological_sort(g):
    attr = g.node[x].copy()
    attr.update(color = 'red',label = g.getNode(x).name)
    provgraph.add_node(x,attr)
    nodeinfo =  g.getNode(x).task.node

    if 'used_inputs' in nodeinfo:
      for k,inputs_from_node in nodeinfo['used_inputs'].iteritems():
        for one in inputs_from_node:
          depname = 'output_{}_{}_{}'.format(k,one[0],one[1])
          provgraph.add_edge(depname,x)
    else:
      for pre in g.predecessors(x):
        provgraph.add_edge(pre,x)


    for k,v in g.getNode(x).result_of()['RECAST_metadata']['outputs'].iteritems():
      for i,y in enumerate(v):
        name = 'output_{}_{}_{}'.format(g.getNode(x).task.node['name'],k,i)
        provgraph.add_node(name,{'shape':'box','label':'{}_{}'.format(k,i),'color':'blue'})
        provgraph.add_edge(x,name)
        
  nx.write_dot(provgraph,'workflow_instance.dot')
  subprocess.call(['dot','-Tpdf','workflow_instance.dot'], stdout = open('workflow_instance.pdf','w'))
  nx.write_dot(steps_graph,'steps.dot')
  subprocess.call(['dot','-Tpdf','steps.dot'], stdout = open('steps.pdf','w'))
Code example #9
File: test_dag.py  Project: aparamon/networkx
    def test_topological_sort2(self):
        DG = nx.DiGraph({1: [2], 2: [3], 3: [4],
                         4: [5], 5: [1], 11: [12],
                         12: [13], 13: [14], 14: [15]})
        assert_raises(nx.NetworkXUnfeasible, consume, nx.topological_sort(DG))

        assert_false(nx.is_directed_acyclic_graph(DG))

        DG.remove_edge(1, 2)
        consume(nx.topological_sort(DG))
        assert_true(nx.is_directed_acyclic_graph(DG))
Code example #10
File: graph.py  Project: brettc/causalinfo
    def __init__(self, equations):
        # Everything must be an Equation
        self.equations = equations
        self.equations_by_name = {}

        for eq in equations:
            if not isinstance(eq, Equation):
                raise RuntimeError("Non Equation found.")

            if eq.name in self.equations_by_name:
                raise RuntimeError("Equation names must be unique within a graph")
            self.equations_by_name[eq.name] = eq

        # Make a network from this. The first is the full network of both
        # equations and variables (a bipartite graph). The second is just the
        # network of causal variables (the projection of the bipartite graph).
        full_network = nx.DiGraph()
        causal_network = nx.DiGraph()
        for p in equations:
            for i in p.inputs:
                full_network.add_edge(i, p)
            for o in p.outputs:
                full_network.add_edge(p, o)
            for i in p.inputs:
                for o in p.outputs:
                    causal_network.add_edge(i, o)

        self.full_network = full_network
        self.causal_network = causal_network

        # Nodes are either inputs, outputs, or inner
        self.inputs = set()
        self.outputs = set()
        self.inner = set()

        for n in self.causal_network.nodes():
            preds = self.full_network.predecessors(n)
            sucs = self.full_network.successors(n)
            if not preds:
                self.inputs.add(n)
            if not sucs:
                self.outputs.add(n)
            if preds and sucs:
                self.inner.add(n)

        # Sort all nodes into topological order. This allows us to calculate
        # the probabilities across the nodes in the right order, so that the
        # inputs for each player are always calculated in time (by previous
        # equations).
        self.ordered_variables = nx.topological_sort(self.causal_network)
        self.ordered_nodes = nx.topological_sort(self.full_network)

        self.graphviz_prettify(self.full_network)
        self.graphviz_prettify(self.causal_network)
Code example #11
    def add_edge(self, u, v, attr_dict=None, **attr):
        """Add an edge between u and v.

        The nodes u and v will be automatically added if they are
        not already in the graph.

        Edge attributes can be specified with keywords or by providing
        a dictionary with key/value pairs.  See examples below.

        Ensures no cycles are introduced into the graph.
        
        Parameters
        ----------
        u,v : nodes
            Nodes can be, for example, strings or numbers.
            Nodes must be hashable (and not None) Python objects.
        attr_dict : dictionary, optional (default= no attributes)
            Dictionary of edge attributes.  Key/value pairs will
            update existing data associated with the edge.
        attr : keyword arguments, optional
            Edge data (or labels or objects) can be assigned using
            keyword arguments.

        """
        # set up attribute dict
        if attr_dict is None:
            attr_dict = attr
        else:
            try:
                attr_dict.update(attr)
            except AttributeError:
                raise NetworkXError("The attr_dict argument must be a dictionary.")
        # add nodes
        if u not in self.succ:
            self.succ[u] = {}
            self.pred[u] = {}
            self.node[u] = {}
        if v not in self.succ:
            self.succ[v] = {}
            self.pred[v] = {}
            self.node[v] = {}
        # add the edge
        datadict = self.adj[u].get(v, {})
        datadict.update(attr_dict)
        self.succ[u][v] = datadict
        self.pred[v][u] = datadict

        # make sure it's a DAG
        try:
            networkx.topological_sort(self)
        except networkx.NetworkXUnfeasible:
            self.remove_edge(u, v)
            raise networkx.NetworkXUnfeasible(
                "Adding edge %s-%s would create a cycle in this DAG." % (u, v))
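Running a full topological sort after every insertion makes each add_edge cost O(V+E). A sketch of a lighter check under the same assumptions: a new edge u -> v closes a cycle exactly when v already reaches u, so one reachability query suffices:

import networkx

def would_close_cycle(g, u, v):
    # u -> v creates a cycle iff v already reaches u (this also covers self-loops)
    return u in g and v in g and networkx.has_path(g, v, u)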
Code example #12
File: test_dag.py  Project: datachomper/googleants
    def test_topological_sort1(self):
        DG = nx.DiGraph()
        DG.add_edges_from([(1, 2), (1, 3), (2, 3)])
        assert_equal(nx.topological_sort(DG), [1, 2, 3])
        assert_equal(nx.topological_sort_recursive(DG), [1, 2, 3])

        DG.add_edge(3, 2)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)

        DG.remove_edge(2, 3)
        assert_equal(nx.topological_sort(DG), [1, 3, 2])
        assert_equal(nx.topological_sort_recursive(DG), [1, 3, 2])
Code example #13
File: storegraph.py  Project: nicolasmarti/pythonlibs
    def __delitem__(self, key):

        #print "__delitem__(" + str(key) + ")__"

        #a special case: self
        if key in ["self", "key", "value", "col", "row"]:
            raise KeyError
        
        # if we do not have the key, then we leave
        if key not in self.state:
            raise KeyError
        
        # we remove the key
        #self.G.remove_node(key)
        del self.formulas[key]
        del self.values[key]
        #del self.mode[key]
        #del self.state[key]

        # we call the callbacks
        for i in self.callbacks:
            try:
                i("delete", key)
            except Exception as e:
                print "callback delete " + key
                print "callback :=" + str(i)
                print "error: " + str(e)
                pass

        for i in self.named_callbacks:
            try:
                self.__getitem__(i)(self, "delete", key)
            except Exception as e:
                print "callback delete " + key
                print "callback :=" + str(i)
                print "error: " + str(e)
                pass

        # we mark all successors as dirty
        for i in nx.topological_sort(self.G, [key]):
            if i != key:
                #print str(key) + " -> " + str(i)
                self.state[i] = 0
                self.update(i)

        # if there is no successor, we remove the node, along with its mode and state
        if nx.topological_sort(self.G, [key]) == [key]:
            self.G.remove_node(key)
            del self.mode[key]
            del self.state[key]
Code example #14
File: graph.py  Project: sashatarg/treeano
 def _nodes(self, order=None):
     """
     returns all nodes in the graph
     """
     if order is None:
         node_names = self.name_to_node.keys()
     elif order == "architecture":
         node_names = nx.topological_sort(self.architectural_tree)
     elif order == "computation":
         node_names = nx.topological_sort(self.computation_graph)
     else:
         raise ValueError("Unknown order: %s" % order)
     # make sure that all of the original nodes are returned
     assert set(self.name_to_node.keys()) == set(node_names)
     return [self.name_to_node[name] for name in node_names]
Code example #15
File: topo.py  Project: Ankuratgithub/eden
def order_tables(dbdir_str):
    """
    Sorts tables in constraint order, primary tables first

    Constructs a constraint graph for the tables in the supplied directory,
    sorts it in topological order, and returns the table names in that
    order in a list.
    """

    graph = nx.DiGraph()

    for file_str in os.listdir(dbdir_str):
        if file_str.endswith(".table"):
            add_table_to_graph(dbdir_str, file_str, graph)

    ordered_tables = None
    try:
        ordered_tables = nx.topological_sort(graph)
    except nx.NetworkXUnfeasible:
        print >> sys.stderr, "Tables and their constraints do not form a DAG"
        # The NetworkX package does not include a function that finds cycles
        # in a directed graph.  Lacking that, report the undirected cycles.
        # Since our tables currently contain no cycles other than self-loops,
        # no need to implement a directed cycle finder.
        cycles = nx.cycle_basis(nx.Graph(graph))
        print >> sys.stderr, "Found possible cycles:"
        print >> sys.stderr, cycles
        sys.exit(1)

    return ordered_tables
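Newer NetworkX releases do ship a directed cycle finder; a sketch of reporting the actual directed cycle instead, assuming a release that provides nx.find_cycle:

import sys
import networkx as nx

try:
    ordered_tables = list(nx.topological_sort(graph))
except nx.NetworkXUnfeasible:
    cycle = nx.find_cycle(graph, orientation="original")
    sys.stderr.write("Found directed cycle: %s\n" % (cycle,))
    sys.exit(1)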
Code example #16
File: dag_util.py  Project: xiaohan2012/lst
def binarize_dag(g,
                 vertex_weight_key,
                 edge_weight_key,
                 dummy_node_name_prefix='d_'):
    g = g.copy()  # be functional
    dummy_node_counter = 1
    for u in nx.topological_sort(g):
        nbs = g.neighbors(u)
        while len(nbs) > 2:
            for p_1, p_2 in chunks(nbs, 2):
                v = "{}{}".format(
                    dummy_node_name_prefix,
                    dummy_node_counter
                )
                g.add_node(v)
                g.node[v]['dummy'] = True
                
                g.add_edge(u, v)
                g.add_edges_from([(v, p_1),
                                  (v, p_2)])

                g.node[v][vertex_weight_key] = 0
                g[v][p_1][edge_weight_key] = g[u][p_1][edge_weight_key]
                g[v][p_2][edge_weight_key] = g[u][p_2][edge_weight_key]
                g[u][v][edge_weight_key] = 0
                
                g.remove_edges_from([(u, p_1), (u, p_2)])
                dummy_node_counter += 1
            nbs = g.neighbors(u)
    return g
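A small usage sketch, assuming the chunks helper yields successive non-overlapping pairs and NetworkX 1.x: a root with four children ends up with two dummy children, each covering one pair, so no node keeps more than two outgoing edges:

import networkx as nx

g = nx.DiGraph()
g.add_node("r", w=0)
for c in "abcd":
    g.add_node(c, w=1)
    g.add_edge("r", c, cost=1)
b = binarize_dag(g, vertex_weight_key="w", edge_weight_key="cost")
# "r" now points at dummies d_1 and d_2; each dummy has zero vertex weight and
# a zero-cost in-edge, while its edges down to the original children keep cost 1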
Code example #17
def get_v_to_subtree_probs(nstates, G_dag, edge_to_P, leaf_to_state_index):
    """
    Get partial likelihoods on an arbitrarily rooted tree.
    Use a Felsenstein-like algorithm
    to compute subtree probs given a subtree root state.
    This depends on the rooted tree structure, the edge_to_P map,
    and the states of the alignment column at the leaves.
    @param nstates: number of states
    @param G_dag: arbitrarily rooted genetic history with branch lengths
    @param edge_to_P: (a, b) to transition matrix
    @param leaf_to_state_index: alignment column information
    @return: map from a vertex to a vector of state-conditioned subtree probs
    """
    v_to_subtree_probs = {}
    for a in reversed(nx.topological_sort(G_dag)):
        successors = G_dag.successors(a)
        if successors:
            subtree_probs = np.ones(nstates, dtype=float)
            for state_index in range(nstates):
                for b in successors:
                    P = edge_to_P[a, b]
                    p = np.dot(P[state_index], v_to_subtree_probs[b])
                    subtree_probs[state_index] *= p
        else:
            state_index = leaf_to_state_index[a]
            subtree_probs = np.zeros(nstates, dtype=float)
            subtree_probs[state_index] = 1
        v_to_subtree_probs[a] = subtree_probs
    return v_to_subtree_probs
Code example #18
File: ffnet.py  Project: pombreda/aichallenge-1
def _dconec(conec, inno):
    """
    Return positions of edges (in conec) of graphs for
    derivative calculation, all packed in one list (dconecno). Additionally,
    the beginnings of each graph in this list are returned (dconecmk).
    """
    dgraphs = []; dconecno = []; dconecmk = [0]
    for idx, i in enumerate(inno):
        dgraph = NX.DiGraph()
        dgraph.add_edges_from(conec)
        dgraph = _dependency(dgraph, i) 
        dsnodes = NX.topological_sort(dgraph)
        for dnode in dsnodes:
            try:  # This is for networkx-0.3x
                for dedge in dgraph.in_edges(dnode):
                    idx = conec.index(dedge) + 1
                    dconecno.append(idx)
            except:  # This is for new networkx-0.99
                for dedge in (dgraph.reverse(copy=True)).edges(dnode):
                    dedge = (dedge[1], dedge[0]) #!!!
                    idx = conec.index(dedge) + 1
                    dconecno.append(idx)
        dgraphs.append(dgraph)
        dconecmk.append(len(dconecno))
    return dgraphs, dconecno, dconecmk
Code example #19
File: utils.py  Project: chaselgrove/nipype
def get_levels(G):
    levels = {}
    for n in nx.topological_sort(G):
        levels[n] = 0
        for pred in G.predecessors_iter(n):
            levels[n] = max(levels[n], levels[pred]+1)
    return levels
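A small usage sketch of get_levels on a diamond-shaped DAG (it relies on the NetworkX 1.x predecessors_iter API used above):

import networkx as nx

G = nx.DiGraph([("a", "b"), ("a", "c"), ("b", "d"), ("c", "d")])
# each node receives 1 + the maximum level among its predecessors:
# get_levels(G) == {"a": 0, "b": 1, "c": 1, "d": 2}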
Code example #20
File: utils.py  Project: JohnGriffiths/nipype
def _iterable_nodes(graph_in):
    """Returns the iterable nodes in the given graph and their join
    dependencies.

    The nodes are ordered as follows:

    - nodes without an itersource precede nodes with an itersource
    - nodes without an itersource are sorted in reverse topological order
    - nodes with an itersource are sorted in topological order

    This order implies the following:

    - every iterable node without an itersource is expanded before any
      node with an itersource

    - every iterable node without an itersource is expanded before any
      of its predecessor iterable nodes without an itersource

    - every node with an itersource is expanded before any of its
      successor nodes with an itersource

    Return the iterable nodes list
    """
    nodes = nx.topological_sort(graph_in)
    inodes = [node for node in nodes if node.iterables is not None]
    inodes_no_src = [node for node in inodes if not node.itersource]
    inodes_src = [node for node in inodes if node.itersource]
    inodes_no_src.reverse()
    return inodes_no_src + inodes_src
Code example #21
File: ffnet.py  Project: pombreda/aichallenge-1
def _ffconec(conec):
    """
    Checks if conec is acyclic, sorts it if necessary and returns the tuple
    (graph, conec, inno, hidno, outno) where:
    conec - sorted input connectivity
    inno/hidno/outno - lists of input/hidden/output units
    """
    if len(conec) == 0: raise ValueError("Empty connectivity list")
    graph = NX.DiGraph()
    graph.add_edges_from(conec)
    snodes = NX.topological_sort(graph)
    if not snodes:
        raise TypeError("Network has cycles.")
    else:
        conec = []; inno = []; hidno = []; outno = []
        for node in snodes:
            try:  # This is for networkx-0.3x
                ins = graph.in_edges(node)
                outs = graph.out_edges(node)
            except:  # This is for new networkx-0.99
                ins = (graph.reverse(copy=True)).edges(node)
                ins = [(v,u) for (u,v) in ins] # reversing back!
                outs = graph.edges(node)
            if not ins and node != 0 :  # biases handling
                inno += [node]
            else:
                conec += ins   #Maybe + [(0,node)] i.e. bias
                if not outs: outno += [node]
                else: 
                    if node != 0: hidno += [node] #bias handling again
    return graph, conec, inno, hidno, outno
Code example #22
File: ffnet.py  Project: pombreda/aichallenge-1
def _bconec(conec, inno):
    """
    Returns positions of edges of reversed graph in conec (for backprop). 
    Conec is assumed to be acyclic.
    """
    bgraph = NX.DiGraph()
    bgraph.add_edges_from(conec)
    bgraph = bgraph.reverse()
    try:  # This is for networkx-0.3x
        bgraph.delete_nodes_from(inno)
        try: bgraph.delete_node(0) #handling biases
        except: pass
    except:  # This is for networkx >= 0.99
        bgraph.remove_nodes_from(inno)
        try: bgraph.remove_node(0) #handling biases
        except: pass
    bsnodes = NX.topological_sort(bgraph)
    bconecno = []
    for bnode in bsnodes:
        try:  # This is for networkx-0.3x
            for bedge in bgraph.in_edges(bnode):
                edge = (bedge[1], bedge[0])
                idx = conec.index(edge) + 1
                bconecno.append(idx)
        except: # This is for new networkx-0.99
            for bedge in (bgraph.reverse(copy=True)).edges(bnode):
                edge = bedge #(bedge[1], bedge[0])
                idx = conec.index(edge) + 1
                bconecno.append(idx)
    return bgraph, bconecno
Code example #23
    def test_bidirectional(self):
        """
         m1 -> m2 <-> m3 -> m4

        """


        self.g.add_edge('m1','m2')
        self.g.add_edge('m2','m3')
        self.g.add_edge('m3','m2')
        self.g.add_edge('m3','m4')

        # remove any models that don't have links
        #for

        # determine cycles
        cycles = n.recursive_simple_cycles(self.g)
        for cycle in cycles:
            # remove edges that form cycles
            self.g.remove_edge(cycle[0],cycle[1])

        # perform toposort
        order = n.topological_sort(self.g)

        # re-add bidirectional dependencies (i.e. cycles)
        for cycle in cycles:
            # find index of inverse link
            for i in xrange(0,len(order)-1):
                if order[i] == cycle[1] and order[i+1] == cycle[0]:
                    order.insert(i+2, cycle[1])
                    order.insert(i+3,cycle[0])
                    break

        self.assertTrue(''.join(order) == 'm1m2m3m2m3m4')
Code example #24
File: simulator.py  Project: chairmanmeow50/nengo
    def __init__(self, model, dt=0.001, seed=None, builder=None):
        if builder is None:
            # By default, we'll use builder.Builder and copy the model.
            builder = Builder(copy=True)

        # Call the builder to build the model
        self.model = builder(model, dt)

        # Note: seed is not used right now, but one day...
        if seed is None:
            seed = self.model._get_new_seed()  # generate simulator seed

        # -- map from Signal.base -> ndarray
        self._sigdict = SignalDict(__time__=np.asarray(0.0, dtype=np.float64))
        for op in self.model.operators:
            op.init_sigdict(self._sigdict, self.model.dt)

        self.dg = self._init_dg()
        self._step_order = [node
                            for node in nx.topological_sort(self.dg)
                            if hasattr(node, 'make_step')]
        self._steps = [node.make_step(self._sigdict, self.model.dt)
                       for node in self._step_order]

        self.n_steps = 0
        self.probe_outputs = dict((probe, []) for probe in self.model.probes)
Code example #25
File: test_dag.py  Project: aparamon/networkx
    def test_topological_sort3(self):
        DG = nx.DiGraph()
        DG.add_edges_from([(1, i) for i in range(2, 5)])
        DG.add_edges_from([(2, i) for i in range(5, 9)])
        DG.add_edges_from([(6, i) for i in range(9, 12)])
        DG.add_edges_from([(4, i) for i in range(12, 15)])

        def validate(order):
            ok_(isinstance(order, list))
            assert_equal(set(order), set(DG))
            for u, v in combinations(order, 2):
                assert_false(nx.has_path(DG, v, u))
        validate(list(nx.topological_sort(DG)))

        DG.add_edge(14, 1)
        assert_raises(nx.NetworkXUnfeasible, consume, nx.topological_sort(DG))
Code example #26
File: cli.py  Project: bioconda/bioconda-utils
def dag(recipe_folder, config, packages="*", format='gml', hide_singletons=False):
    """
    Export the DAG of packages to a graph format file for visualization
    """
    dag, name2recipes = graph.build(utils.get_recipes(recipe_folder, "*"), config)
    if packages != "*":
        dag = graph.filter(dag, packages)
    if hide_singletons:
        for node in nx.nodes(dag):
            if dag.degree(node) == 0:
                dag.remove_node(node)
    if format == 'gml':
        nx.write_gml(dag, sys.stdout.buffer)
    elif format == 'dot':
        write_dot(dag, sys.stdout)
    elif format == 'txt':
        subdags = sorted(map(sorted, nx.connected_components(dag.to_undirected())))
        subdags = sorted(subdags, key=len, reverse=True)
        singletons = []
        for i, s in enumerate(subdags):
            if len(s) == 1:
                singletons += s
                continue
            print("# subdag {0}".format(i))
            subdag = dag.subgraph(s)
            recipes = [
                recipe for package in nx.topological_sort(subdag)
                for recipe in name2recipes[package]]
            print('\n'.join(recipes) + '\n')
        if not hide_singletons:
            print('# singletons')
            recipes = [recipe for package in singletons for recipe in
                       name2recipes[package]]
            print('\n'.join(recipes) + '\n')
Code example #27
File: merge.py  Project: fay19/gene_ontology
	def gotermSummarization(self, P_geneList, P_value, P_minProteinNo):
	#use P_value and P_minProteinNo to summarize a given list of proteins
	#Topologically sort at first

		self.node_Mapping_ProteinList_PV2(P_geneList)
		self.symplify_Goterm_Structure()
		
		#Topologically sort all nodes; the first element is a leaf and the last element is the root
		New_Top_sort_nodes = networkx.topological_sort(self)

		#go through all nodes in topological order from leaves to the root.
		for nodeTp_child in New_Top_sort_nodes:
			nodePV = self.node[nodeTp_child][self.PV]
			nodeSize = len(self.node[nodeTp_child][self.mapping])

			#if the current node's p-value or size does not meet the given thresholds, merge this node into its closest parent node.
			if nodePV>P_value or nodeSize<P_minProteinNo:
				L_parrents = self.successors(nodeTp_child)
				L_MinWeight = 100000; L_MinPa = 'none'
				for nodetp in L_parrents:
					if self.edge[nodeTp_child][nodetp]['weight']<L_MinWeight:
						L_MinWeight = self.edge[nodeTp_child][nodetp]['weight']
						L_MinPa = nodetp
				if L_MinPa == 'none':
					pass
				else:
					self.merge_nodes2(nodeTp_child,L_MinPa)

		return self.printResult()
Code example #28
def submit_jobs(view, G, jobs):
    """Submit jobs via client where G describes the time dependencies."""
    results = {}
    for node in nx.topological_sort(G):
        with view.temp_flags(after=[ results[n] for n in G.predecessors(node) ]):
            results[node] = view.apply(jobs[node])
    return results
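A hedged usage sketch, assuming view is an IPython/ipyparallel load-balanced view (whose temp_flags(after=...) makes a task wait on earlier AsyncResults) and that fetch_fn, clean_fn, and train_fn are hypothetical zero-argument callables:

import networkx as nx

G = nx.DiGraph([("fetch", "clean"), ("clean", "train")])
jobs = {"fetch": fetch_fn, "clean": clean_fn, "train": train_fn}
results = submit_jobs(view, G, jobs)
# "train" waits on "clean" via after=[results["clean"]]; "clean" likewise waits on "fetch"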
Code example #29
def _node_iter(G, g_inputs):
    """
    Run a graph

    G : nx.DiGraph
       Nodes are tools, edges contain attribute 'link' which
       is a list of A -> B pairs for connecting the inputs
       and outputs

    g_inputs : dict
       Global inputs to tools.  Keyed on tool
       instances, values are dicts keyed on name
       of input values
    """
    # sort the tools by topological order so all needed inputs are
    # available
    for job in nx.topological_sort(G):
        # get input edges
        input_edges = G.in_edges(job)
        in_link_info = dict()
        # construct dict of useful information
        for parent, child in input_edges:
            print(parent, child)
            in_link_info[parent] = G[parent][child]['links']
        # get output edges
        output_edges = G.out_edges(job)
        out_link_info = dict()
        # construct dict of useful information
        for parent, child in output_edges:
            out_link_info[child] = G[parent][child]['links']
        # pull the tool args out of the global dict
        args = g_inputs[job]

        # push work off to helper function
        yield (job, in_link_info, out_link_info, args)
Code example #30
File: base.py  Project: mvnnn/tardis
    def _resolve_update_list(self, changed_properties):
        """
        Returns a list of all plasma models which are affected by the
        changed_properties due to their dependency in the
        plasma_graph.

        Parameters
        ----------

        changed_properties : ~list
            all properties changed in the plasma

        Returns
        -------

            : ~list
            all affected modules.
        """

        descendants_ob = []

        for plasma_property in changed_properties:
            node_name = self.outputs_dict[plasma_property].name
            descendants_ob += nx.descendants(self.graph, node_name)

        descendants_ob = list(set(descendants_ob))
        sort_order = nx.topological_sort(self.graph)

        descendants_ob.sort(key=lambda val: sort_order.index(val))

        logger.debug("Updating modules in the following order:".format("->".join(descendants_ob)))

        return descendants_ob
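Note that sort_order is a list only under NetworkX 1.x; under 2.x topological_sort returns a generator and sort_order.index would fail. A sketch of the same ordering that works on 2.x and also avoids the O(n) index scan per element:

sort_order = list(nx.topological_sort(self.graph))
position = {name: i for i, name in enumerate(sort_order)}
descendants_ob.sort(key=position.__getitem__)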
Code example #31
def compute_builds(path,
                   base_name,
                   git_rev=None,
                   stop_rev=None,
                   folders=None,
                   matrix_base_dir=None,
                   steps=0,
                   max_downstream=5,
                   test=False,
                   public=True,
                   output_dir='../output',
                   output_folder_label='git',
                   config_overrides=None,
                   **kw):
    if not git_rev and not folders:
        raise ValueError(
            "Either git_rev or folders list are required to know what to compute"
        )
    checkout_rev = stop_rev or git_rev
    path = path.replace('"', '')
    if not folders:
        folders = git_changed_recipes(git_rev, stop_rev, git_root=path)
    if not folders:
        print(
            "No folders specified to build, and nothing changed in git.  Exiting."
        )
        return
    matrix_base_dir = matrix_base_dir or path
    # clean up quoting from concourse template evaluation
    matrix_base_dir = matrix_base_dir.replace('"', '')

    repo_commit = ''
    git_identifier = ''
    if checkout_rev:
        with checkout_git_rev(checkout_rev, path):
            git_identifier = _get_current_git_rev(path)
            task_graph = collect_tasks(path,
                                       folders=folders,
                                       steps=steps,
                                       max_downstream=max_downstream,
                                       test=test,
                                       matrix_base_dir=matrix_base_dir)
            try:
                repo_commit = _get_current_git_rev(path)
            except subprocess.CalledProcessError:
                repo_commit = 'master'
    else:
        task_graph = collect_tasks(path,
                                   folders=folders,
                                   steps=steps,
                                   max_downstream=max_downstream,
                                   test=test,
                                   matrix_base_dir=matrix_base_dir)

    with open(os.path.join(matrix_base_dir, 'config.yml')) as src:
        data = yaml.load(src)
    data['recipe-repo-commit'] = repo_commit

    if config_overrides:
        data.update(config_overrides)

    plan = graph_to_plan_with_jobs(os.path.abspath(path),
                                   task_graph,
                                   commit_id=repo_commit,
                                   matrix_base_dir=matrix_base_dir,
                                   config_vars=data,
                                   public=public)

    output_dir = output_dir.format(base_name=base_name,
                                   git_identifier=git_identifier)

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, 'plan.yml'), 'w') as f:
        yaml.dump(plan, f, default_flow_style=False)

    # expand folders to include any dependency builds or tests
    if not os.path.isabs(path):
        path = os.path.normpath(os.path.join(os.getcwd(), path))
    for fn in glob.glob(os.path.join(output_dir, 'output_order*')):
        os.remove(fn)
    last_recipe_dir = None
    for node in nx.topological_sort(task_graph, reverse=True):
        meta = task_graph.node[node]['meta']
        if meta.meta_path:
            recipe = os.path.dirname(meta.meta_path)
        else:
            recipe = meta.get('extra', {}).get('parent_recipe', {})
        assert recipe, ("no parent recipe set, and no path associated "
                        "with this metadata")
        # make recipe path relative
        recipe = recipe.replace(path + '/', '')
        # copy base recipe into a folder named for this node
        out_folder = os.path.join(output_dir, node)
        if os.path.isdir(out_folder):
            shutil.rmtree(out_folder)
        shutil.copytree(os.path.join(path, recipe), out_folder)
        # write the conda_build_config.yml for this particular metadata into that recipe
        #   This should sit alongside meta.yaml, where conda-build will be able to find it
        with open(os.path.join(out_folder, 'conda_build_config.yaml'),
                  'w') as f:
            yaml.dump(meta.config.squished_variants,
                      f,
                      default_flow_style=False)
        order_fn = 'output_order_' + task_graph.node[node]['worker']['label']
        with open(os.path.join(output_dir, order_fn), 'a') as f:
            f.write(node + '\n')
        recipe_dir = os.path.dirname(recipe) if os.sep in recipe else recipe
        if not last_recipe_dir or last_recipe_dir != recipe_dir:
            order_recipes_fn = 'output_order_recipes_' + task_graph.node[node][
                'worker']['label']
            with open(os.path.join(output_dir, order_recipes_fn), 'a') as f:
                f.write(recipe_dir + '\n')
            last_recipe_dir = recipe_dir
Code example #32
 def get_schedule(self, simulation):
     schedule = {host: [] for host in simulation.hosts}
     graph = simulation.get_task_graph()
     for task in networkx.topological_sort(graph):
         schedule[random.choice(simulation.hosts)].append(task)
     return schedule
Code example #33
File: workflows.py  Project: Raniac/NEURO-LEARN
 def _generate_flatgraph(self):
     """Generate a graph containing only Nodes or MapNodes
     """
     import networkx as nx
     logger.debug('expanding workflow: %s', self)
     nodes2remove = []
     if not nx.is_directed_acyclic_graph(self._graph):
         raise Exception(('Workflow: %s is not a directed acyclic graph '
                          '(DAG)') % self.name)
     nodes = list(nx.topological_sort(self._graph))
     for node in nodes:
         logger.debug('processing node: %s', node)
         if isinstance(node, Workflow):
             nodes2remove.append(node)
             # use in_edges instead of in_edges_iter to allow
             # disconnections to take place properly. otherwise, the
             # edge dict is modified.
             # dj: added list() for networkx ver.2
             for u, _, d in list(
                     self._graph.in_edges(nbunch=node, data=True)):
                 logger.debug('in: connections-> %s', to_str(d['connect']))
                 for cd in deepcopy(d['connect']):
                     logger.debug("in: %s", to_str(cd))
                     dstnode = node._get_parameter_node(cd[1], subtype='in')
                     srcnode = u
                     srcout = cd[0]
                     dstin = cd[1].split('.')[-1]
                     logger.debug('in edges: %s %s %s %s', srcnode, srcout,
                                  dstnode, dstin)
                     self.disconnect(u, cd[0], node, cd[1])
                     self.connect(srcnode, srcout, dstnode, dstin)
             # do not use out_edges_iter for reasons stated in in_edges
             # dj: for ver 2 use list(out_edges)
             for _, v, d in list(
                     self._graph.out_edges(nbunch=node, data=True)):
                 logger.debug('out: connections-> %s', to_str(d['connect']))
                 for cd in deepcopy(d['connect']):
                     logger.debug("out: %s", to_str(cd))
                     dstnode = v
                     if isinstance(cd[0], tuple):
                         parameter = cd[0][0]
                     else:
                         parameter = cd[0]
                     srcnode = node._get_parameter_node(parameter,
                                                        subtype='out')
                     if isinstance(cd[0], tuple):
                         srcout = list(cd[0])
                         srcout[0] = parameter.split('.')[-1]
                         srcout = tuple(srcout)
                     else:
                         srcout = parameter.split('.')[-1]
                     dstin = cd[1]
                     logger.debug('out edges: %s %s %s %s', srcnode, srcout,
                                  dstnode, dstin)
                     self.disconnect(node, cd[0], v, cd[1])
                     self.connect(srcnode, srcout, dstnode, dstin)
             # expand the workflow node
             # logger.debug('expanding workflow: %s', node)
             node._generate_flatgraph()
             for innernode in node._graph.nodes():
                 innernode._hierarchy = '.'.join(
                     (self.name, innernode._hierarchy))
             self._graph.add_nodes_from(node._graph.nodes())
             self._graph.add_edges_from(node._graph.edges(data=True))
     if nodes2remove:
         self._graph.remove_nodes_from(nodes2remove)
     logger.debug('finished expanding workflow: %s', self)
Code example #34
 def get_target_topic(self):
     return nx.topological_sort(self.graph)[-1]
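This one-liner likewise assumes NetworkX 1.x; under 2.x the returned generator cannot be indexed. A version-agnostic sketch:

import networkx as nx

def get_target_topic(self):
    # materialize the order so the last element can be taken under NetworkX 2.x too
    return list(nx.topological_sort(self.graph))[-1]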
Code example #35
    def set_max_size(g, max_size):
        unary_graph = nx.DiGraph()
        for x in g.nonterminals:
            unary_graph.add_node(x)
        for lhs, rules in g.by_lhs.items():
            for rule in rules:
                if len(rule.cfg_rhs) == 1 and rule.cfg_rhs[0] in g.nonterminals:
                    unary_graph.add_edge(rule.lhs, rule.cfg_rhs[0],
                                         weight=rule.prob)

        try:
            topological = nx.topological_sort(nx.DiGraph(unary_graph),
                                              reverse=True)
            unary_matrix = None
        except nx.NetworkXUnfeasible:
            topological = list(g.nonterminals)
            # unary_matrix[i][j] == 1 means there is a rule i->j
            unary_matrix = np.array(
                nx.to_numpy_matrix(unary_graph, topological))
            # find infinite summation over chains of unary rules (at least one long)
            try:
                unary_matrix = np.dot(
                    unary_matrix,
                    np.array(
                        np.linalg.inv(np.eye(len(topological)) -
                                      unary_matrix)))
            except np.linalg.LinAlgError as e:
                raise np.linalg.LinAlgError(
                    e.message + " (cycle of unary rules with weight >= 1)")

        nt_to_index = {x: i for i, x in enumerate(topological)}

        alpha = np.empty((len(topological), max_size + 1))
        alpha.fill(-np.inf)
        for size in range(1, max_size + 1):
            for lhs_i, lhs in enumerate(topological):
                for rule in g.by_lhs[lhs]:
                    if unary_matrix is not None:
                        # we'll do unary rules later
                        if len(rule.cfg_rhs) == 1 and rule.cfg_rhs[0] in g.nonterminals:
                            continue

                    nts = [
                        nt_to_index[x] for x in rule.cfg_rhs
                        if x in g.nonterminals
                    ]
                    n = size - (len(rule.cfg_rhs) - len(nts))  # total size available for nonterminals

                    if len(nts) == 0:
                        if n != 0:
                            continue
                        p = 0.

                    elif len(nts) == 1:
                        p = alpha[nts[0], n]

                    elif len(nts) == 2:
                        if n < 2:
                            continue

                        p = np.logaddexp.reduce([
                            alpha[nts[0], k] + alpha[nts[1], n - k]
                            for k in g.splits(n)
                        ])

                    else:
                        raise ValueError("more than two nonterminals in rhs")

                    with np.errstate(invalid='ignore'):
                        alpha[lhs_i,
                              size] = np.logaddexp(alpha[lhs_i, size],
                                                   np.log(rule.prob) + p)
            # Apply unary rules
            # If we weren't in log-space, this would be:
            # alpha[:,size] = unary_matrix * alpha[:,size]
            if unary_matrix is not None:
                lz = np.max(alpha[:, size])
                # the reason we made unary_matrix be 1+ applications of unary
                # rules is just in case of underflow here
                alpha[:, size] = np.logaddexp(
                    alpha[:, size],
                    np.log(np.dot(unary_matrix, np.exp(alpha[:, size] - lz))) +
                    lz)
        g.alpha = alpha
        g.topological = topological
Code example #36
def generate_reference(name, chain):
    """Generates a simple, unoptimized SDFG to run on the CPU, for verification
       purposes."""

    sdfg = SDFG(name)

    for k, v in chain.constants.items():
        sdfg.add_constant(k, v["value"], dace.data.Scalar(v["data_type"]))

    (dimensions_to_skip, shape, vector_length, parameters, iterators,
     memcopy_indices, memcopy_accesses) = _generate_init(chain)

    prev_state = sdfg.add_state("init")

    # Throw vectorization in the bin for the reference code
    vector_length = 1

    shape = tuple(map(int, shape))

    input_shapes = {}  # Maps inputs to their shape tuple

    for node in chain.graph.nodes():
        if isinstance(node, Input) or isinstance(node, Output):
            if isinstance(node, Input):
                for output in node.outputs.values():
                    pars = tuple(
                        output["input_dims"]
                    ) if "input_dims" in output and output[
                        "input_dims"] is not None else tuple(parameters)
                    arr_shape = tuple(s for s, p in zip(shape, parameters)
                                      if p in pars)
                    input_shapes[node.name] = arr_shape
                    break
                else:
                    raise ValueError("No outputs found for input node.")
            else:
                arr_shape = shape
            if len(arr_shape) > 0:
                try:
                    sdfg.add_array(node.name, arr_shape, node.data_type)
                except NameError:
                    sdfg.data(
                        node.name).access = dace.dtypes.AccessType.ReadWrite
            else:
                sdfg.add_symbol(node.name, node.data_type)

    for link in chain.graph.edges(data=True):
        name = link[0].name
        if name not in sdfg.arrays and name not in sdfg.symbols:
            sdfg.add_array(name, shape, link[0].data_type, transient=True)
            input_shapes[name] = tuple(shape)

    input_iterators = {
        k: tuple("0:{}".format(s) for s in v)
        for k, v in input_shapes.items()
    }

    # Enforce dependencies via topological sort
    for node in nx.topological_sort(chain.graph):

        if not isinstance(node, Kernel):
            continue

        state = sdfg.add_state(node.name)
        sdfg.add_edge(prev_state, state, dace.InterstateEdge())

        (stencil_node, input_to_connector,
         output_to_connector) = _generate_stencil(node, chain, shape,
                                                  dimensions_to_skip)
        stencil_node.implementation = "CPU"

        for field, connector in input_to_connector.items():

            if len(input_iterators[field]) == 0:
                continue  # Scalar variable

            # Outer memory read
            read_node = state.add_read(field)
            state.add_memlet_path(read_node,
                                  stencil_node,
                                  dst_conn=connector,
                                  memlet=Memlet.simple(
                                      field,
                                      ", ".join(input_iterators[field])))

        for _, connector in output_to_connector.items():

            # Outer write
            write_node = state.add_write(node.name)
            state.add_memlet_path(stencil_node,
                                  write_node,
                                  src_conn=connector,
                                  memlet=Memlet.simple(
                                      node.name, ", ".join("0:{}".format(s)
                                                           for s in shape)))

        prev_state = state

    return sdfg
Code example #37
File: graph.py  Project: pythseq/MIX
def maximal_independant_longest_path_for_acyclic_graph(acyclic_graph,inv_mapping={}):
	# Compute longest-path-acyclic 
	g_dir_trans_io = nx.DiGraph(acyclic_graph)


	# After the cycle reduction, some nodes might have disappeared:
	# this can happen when we had an SCC without inputs. Once that SCC is removed, some nodes may no longer have predecessors.
	# We therefore connect these predecessor-less nodes back to the In node.

	for inp in [x for x,v in g_dir_trans_io.node.items() if ("In" in v) and (x!="In")]+[x for x,v in g_dir_trans_io.in_degree().items() if (v==0) and (x!="In")]:
		g_dir_trans_io.add_edge("In",inp,attr_dict={"weight":0})

	for inp in [x for x,v in g_dir_trans_io.node.items() if ("Out" in v) and (x!="Out")]+[x for x,v in g_dir_trans_io.out_degree().items() if (v==0) and (x!="Out")]:
		g_dir_trans_io.add_edge(inp,"Out",attr_dict={"weight":0})

	nodes_in_order = nx.topological_sort(g_dir_trans_io)
	# The sort should start and end with resp. In and Out
	assert(nodes_in_order[0]=="In")
	assert(nodes_in_order[-1]=="Out")

	# Update each node's attributes with the accumulated length from the beginning
	# Q? Should we store each path separately or a greedy alg is ok? trying greedy 
	# Q? How to deal with mulitple equivalent paths ?

	for n in g_dir_trans_io:
		g_dir_trans_io.node[n]['in_longest']="False"
	for src,tgt in g_dir_trans_io.edges():
		g_dir_trans_io[src][tgt]["in_longest"]="False"

	all_longest_path_trans_io = []
	nodes_in_any_longest_path = set()

	inputs_nodes = [k for k,v in g_dir_trans_io.in_degree().items() if v==0]
	inputs_nodes.extend([x for x,v in g_dir_trans_io.node.items() if "In" in v])
	outputs_nodes = [k for k,v in g_dir_trans_io.out_degree().items() if v==0]
	outputs_nodes.extend([x for x,v in g_dir_trans_io.node.items() if "Out" in v])



	# Todo: Account for contig ID in longest path
	for long_path_index in range(0,2000):
		# Clear all longest path info 
		print "longest path iteration",long_path_index
		# print nodes_in_any_longest_path

		for n in g_dir_trans_io.node.keys():
			if "longest_path" in g_dir_trans_io.node[n]:
				del g_dir_trans_io.node[n]['longest_path']

		#Init
		g_dir_trans_io.node['In']['longest_path']=[0,[]]
		for n in nodes_in_order[1:]:
			contig = g_dir_trans_io.node[n].get("contig",n)
			if contig in nodes_in_any_longest_path:
				continue

			pred = [x for x in g_dir_trans_io.predecessors(n) if g_dir_trans_io.node[x].get("contig",x) not in nodes_in_any_longest_path]

			if len(pred)==0: 
				# This can happen in cases where we had a SCC but without inputs. Once this SCC is removed, some nodes might not have predecessors anymore 
				longest_path=[0,[]]
			else:
				longest_path= [(g_dir_trans_io.node[pred[0]]['longest_path'][0] + g_dir_trans_io[pred[0]][n]['weight']),\
							pred[0]]
			for p in pred:
				if (g_dir_trans_io.node[p]['longest_path'][0]+ g_dir_trans_io[p][n]['weight']) > longest_path[0]:
					longest_path= [(g_dir_trans_io.node[p]['longest_path'][0]+ g_dir_trans_io[p][n]['weight']),p]
			g_dir_trans_io.node[n]['longest_path']=longest_path


		# Rebuild the longest path, starting from "Out" up to "In" in predecessor order 
		current_node = "Out"
		longest_path_trans_io = []
		while (current_node != "In") and current_node!=[]: 
			longest_path_trans_io.append((current_node,g_dir_trans_io.node[current_node]["longest_path"][0]))
			current_node=g_dir_trans_io.node[current_node]['longest_path'][1]
		if current_node==[]:
			break
		longest_path_trans_io.append((current_node,g_dir_trans_io.node[current_node]["longest_path"][0]))
		longest_path_trans_io.reverse()

		print longest_path_trans_io
		print [g_dir_trans_io.node[x[0]].get("contig",x[0]) for x in longest_path_trans_io]
		if len(longest_path_trans_io)<=2: # only in and out?
			break
		all_longest_path_trans_io.append(longest_path_trans_io)

		# Mark this path in the graph

		g_dir_trans_io.node['Out']['in_longest']="True"


		for i in range(0, len(longest_path_trans_io)-1):
			n = longest_path_trans_io[i][0]
			g_dir_trans_io.node[n]['in_longest']=long_path_index
			g_dir_trans_io[longest_path_trans_io[i][0]][longest_path_trans_io[i+1][0]]['in_longest']=long_path_index
			# debug_here()
			if (n not in ["In","Out"]):
				nodes_in_any_longest_path.add(g_dir_trans_io.node[n]['contig'])
	# Transform the longest path into the original graph IDs
	original_longest_paths=[]
	for a_lp in all_longest_path_trans_io:
		lp = [inv_mapping.get(x[0].split("@")[0],x[0].split("@")[0]) for x in a_lp]
		original_longest_paths.append(lp)
	return original_longest_paths
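Note: the accumulation loop above is the textbook single-pass longest-path recurrence on a DAG. For reference, newer NetworkX versions ship it directly as nx.dag_longest_path; a minimal sketch on a made-up weighted graph (node names are illustrative, not from MIX):

import networkx as nx

g = nx.DiGraph()
g.add_weighted_edges_from([("In", "a", 0), ("a", "b", 3),
                           ("b", "Out", 1), ("a", "c", 1), ("c", "Out", 2)])
# Walks nodes in topological order, keeping the best accumulated weight per
# node over all its predecessors -- the same recurrence as the loop above.
print(nx.dag_longest_path(g, weight="weight"))  # ['In', 'a', 'b', 'Out']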
Code example #38
File: conll.py Project: vyraun/xtreme
    def _keep_fused_form(self, posPreferenceDicts):
        # For a span A,B and external tokens C, such that A > B > C, we have to:
        # Make A the head of the span
        # Attach C-level tokens to A
        # Remove B-level tokens, which are the subtokens of the fused form della: de la

        if self.graph["multi_tokens"] == {}:
            return

        spanheads = []
        spanhead_fused_token_dict = {}
        # This double iteration is overkill; one could skip the spanhead identification,
        # but this way we avoid modifying the tree as we read it
        for fusedform_idx in sorted(self.graph["multi_tokens"]):
            fusedform_start, fusedform_end = self.graph["multi_tokens"][
                fusedform_idx]["id"]
            fuseform_span = list(range(fusedform_start, fusedform_end + 1))
            spanhead = self._choose_spanhead_from_heuristics(
                fuseform_span, posPreferenceDicts)
            #if not spanhead:
            #    spanhead = self._choose_spanhead_from_heuristics(fuseform_span,posPreferenceDicts)
            spanheads.append(spanhead)
            spanhead_fused_token_dict[spanhead] = fusedform_idx

        # try:
        #     order = list(nx.topological_sort(self))
        # except nx.NetworkXUnfeasible:
        #     msg = 'Circular dependency detected between hooks'
        #     problem_graph = ', '.join(f'{a} -> {b}'
        #                   for a, b in nx.find_cycle(self))
        #     print('nx.simple_cycles', list(nx.simple_cycles(self)))
        #     print(problem_graph)
        #     exit(0)
        # for edge in list(nx.simple_cycles(self)):
        #     self.remove_edge(edge[0], edge[1])
        self = remove_all_cycle(self)
        bottom_up_order = [
            x for x in nx.topological_sort(self) if x in spanheads
        ]
        for spanhead in bottom_up_order:
            fusedform_idx = spanhead_fused_token_dict[spanhead]
            fusedform = self.graph["multi_tokens"][fusedform_idx]["form"]
            fusedform_start, fusedform_end = self.graph["multi_tokens"][
                fusedform_idx]["id"]
            fuseform_span = list(range(fusedform_start, fusedform_end + 1))

            if spanhead:
                #Step 1: Replace form of head span (A)  with fusedtoken form  -- in this way we keep the lemma and features if any
                self.nodes[spanhead]["form"] = fusedform
                # 2-  Reattach C-level (external dependents) to A
                #print(fuseform_span,spanhead)

                internal_dependents = set(fuseform_span) - set([spanhead])
                external_dependents = [
                    dict(nx.bfs_successors(self, x))  # bfs_successors yields (node, children) pairs
                    for x in internal_dependents
                ]
                for depdict in external_dependents:
                    for localhead in depdict:
                        for ext_dep in depdict[localhead]:
                            if ext_dep in self[localhead]:
                                deprel = self[localhead][ext_dep]["deprel"]
                                self.remove_edge(localhead, ext_dep)
                                self.add_edge(spanhead, ext_dep, deprel=deprel)

                #3- Remove B-level tokens
                for int_dep in internal_dependents:
                    self.remove_edge(self.head_of(int_dep), int_dep)
                    self.remove_node(int_dep)

        #4 reconstruct tree at the very end
        new_index_dict = {}
        for new_node_index, old_node_idex in enumerate(sorted(self.nodes())):
            new_index_dict[old_node_idex] = new_node_index

        T = DependencyTree()  # Transfer DiGraph, to replace self

        for n in sorted(self.nodes()):
            T.add_node(new_index_dict[n], **self.nodes[n])

        for h, d in self.edges():
            T.add_edge(new_index_dict[h],
                       new_index_dict[d],
                       deprel=self[h][d]["deprel"])
        #4A Quick removal of edges and nodes
        self.__init__()

        #4B Rewriting the Deptree in Self
        # TODO There must be a more elegant way to rewrite self -- self = T, for instance?
        for n in sorted(T.nodes()):
            self.add_node(n, **T.nodes[n])

        for h, d in T.edges():
            self.add_edge(h, d, **T[h][d])

        # 5. remove all fused forms from the multi_tokens field
        self.graph["multi_tokens"] = {}
Code example #39
    def apply(cls, stack, model, *args, **kwargs):
        if 'path_to_feeder_file' in kwargs:
            path_to_feeder_file = kwargs['path_to_feeder_file']
        else:
            raise ValueError('Missing path to feeder file')

        path_to_no_feeder_file = None
        if 'path_to_no_feeder_file' in kwargs:
            path_to_no_feeder_file = kwargs['path_to_no_feeder_file']

        if 'compute_metrics' in kwargs:
            compute_metrics = kwargs['compute_metrics']
        else:
            compute_metrics = False

        if 'compute_kva_density_with_transformers' in kwargs:
            compute_kva_density_with_transformers = kwargs[
                'compute_kva_density_with_transformers']
        else:
            compute_kva_density_with_transformers = True

        if compute_metrics:
            if 'excel_output' in kwargs:
                excel_output = kwargs['excel_output']
            else:
                raise ValueError('Missing output file name for excel')

            if 'json_output' in kwargs:
                json_output = kwargs['json_output']
            else:
                raise ValueError('Missing output file name for json')

        #Open and read feeder.txt
        with open(path_to_feeder_file, 'r') as f:
            lines = f.readlines()

        #Parse feeder.txt to have the feeder structure of the network
        feeders = {}
        substations = {}
        substation_transformers = {}

        for line in lines[1:]:

            #Parse the line
            node, sub, feed, sub_trans = map(lambda x: x.strip().lower(),
                                             line.split(' '))

            #If feeder is new, then add it to the feeders dict
            if feed not in feeders:

                #Initialize with a list holding the node
                feeders[feed] = [node.lower().replace('.', '')]

            #Otherwise, just append the node
            else:
                feeders[feed].append(node.lower().replace('.', ''))

            #Same thing for the substation
            if feed not in substations:
                substations[feed] = sub.lower().replace('.', '')

            #Same thing for substation_transformers
            if feed not in substation_transformers:
                substation_transformers[feed] = sub.lower().replace('.', '')

        if path_to_no_feeder_file is not None:
            with open(path_to_no_feeder_file, 'r') as f:
                lines = f.readlines()
            for line in lines[1:]:
                node, feed = map(lambda x: x.strip().lower(), line.split(' '))
                if feed != 'mv-mesh':
                    if 'subtransmission' not in feeders:
                        feeders['subtransmission'] = [
                            node.lower().replace('.', '')
                        ]
                    else:
                        feeders['subtransmission'].append(node.lower().replace(
                            '.', ''))
                    if 'subtransmission' not in substations:
                        substations['subtransmission'] = ''

        #Create a network analyzer object
        network_analyst = NetworkAnalyzer(model)

        #Add the feeder information to the network analyzer
        network_analyst.add_feeder_information(
            list(feeders.keys()), list(feeders.values()), substations,
            '')  #TODO find a way to get the feeder type

        #Split the network into feeders
        network_analyst.split_network_into_feeders()

        #Tag the objects
        network_analyst.tag_objects()

        #Set the names
        network_analyst.model.set_names()

        # Set reclosers. This algorithm finds the closest 1/3 of goabs to the feeder head (in topological order)
        # without common ancestry, i.e. no recloser should be upstream of another recloser. If this is not possible,
        # the number of reclosers is decreased.

        recloser_proportion = 0.33
        all_goabs = {}
        np.random.seed(0)
        tmp_network = Network()
        tmp_network.build(network_analyst.model, 'st_mat')
        tmp_network.set_attributes(network_analyst.model)
        tmp_network.remove_open_switches(network_analyst.model)
        tmp_network.rebuild_digraph(network_analyst.model, 'st_mat')
        sorted_elements = []
        for element in nx.topological_sort(tmp_network.digraph):
            sorted_elements.append(element)
        for i in network_analyst.model.models:
            if (isinstance(i, Line) and i.is_switch is not None and i.is_switch
                    and i.name is not None and 'goab' in i.name.lower()):
                is_open = False
                for wire in i.wires:
                    if wire.is_open:
                        is_open = True
                if is_open:
                    continue
                if hasattr(
                        i, 'feeder_name'
                ) and i.feeder_name is not None and i.feeder_name != 'subtransmission':
                    if i.feeder_name in all_goabs:
                        all_goabs[i.feeder_name].append(i.name)
                    else:
                        all_goabs[i.feeder_name] = [i.name]

        for key in list(all_goabs.keys()):
            # Topological sorting is done by node; this maps goabs to their end-node
            feeder_goabs_dic = {}
            for goab in all_goabs[key]:
                # shouldn't have multiple switches ending at the same node
                feeder_goabs_dic[model[goab].to_element] = goab
            feeder_goabs = []
            feeder_goab_ends = []
            for element in sorted_elements:
                if element in feeder_goabs_dic:
                    feeder_goabs.append(feeder_goabs_dic[element])
                    feeder_goab_ends.append(element)
            connectivity_matrix = [[False for i in range(len(feeder_goabs))]
                                   for j in range(len(feeder_goabs))]
            for i in range(len(feeder_goabs)):
                recloser1 = feeder_goab_ends[i]
                for j in range(i + 1, len(feeder_goabs)):
                    recloser2 = feeder_goab_ends[j]
                    if recloser2 == recloser1:
                        continue
                    connected = nx.has_path(tmp_network.digraph, recloser1,
                                            recloser2)
                    connectivity_matrix[i][j] = connected
                    if connected:
                        connectivity_matrix[j][i] = connected

            selected_goabs = []
            num_goabs = int(len(feeder_goabs) * float(recloser_proportion))
            finished = False
            if num_goabs == 0:
                finished = True
            while not finished:
                for i in range(len(feeder_goabs)):
                    current_set = set([i])
                    for j in range(i + 1, len(feeder_goabs)):
                        skip_this_one = False
                        for k in current_set:
                            if connectivity_matrix[j][
                                    k]:  # i.e. check whether the candidate shares anything upstream or downstream with the set
                                skip_this_one = True
                                break
                        if skip_this_one:
                            continue
                        current_set.add(j)
                        if len(current_set) == num_goabs:
                            break
                    if len(current_set) == num_goabs:
                        finished = True
                        for k in current_set:
                            selected_goabs.append(feeder_goabs[k])
                        break
                if not finished:
                    num_goabs -= 1

            #selected_goabs = np.random.choice(feeder_goabs,int(len(feeder_goabs)*float(recloser_proportion)))

            for recloser in selected_goabs:
                network_analyst.model[recloser].is_switch = False
                network_analyst.model[recloser].is_recloser = True
                if network_analyst.model[recloser].wires is not None:
                    for wire in network_analyst.model[recloser].wires:
                        wire.is_switch = False
                        wire.is_recloser = True
                network_analyst.model[recloser].name = network_analyst.model[
                    recloser].name.replace('goab', 'recloser')
                network_analyst.model[recloser].nameclass = 'recloser'
        network_analyst.model.set_names()

        #Compute the metrics if needed
        if compute_metrics:

            #Compute metrics
            network_analyst.compute_all_metrics_per_feeder(
                compute_kva_density_with_transformers=
                compute_kva_density_with_transformers)

            #Export metrics to Excel
            network_analyst.export(excel_output)

            #Export metrics to JSON
            network_analyst.export_json(json_output)

        #Return the model
        return network_analyst.model
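The recloser selection above is essentially a greedy search for switches with pairwise-disjoint reachability. A stripped-down sketch of the same "no common upstream/downstream" test on a toy graph (names hypothetical):

import networkx as nx

dg = nx.DiGraph([("head", "a"), ("a", "b"), ("head", "c")])
picked = []
for n in nx.topological_sort(dg):
    if n == "head":
        continue
    # accept n only if it shares no path with any node already picked
    if all(not nx.has_path(dg, p, n) and not nx.has_path(dg, n, p)
           for p in picked):
        picked.append(n)
print(picked)  # ['a', 'c'] (order may vary); 'b' is downstream of 'a'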
Code example #40
File: pipeline.py Project: srinivasav22/kale
 def _topological_sort(self) -> Iterable[Step]:
     return self._steps_iterable(nx.topological_sort(self))
Code example #41
File: build.py Project: cshinn363/bioconda-utils
def build_recipes(recipe_folder: str, config_path: str, recipes: List[str],
                  mulled_test: bool = True, testonly: bool = False,
                  force: bool = False,
                  docker_builder: docker_utils.RecipeBuilder = None,
                  label: str = None,
                  anaconda_upload: bool = False,
                  mulled_upload_target=None,
                  check_channels: List[str] = None,
                  do_lint: bool = None,
                  lint_exclude: List[str] = None,
                  n_workers: int = 1,
                  worker_offset: int = 0,
                  keep_old_work: bool = False):
    """
    Build one or many bioconda packages.

    Arguments:
      recipe_folder: Directory containing possibly many, and possibly nested, recipes.
      config_path: Path to config file
      recipes: List of recipes to consider. Note that recipes will still be
        filtered out by any blacklists specified in the config.
      mulled_test: If true, test the package in a minimal container.
      testonly: If true, only run test.
      force: If true, build the recipe even though it would otherwise be filtered out.
      docker_builder: If specified, use to build all recipes
      label: If specified, use to label uploaded packages on anaconda. Default is "main" label.
      anaconda_upload: If true, upload the package(s) to anaconda.org.
      mulled_upload_target: If specified, upload the mulled docker image to the given target
        on quay.io.
      check_channels: Channels to check to see if packages already exist in them.
        Defaults to every channel in the config file except "defaults".
      do_lint: Whether to run linter
      lint_exclude: List of linting functions to exclude.
      n_workers: The number of parallel instances of bioconda-utils being run. The
        sub-DAGs are then split into groups of n_workers size.
      worker_offset: If n_workers is >1, only the sub-DAG at this offset within
        each group is processed by this instance.
      keep_old_work: Do not remove anything from environment, even after successful build and test.
    """
    if not recipes:
        logger.info("Nothing to be done.")
        return True

    config = utils.load_config(config_path)
    blacklist = utils.get_blacklist(config, recipe_folder)

    # get channels to check
    if check_channels is None:
        if config['channels']:
            check_channels = [c for c in config['channels'] if c != "defaults"]
        else:
            check_channels = []

    # setup linting
    if do_lint:
        always_exclude = ('build_number_needs_bump',)
        if not lint_exclude:
            lint_exclude = always_exclude
        else:
            lint_exclude = tuple(set(lint_exclude) | set(always_exclude))
        linter = lint.Linter(config, recipe_folder, lint_exclude)
    else:
        linter = None

    failed = []

    dag, name2recipes = graph.build(recipes, config=config_path, blacklist=blacklist)
    if not dag:
        logger.info("Nothing to be done.")
        return True

    skip_dependent = defaultdict(list)
    dag = remove_cycles(dag, name2recipes, failed, skip_dependent)
    subdag = get_subdags(dag, n_workers, worker_offset)
    if not subdag:
        logger.info("Nothing to be done.")
        return True
    logger.info("%i recipes to build and test: \n%s", len(subdag), "\n".join(subdag.nodes()))

    recipe2name = {}
    for name, recipe_list in name2recipes.items():
        for recipe in recipe_list:
            recipe2name[recipe] = name

    recipes = [(recipe, recipe2name[recipe])
               for package in nx.topological_sort(subdag)
               for recipe in name2recipes[package]]


    built_recipes = []
    skipped_recipes = []
    failed_uploads = []

    for recipe, name in recipes:
        if name in skip_dependent:
            logger.info('BUILD SKIP: skipping %s because it depends on %s '
                        'which had a failed build.',
                        recipe, skip_dependent[name])
            skipped_recipes.append(recipe)
            continue

        logger.info('Determining expected packages for %s', recipe)
        try:
            pkg_paths = utils.get_package_paths(recipe, check_channels, force=force)
        except utils.DivergentBuildsError as exc:
            logger.error('BUILD ERROR: packages with divergent build strings in repository '
                         'for recipe %s. A build number bump is likely needed: %s',
                         recipe, exc)
            failed.append(recipe)
            for pkg in nx.algorithms.descendants(subdag, name):
                skip_dependent[pkg].append(recipe)
            continue
        except UnsatisfiableError as exc:
            logger.error('BUILD ERROR: could not determine dependencies for recipe %s: %s',
                         recipe, exc)
            failed.append(recipe)
            for pkg in nx.algorithms.descendants(subdag, name):
                skip_dependent[pkg].append(recipe)
            continue
        if not pkg_paths:
            logger.info("Nothing to be done for recipe %s", recipe)
            continue

        # If a recipe depends on conda, it means it must be installed in
        # the root env, which is not compatible with mulled-build tests. In
        # that case, we temporarily disable the mulled-build tests for the
        # recipe.
        keep_mulled_test = not has_conda_in_build(recipe)
        if mulled_test and not keep_mulled_test:
            logger.info('TEST SKIP: skipping mulled-build test for %s because it '
                        'depends on conda or conda-build', recipe)

        res = build(recipe=recipe,
                    pkg_paths=pkg_paths,
                    testonly=testonly,
                    mulled_test=mulled_test and keep_mulled_test,
                    channels=config['channels'],
                    docker_builder=docker_builder,
                    linter=linter)

        if not res.success:
            failed.append(recipe)
            for pkg in nx.algorithms.descendants(subdag, name):
                skip_dependent[pkg].append(recipe)
        else:
            built_recipes.append(recipe)
            if not testonly:
                if anaconda_upload:
                    for pkg in pkg_paths:
                        if not upload.anaconda_upload(pkg, label=label):
                            failed_uploads.append(pkg)
                if mulled_upload_target and keep_mulled_test:
                    for img in res.mulled_images:
                        upload.mulled_upload(img, mulled_upload_target)

        # remove traces of the build
        if not keep_old_work:
            conda_build_purge()

    if failed or failed_uploads:
        logger.error('BUILD SUMMARY: of %s recipes, '
                     '%s failed and %s were skipped. '
                     'Details of recipes and environments follow.',
                     len(recipes), len(failed), len(skipped_recipes))
        if built_recipes:
            logger.error('BUILD SUMMARY: while the entire build failed, '
                         'the following recipes were built successfully:\n%s',
                         '\n'.join(built_recipes))
        for recipe in failed:
            logger.error('BUILD SUMMARY: FAILED recipe %s', recipe)
        for name, dep in skip_dependent.items():
            logger.error('BUILD SUMMARY: SKIPPED recipe %s '
                         'due to failed dependencies %s', name, dep)
        if failed_uploads:
            logger.error('UPLOAD SUMMARY: the following packages failed to upload:\n%s',
                         '\n'.join(failed_uploads))
        return False

    logger.info("BUILD SUMMARY: successfully built %s of %s recipes",
                len(built_recipes), len(recipes))
    return True
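The n_workers/worker_offset contract described in the docstring amounts to round-robin chunking of sub-DAGs. A tiny sketch of one plausible get_subdags-style split (hypothetical helper, not the actual bioconda-utils implementation):

def round_robin_chunk(subdags, n_workers, worker_offset):
    # each worker takes the sub-DAGs at its offset within every group of n_workers
    return [sd for i, sd in enumerate(subdags) if i % n_workers == worker_offset]

print(round_robin_chunk(["dag0", "dag1", "dag2", "dag3"], 2, 1))  # ['dag1', 'dag3']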
Code example #42
    def make_sorted_analysis_unit_dict_from_function_graph(
            self, analysis_unit_dict):
        ''' BUILDS A TOPO SORTED FUNCTION GRAPH.  THIS ALLOWS THE ANALYSIS TO START ON FUNCTION LEAVES,
            SO WE CAN HOPEFULLY DISCOVER UNITS ON THE RETURN TYPE AND PROPAGATE THEM UP.  THE
            FUNCTION GRAPH MAY HAVE CYCLES (recursion, for example), THEREFORE WE REMOVE THESE EDGES FROM THE GRAPH
            AND ANALYZE THEM LAST (<-- not sure this is best)
            input:  a dictionary of functions from this dump file
            output: OrderedDict of functions
            postcondition:   returned dict must be the same length as the input dict, and contain all the same elements
            '''
        if self.debug_verbose:
            print inspect.stack()[0][3]
        return_dict = OrderedDict()
        G = self.function_graph
        # TRY FINDING A DAG.  IF NOT, REMOVE EDGES AND TRY AGAIN.
        super_break = 0
        while nx.number_of_nodes(G) > 0 and super_break < 1000:
            super_break += 1
            if not nx.is_directed_acyclic_graph(G):
                try:
                    # SEARCH FOR CYCLE AND REMOVE EDGES
                    edges = nx.find_cycle(G)
                    G.remove_edges_from(edges)
                    if self.debug and self.debug_print_function_graph:
                        print 'Function graph has cycle %s' % edges,
                except nx.NetworkXNoCycle:
                    if self.debug and self.debug_print_function_graph:
                        print 'Function graph is not a DAG and does not have a cycle!'
                        # GIVE UP AND RETURN UNSORTED
                    return analysis_unit_dict
            else:
                # WE HAVE A DAG, CAN PROCEED (AND TOPO SORT)
                break

        if nx.number_of_nodes(G) == 0:
            # RETURN UNCHANGED
            return analysis_unit_dict
        # WE HAVE A DIRECTED GRAPH WITH NODES, CAN SORT AND ADD NODES TO ORDERED LIST
        function_graph_topo_sort = list(nx.topological_sort(G))
        function_graph_topo_sort_reversed = function_graph_topo_sort[::-1]  # REVERSED

        # OPTIONAL DEBUG PRINT
        if self.debug_print_function_topo_sort:
            print function_graph_topo_sort_reversed

        # CREATE RETURN DICT FROM TOPO SORT
        for node in function_graph_topo_sort_reversed:
            function_id_attr_dict = nx.get_node_attributes(G, 'function_id')
            #print function_id_attr_dict
            if node in function_id_attr_dict:  # FIRST OPTION SHOULD SHORTCUT WHEN ATTR DICT DOES NOT EXIST
                # ADD FUNCTION TO NEW DICTIONARY - THIS IS THE EXPLORE ORDER
                return_dict[function_id_attr_dict[node]] = analysis_unit_dict[
                    function_id_attr_dict[node]]
            else:
                if self.debug and self.debug_print_function_graph:
                    print "Graph node does not have function_dict_key"

        # ADD ANY REMAINING FUNCTIONS NOT IN THE TOPO SORT TO THE ORDERED DICT
        for k in analysis_unit_dict.keys():
            if k not in return_dict:
                return_dict[k] = analysis_unit_dict[k]

        assert (len(return_dict) == len(analysis_unit_dict))

        return return_dict
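A condensed sketch of the strategy the docstring describes -- drop cycle edges until the call graph is a DAG, then visit it in reverse topological order so callees come before callers (toy call graph):

import networkx as nx

g = nx.DiGraph([("f", "g"), ("g", "h"), ("h", "f"), ("g", "leaf")])
while not nx.is_directed_acyclic_graph(g):
    try:
        g.remove_edges_from(nx.find_cycle(g))  # strip one cycle per pass
    except nx.NetworkXNoCycle:
        break
leaves_first = list(nx.topological_sort(g))[::-1]
print(leaves_first)  # e.g. ['leaf', 'g', 'h', 'f'] -- leaves analyzed first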
Code example #43
    def build_graph(graph):
        """
        Instantiate Modules used in graph and fill the ModuleTable.

        Args:
          graph: An instance of class:ModuleGraph.

        Returns:
          ModuleTable: it holds all information of modules(nodes) and relations(edges) between them.
          toposort: Topological sorting of the graph, it's a list-like object

        Raises:
          Stop: Or one of its subclasses if an error occurs while building the graph.
          TypeError: If modules are of an inappropriate type.
        """

        # instantiation
        nodes = graph.nodes
        modules = []
        for node_name in nodes:
            module_name = graph.node[node_name]['attr']['module']
            p = __import__(module_name, globals(), locals(), level=0)
            globals()[module_name] = p.__dict__[module_name]
            modules.append(eval(module_name + '()'))
            # print(eval(module_name).make_module_description().input_names())

        # check type
        def _Check_Module(md):
            """
            Check Modules which will be registered to ModuleTable.

            Args:
                md (Module or [Module]): a Module or a list of Modules

            Returns:
                succeed or not
            """
            if isinstance(md, (list, tuple)):
                for x in md:
                    _Check_Module(x)
                return
            # assert isinstance(md, Module), md
            # assert not isinstance(modules, Modules), \
            #     "Cannot append more Modules after BaseSession was setup!"
            return True

        # Check_Module = _Check_Module
        #
        # for md in modules:
        #     Check_Module(md)

        # register to ModuleTable
        logger.logger.info("Build graph ...")

        ModuleTable = {
            'node_name': {
                'module_name': None,
                'module_instance': None,
                'module_desc': None,
                'output': None
            }
        }
        for index, node_name in enumerate(nodes):
            logger.logger.info('Register ' + node_name + ' to session')
            ModuleTable[node_name] = {
                'module_name': graph.node[node_name]['attr']['module'],
                'module_instance': modules[index],
                'module_desc': modules[index].make_module_description(),
                'output': None
            }

        toposort = list(nx.topological_sort(graph))

        return ModuleTable, toposort
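A hedged alternative for the __import__/eval instantiation step above, using importlib instead (the module layout is hypothetical -- it assumes each module file defines a class of the same name):

import importlib

def instantiate(module_name):
    mod = importlib.import_module(module_name)      # e.g. "Conv2D"
    cls = getattr(mod, module_name.split(".")[-1])  # class named like the module
    return cls()                                    # no eval() required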
Code example #44
File: cfg_utils.py Project: AkshaySG14/582proj-angr
    def quasi_topological_sort_nodes(graph, nodes=None):
        """
        Sort a given set of nodes from a graph based on the following rules:

        # - if A -> B and not B -> A, then we have A < B
        # - if A -> B and B -> A, then the ordering is undefined

        Following the above rules gives us a quasi-topological sorting of nodes in the graph. It also works for cyclic
        graphs.

        :param networkx.DiGraph graph: A local transition graph of the function.
        :param iterable nodes: A list of nodes to sort. None if you want to sort all nodes inside the graph.
        :return: A list of ordered nodes.
        :rtype: list
        """

        # fast path for single node graphs
        if graph.number_of_nodes() == 1:
            return list(graph.nodes())

        # make a copy to the graph since we are gonna modify it
        graph_copy = networkx.DiGraph()

        # find all strongly connected components in the graph
        sccs = [
            scc for scc in networkx.strongly_connected_components(graph)
            if len(scc) > 1
        ]

        # collapse all strongly connected components
        for src, dst in graph.edges():
            scc_index = CFGUtils._components_index_node(sccs, src)
            if scc_index is not None:
                src = SCCPlaceholder(scc_index)
            scc_index = CFGUtils._components_index_node(sccs, dst)
            if scc_index is not None:
                dst = SCCPlaceholder(scc_index)

            if src == dst:
                # covers plain self-loops and edges inside one SCC, which
                # collapse onto the same placeholder
                continue

            graph_copy.add_edge(src, dst)

        # add loners
        out_degree_zero_nodes = [
            node for (node, degree) in graph.out_degree() if degree == 0
        ]
        for node in out_degree_zero_nodes:
            if graph.in_degree(node) == 0:
                graph_copy.add_node(node)

        # topological sort on acyclic graph `graph_copy`
        tmp_nodes = networkx.topological_sort(graph_copy)

        ordered_nodes = []
        for n in tmp_nodes:
            if isinstance(n, SCCPlaceholder):
                CFGUtils._append_scc(graph, ordered_nodes, sccs[n.scc_id])
            else:
                ordered_nodes.append(n)

        if nodes is None:
            return ordered_nodes

        nodes = set(nodes)
        ordered_nodes = [n for n in ordered_nodes if n in nodes]
        return ordered_nodes
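The SCC collapse above can also be phrased with nx.condensation, which builds the quotient DAG in one call; a minimal sketch (members of each SCC are emitted in arbitrary sorted order here, rather than via _append_scc):

import networkx as nx

g = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 4), (4, 3)])
cond = nx.condensation(g)  # one node per SCC; 'members' holds original nodes
order = []
for scc_node in nx.topological_sort(cond):
    order.extend(sorted(cond.nodes[scc_node]["members"]))
print(order)  # [1, 2, 3, 4] -- each SCC's members stay together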
Code example #45
File: install_cmd.py Project: qihaowei89/wfpm
def install_cmd(ctx, force=False, skip_tests=False, pkg_json=None):
    if not pkg_json:
        project = ctx.obj['PROJECT']
        if not project.root:
            echo("Not in a package project directory.")
            ctx.abort()

        if str(Path(os.getcwd()).parent) != project.root:
            echo("Not in a package directory.")
            ctx.abort()

        if not os.path.isfile('pkg.json'):
            echo(
                "Not in a package directory, 'pkg.json' not found in the current directory."
            )
            ctx.abort()

        install_dest = project.root
        pkg_json = os.path.join(os.getcwd(), 'pkg.json')

    else:
        install_dest = os.path.dirname(
            os.path.dirname(pkg_json))  # parent dir of where pkg.json is

    try:
        package = Package(pkg_json=pkg_json)
        dep_graph = nx.DiGraph()
        build_dep_graph(package, DG=dep_graph)
    except Exception as ex:
        echo(f"Unable to build package dependency graph: {ex}")
        ctx.abort()

    # exclude the first one, which is the current package itself; it's important to reverse the order
    dep_pkgs = list(reversed(list(nx.topological_sort(dep_graph))[1:]))

    if dep_pkgs:
        echo("Start dependency installation.")
    else:
        echo("No dependency defined, no installation needed.")

    failed_pkgs = []
    installed_pkgs = []
    for dep_pkg_uri in dep_pkgs:
        package = Package(pkg_uri=dep_pkg_uri)
        installed = False

        try:
            path = package.install(install_dest, force=force)
            installed = True
            installed_pkgs.append(dep_pkg_uri)
            echo(
                f"Package installed in: {path.replace(os.path.join(os.getcwd(), ''), '')}"
            )

        except Exception as ex:
            echo(f"{ex}")
            failed_pkgs.append(dep_pkg_uri)

        if not skip_tests and installed:
            test_package(path)

    return installed_pkgs, failed_pkgs
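The install order boils down to a reversed topological sort of the dependency graph, so dependencies land before their dependents; a toy illustration (package names invented):

import networkx as nx

deps = nx.DiGraph([("me", "libA"), ("me", "libB"), ("libA", "libB")])
order = list(reversed(list(nx.topological_sort(deps))))[:-1]  # drop "me" itself
print(order)  # ['libB', 'libA'] -- libB first, since libA depends on it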
Code example #46
import networkx as nx
import matplotlib.pyplot as plt

G = nx.DiGraph()
labels = []

for scene in scenes:  # find the first scene: its "parallel of previous" is 'None'
    if scene[1] == 'None':
        G.add_node(scene[0])
        labels.append(scene[0])
        target = scene[0]


def chain_scenes(scenes, target):
    for scene in scenes:
        if scene[1] == target:
            G.add_edge(target, scene[0])
            labels.append(scene[0])
            target = scene[0]
            chain_scenes(scenes, target)
        else:
            pass


chain_scenes(scenes, target)

srt = list(nx.topological_sort(G))
print(srt)

pos = nx.spectral_layout(G)

nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos)
plt.show()
Code example #47
File: cmp2.py Project: MindFigment/task-scheduling
 def _get_topological_order(self):
     return list(nx.topological_sort(self.graph))
Code example #48
 def topological_sort(self):
     """Return a list of nodes in this graph in topological sort order."""
     return list(nx.topological_sort(self))
Code example #49
File: new.py Project: shannonsh/NPWizardry
def solve(num_wizards, num_constraints, wizards, constraints):
    """
    Write your algorithm here.
    Input:
        num_wizards: Number of wizards
        num_constraints: Number of constraints
        wizards: An array of wizard names, in no particular order
        constraints: A 2D-array of constraints,
                     where constraints[0] may take the form ['A', 'B', 'C']

    Output:
        An array of wizard names in the ordering your algorithm returns
    """
    global wiz_const
    wiz_const = mapConstraints(wizards, constraints)
    partial_soltns = []

    # tie-breaking counter for the priority queue: with identical priorities
    # heapq would try to compare the DiGraph entries, which are unorderable
    k = 0

    # list of wizards sorted by lowest to highest degree
    sorted_wiz = sortWizByConsts(wiz_const)
    wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}

    const_set = set(map(tuple, constraints))
    for i in range(4):
        heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))
        k += 1

    print("setup done, commencing solving")

    while len(partial_soltns):

        # for partial_soltn, const_set in partial_soltns :
        #             partial_soltns.remove(partial_soltn)
        num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)
        const = findNextConst(partial_soltn, const_set, wiz_rankings)
        print("seen " + str(len(partial_soltn)) +
              "\t num partial_solutions\t" + str(len(partial_soltns)))
        try:
            const_set.remove(const)
        except KeyError:
            print("BAD SHIT")
            pass
        possible_arrangements = [(const[0], const[1], const[2]),
                                 (const[2], const[0], const[1]),
                                 (const[2], const[1], const[0]),
                                 (const[1], const[0], const[2])]
        for arr in possible_arrangements:
            soltn = partial_soltn.copy()
            a, b, c = arr
            if not (soltn.has_node(a) and soltn.has_node(b)
                    and nx.has_path(soltn, a, b)):
                soltn.add_edge(a, b)
            if not (soltn.has_node(b) and soltn.has_node(c)
                    and nx.has_path(soltn, b, c)):
                soltn.add_edge(b, c)
            # see if we violated any other constraints (seen or not seen)
            is_valid, num_wiz = validNumWiz(soltn, const_set)

            if is_valid and len(list(nx.simple_cycles(soltn))) == 0:
                heapq.heappush(partial_soltns,
                               (-len(soltn), k, soltn, const_set.copy()))
                k += 1
                # are we done?
                if num_wiz == num_wizards:
                    print(
                        "FINAL SOLUTION (found without processing all constraints but validating against them)"
                    )
                    ordering = list(nx.topological_sort(soltn))
                    finishEverything(ordering, constraints)
                    return ordering
    if foundCompleteOrdering(heapq.heappop(partial_soltns)):
        print("FINAL SOLUTION")
        ordering = list(nx.topological_sort(soltn))
        finishEverything(ordering, constraints)
        return ordering
    print("NO SOLUTION FOUND")
    return ""
Code example #50
def _write_detailed_dot(graph, dotfilename):
    """Create a dot file with connection info

    digraph structs {
    node [shape=record];
    struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
    struct2 [label="<f0> one|<f1> two"];
    struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
    struct1:f1 -> struct2:f0;
    struct1:f0 -> struct2:f1;
    struct1:f2 -> struct3:here;
    }
    """
    text = ['digraph structs {', 'node [shape=record];']
    # write nodes
    edges = []
    replacefunk = lambda x: x.replace('_', '').replace('.', ''). \
        replace('@', '').replace('-', '')
    for n in nx.topological_sort(graph):
        nodename = str(n)
        inports = []
        for u, v, d in graph.in_edges_iter(nbunch=n, data=True):
            for cd in d['connect']:
                if isinstance(cd[0], string_types):
                    outport = cd[0]
                else:
                    outport = cd[0][0]
                inport = cd[1]
                ipstrip = 'in' + replacefunk(inport)
                opstrip = 'out' + replacefunk(outport)
                edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''),
                                                      opstrip,
                                                      str(v).replace('.', ''),
                                                      ipstrip))
                if inport not in inports:
                    inports.append(inport)
        inputstr = '{IN'
        for ip in sorted(inports):
            inputstr += '|<in%s> %s' % (replacefunk(ip), ip)
        inputstr += '}'
        outports = []
        for u, v, d in graph.out_edges_iter(nbunch=n, data=True):
            for cd in d['connect']:
                if isinstance(cd[0], string_types):
                    outport = cd[0]
                else:
                    outport = cd[0][0]
                if outport not in outports:
                    outports.append(outport)
        outputstr = '{OUT'
        for op in sorted(outports):
            outputstr += '|<out%s> %s' % (replacefunk(op), op)
        outputstr += '}'
        srcpackage = ''
        if hasattr(n, '_interface'):
            pkglist = n._interface.__class__.__module__.split('.')
            if len(pkglist) > 2:
                srcpackage = pkglist[2]
        srchierarchy = '.'.join(nodename.split('.')[1:-1])
        nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1],
                                            srcpackage,
                                            srchierarchy)
        text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''),
                                             inputstr,
                                             nodenamestr,
                                             outputstr)]
    # write edges
    for edge in sorted(edges):
        text.append(edge)
    text.append('}')
    filep = open(dotfilename, 'wt')
    filep.write('\n'.join(text))
    filep.close()
    return text
Code example #51
 def traverse(self):
     order = list(nx.topological_sort(self.graph))
     for o in order:
         n = self.graph.nodes[o]
         self.data = n['data'].execute(self.data)
Code example #52
File: workflows.py Project: Raniac/NEURO-LEARN
    def _get_dot(self,
                 prefix=None,
                 hierarchy=None,
                 colored=False,
                 simple_form=True,
                 level=0):
        """Create a dot file with connection info
        """
        import networkx as nx
        if prefix is None:
            prefix = '  '
        if hierarchy is None:
            hierarchy = []
        colorset = [
            '#FFFFC8',  # Y
            '#0000FF',
            '#B4B4FF',
            '#E6E6FF',  # B
            '#FF0000',
            '#FFB4B4',
            '#FFE6E6',  # R
            '#00A300',
            '#B4FFB4',
            '#E6FFE6',  # G
            '#0000FF',
            '#B4B4FF'
        ]  # loop B
        if level > len(colorset) - 2:
            level = 3  # Loop back to blue

        dotlist = ['%slabel="%s";' % (prefix, self.name)]
        for node in nx.topological_sort(self._graph):
            fullname = '.'.join(hierarchy + [node.fullname])
            nodename = fullname.replace('.', '_')
            if not isinstance(node, Workflow):
                node_class_name = get_print_name(node, simple_form=simple_form)
                if not simple_form:
                    node_class_name = '.'.join(node_class_name.split('.')[1:])
                if hasattr(node, 'iterables') and node.iterables:
                    dotlist.append(('%s[label="%s", shape=box3d,'
                                    'style=filled, color=black, colorscheme'
                                    '=greys7 fillcolor=2];') %
                                   (nodename, node_class_name))
                else:
                    if colored:
                        dotlist.append(
                            ('%s[label="%s", style=filled,'
                             ' fillcolor="%s"];') %
                            (nodename, node_class_name, colorset[level]))
                    else:
                        dotlist.append(
                            ('%s[label="%s"];') % (nodename, node_class_name))

        for node in nx.topological_sort(self._graph):
            if isinstance(node, Workflow):
                fullname = '.'.join(hierarchy + [node.fullname])
                nodename = fullname.replace('.', '_')
                dotlist.append('subgraph cluster_%s {' % nodename)
                if colored:
                    dotlist.append(prefix + prefix + 'edge [color="%s"];' %
                                   (colorset[level + 1]))
                    dotlist.append(prefix + prefix + 'style=filled;')
                    dotlist.append(prefix + prefix + 'fillcolor="%s";' %
                                   (colorset[level + 2]))
                dotlist.append(
                    node._get_dot(prefix=prefix + prefix,
                                  hierarchy=hierarchy + [self.name],
                                  colored=colored,
                                  simple_form=simple_form,
                                  level=level + 3))
                dotlist.append('}')
            else:
                for subnode in self._graph.successors(node):
                    if node._hierarchy != subnode._hierarchy:
                        continue
                    if not isinstance(subnode, Workflow):
                        nodefullname = '.'.join(hierarchy + [node.fullname])
                        subnodefullname = '.'.join(hierarchy +
                                                   [subnode.fullname])
                        nodename = nodefullname.replace('.', '_')
                        subnodename = subnodefullname.replace('.', '_')
                        for _ in self._graph.get_edge_data(node,
                                                           subnode)['connect']:
                            dotlist.append('%s -> %s;' %
                                           (nodename, subnodename))
                        logger.debug('connection: %s', dotlist[-1])
        # add between workflow connections
        for u, v, d in self._graph.edges(data=True):
            uname = '.'.join(hierarchy + [u.fullname])
            vname = '.'.join(hierarchy + [v.fullname])
            for src, dest in d['connect']:
                uname1 = uname
                vname1 = vname
                if isinstance(src, tuple):
                    srcname = src[0]
                else:
                    srcname = src
                if '.' in srcname:
                    uname1 += '.' + '.'.join(srcname.split('.')[:-1])
                if '.' in dest and '@' not in dest:
                    if not isinstance(v, Workflow):
                        if 'datasink' not in \
                           str(v._interface.__class__).lower():
                            vname1 += '.' + '.'.join(dest.split('.')[:-1])
                    else:
                        vname1 += '.' + '.'.join(dest.split('.')[:-1])
                if uname1.split('.')[:-1] != vname1.split('.')[:-1]:
                    dotlist.append(
                        '%s -> %s;' %
                        (uname1.replace('.', '_'), vname1.replace('.', '_')))
                    logger.debug('cross connection: %s', dotlist[-1])
        return ('\n' + prefix).join(dotlist)
Code example #53
    def compute(self):
        def getName(pref, id):
            while '%s%d' % (pref, id) in self.depTree:
                id += 1
            return '%s%d' % (pref, id)

        self.depTree = self.inGraph.copy()
        self.transitveReduction(self.depTree)
        tsort = list(reversed(list(NX.topological_sort(self.depTree))))
        nextId = 0
        for node in tsort:
            #strip out cycles within parents
            parents = set([i[0] for i in self.depTree.in_edges(node)])
            if len(parents) > 1:
                for parent in parents:
                    for pedge in self.depTree.out_edges(parent):
                        if pedge[1] in parents:
                            self.depTree.remove_edge(parent, node)
                            break

            parents = [i[0] for i in self.depTree.in_edges(node)]
            if len(parents) > 1:
                # insert new "virtual" node as parent to all parents
                vpNode = getName("Virtual", nextId)
                self.depTree.add_node(vpNode, virtual='1')
                # insert new "virtual follow-on" node as follow up to above node
                vfoNode = getName("VirtualF", nextId)
                self.depTree.add_node(vfoNode, virtual='1')
                self.depTree.add_edge(vpNode, vfoNode, followOn='1')

                nextId += 1
                for parent in parents:
                    # insert vpNode above all the parents
                    for inEdge in self.depTree.in_edges(parent):
                        if not self.isFollowOn(inEdge[0], inEdge[1]):
                            self.depTree.add_edge(inEdge[0], vpNode)
                            self.depTree.remove_edge(inEdge[0], inEdge[1])
                    # add parent as child of the followOn
                    self.depTree.add_edge(vfoNode, parent)
                    # remove node from parent
                    self.depTree.remove_edge(parent, node)
                    # add parent's children to vpNode
                    for child in list(self.depTree.successors(parent)):
                        if not self.isFollowOn(parent, child):
                            self.depTree.remove_edge(parent, child)
                            self.depTree.add_edge(vpNode, child)
                            if self.depTree.has_edge(child, vpNode):
                                self.depTree.remove_edge(child, vpNode)
                #add node to vpNode
                self.depTree.add_edge(vpNode, node)
                #lazy clean-up of inserted transitive edges
                #will need to avoid doing this brute-force for bigger trees
                self.transitveReduction(self.depTree)
                assert (len(self.depTree.in_edges(node)) == 1)
                # process virtual node next
                tsort.insert(tsort.index(node) + 1, vpNode)

            assert len(self.depTree.in_edges(node)) < 2

        for node in tsort:
            assert len(self.depTree.in_edges(node)) < 2
        assert NX.is_directed_acyclic_graph(self.depTree)
        self.enforceMaxParallel()
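On NetworkX 2.x the hand-rolled transitveReduction pass could lean on the built-in helper, which removes every edge implied by a longer path (DAG input required); a minimal sketch:

import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "c")])  # a->c is implied
tr = nx.transitive_reduction(g)
print(sorted(tr.edges()))  # [('a', 'b'), ('b', 'c')]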
Code example #54
import networkx as nx
import pygraphviz as pgv
from nxpd import draw, nxpdParams
nxpdParams['show'] = 'ipynb'

G = nx.DiGraph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'd'), ('d', 'c'), ('a', 'd')])
draw(G, layout='circo')

# if the directed graph is acyclic then it has a topological ordering
if nx.is_directed_acyclic_graph(G):
    print("Topological ordering of the nodes:", nx.topological_sort(G))
else:
    print("G contains a cycle, hence it cannot be topologically sorted.")
Code example #55
File: graph.py Project: zhangshoug/czipline
 def ordered(self):
     return iter(topological_sort(self.graph))
Code example #56
File: build.py Project: yanghui2048/bioconda-utils
def build_recipes(
    recipe_folder,
    config,
    packages="*",
    mulled_test=True,
    testonly=False,
    force=False,
    docker_builder=None,
    label=None,
    anaconda_upload=False,
    mulled_upload_target=None,
    check_channels=None,
    lint_args=None,
):
    """
    Build one or many bioconda packages.

    Parameters
    ----------

    recipe_folder : str
        Directory containing possibly many, and possibly nested, recipes.

    config : str or dict
        If string, path to config file; if dict then assume it's an
        already-parsed config file.

    packages : str
        Glob indicating which packages should be considered. Note that packages
        matching the glob will still be filtered out by any blacklists
        specified in the config.

    mulled_test : bool
        If True, then test the package in a minimal container.

    testonly : bool
        If True, only run test.

    force : bool
        If True, build the recipe even though it would otherwise be filtered
        out.

    docker_builder : docker_utils.RecipeBuilder instance
        If not None, then use this RecipeBuilder to build all recipes.

    label : str
        Optional label to use when uploading packages. Useful for testing and
        debugging. Default is to use the "main" label.

    anaconda_upload :  bool
        If True, upload the package to anaconda.org.

    mulled_upload_target : str or None
        If not None, upload the mulled docker image to the given target on quay.io.

    check_channels : list
        Channels to check to see if packages already exist in them. If None,
        then defaults to every channel in the config file except "defaults".

    lint_args : linting.LintArgs | None
        If not None, then apply linting just before building.
    """
    orig_config = config
    config = utils.load_config(config)
    blacklist = utils.get_blacklist(config['blacklists'], recipe_folder)

    if check_channels is None:
        if config['channels']:
            check_channels = [c for c in config['channels'] if c != "defaults"]
        else:
            check_channels = []

    logger.info('blacklist: %s', ', '.join(sorted(blacklist)))

    if packages == "*":
        packages = ["*"]
    recipes = []
    for package in packages:
        for recipe in utils.get_recipes(recipe_folder, package):
            if os.path.relpath(recipe, recipe_folder) in blacklist:
                logger.debug('blacklisted: %s', recipe)
                continue
            recipes.append(recipe)
            logger.debug(recipe)
    if not recipes:
        logger.info("Nothing to be done.")
        return True

    logger.debug('recipes: %s', recipes)

    if lint_args is not None:
        lint_exclude = (lint_args.exclude or ())
        if 'already_in_bioconda' not in lint_exclude:
            lint_exclude = tuple(lint_exclude) + ('already_in_bioconda', )
        lint_args = linting.LintArgs(lint_exclude, lint_args.registry)

    dag, name2recipes = graph.build(recipes,
                                    config=orig_config,
                                    blacklist=blacklist)
    recipe2name = {}
    for k, v in name2recipes.items():
        for i in v:
            recipe2name[i] = k

    if not dag:
        logger.info("Nothing to be done.")
        return True
    else:
        logger.info("Building and testing %s recipes in total", len(dag))
        logger.info("Recipes to build: \n%s", "\n".join(dag.nodes()))

    subdags_n = int(os.environ.get("SUBDAGS", 1))
    subdag_i = int(os.environ.get("SUBDAG", 0))

    if subdag_i >= subdags_n:
        raise ValueError("SUBDAG=%s (zero-based) but only SUBDAGS=%s "
                         "subdags are available")

    failed = []
    skip_dependent = defaultdict(list)

    # Get connected subdags and sort by nodes
    if testonly:
        # use each node as a subdag (they are grouped into equal sizes below)
        subdags = sorted([[n] for n in nx.nodes(dag)])
    else:
        # take connected components as subdags, remove cycles
        subdags = []
        for cc_nodes in nx.connected_components(dag.to_undirected()):
            cc = dag.subgraph(sorted(cc_nodes))
            nodes_in_cycles = set()
            for cycle in list(nx.simple_cycles(cc)):
                logger.error(
                    'BUILD ERROR: '
                    'dependency cycle found: %s',
                    cycle,
                )
                nodes_in_cycles.update(cycle)
            for name in sorted(nodes_in_cycles):
                cycle_fail_recipes = sorted(name2recipes[name])
                logger.error(
                    'BUILD ERROR: '
                    'cannot build recipes for %s since it cyclically depends '
                    'on other packages in the current build job. Failed '
                    'recipes: %s',
                    name,
                    cycle_fail_recipes,
                )
                failed.extend(cycle_fail_recipes)
                for n in nx.algorithms.descendants(cc, name):
                    if n in nodes_in_cycles:
                        continue  # don't count packages twice (failed/skipped)
                    skip_dependent[n].extend(cycle_fail_recipes)
            cc_without_cycles = dag.subgraph(name for name in cc
                                             if name not in nodes_in_cycles)
            # ensure that packages which need a build are built in the right order
            subdags.append(nx.topological_sort(cc_without_cycles))
    # chunk subdags such that we have at most subdags_n many
    if subdags_n < len(subdags):
        chunks = [[n for subdag in subdags[i::subdags_n] for n in subdag]
                  for i in range(subdags_n)]
    else:
        chunks = subdags
    if subdag_i >= len(chunks):
        logger.info("Nothing to be done.")
        return True
    # merge subdags of the selected chunk
    subdag = dag.subgraph(chunks[subdag_i])

    recipes = [
        recipe for package in subdag for recipe in name2recipes[package]
    ]

    logger.info("Building and testing subdag %s of %s (%s recipes)",
                subdag_i + 1, subdags_n, len(recipes))

    built_recipes = []
    skipped_recipes = []
    all_success = True
    failed_uploads = []

    for recipe in recipes:
        recipe_success = True
        name = recipe2name[recipe]

        if name in skip_dependent:
            logger.info(
                'BUILD SKIP: '
                'skipping %s because it depends on %s '
                'which had a failed build.', recipe, skip_dependent[name])
            skipped_recipes.append(recipe)
            continue

        logger.info('Determining expected packages')
        try:
            pkg_paths = utils.get_package_paths(recipe,
                                                check_channels,
                                                force=force)
        except utils.DivergentBuildsError as e:
            logger.error(
                'BUILD ERROR: '
                'packages with divergent build strings in repository '
                'for recipe %s. A build number bump is likely needed: %s',
                recipe, e)
            failed.append(recipe)
            for n in nx.algorithms.descendants(subdag, name):
                skip_dependent[n].append(recipe)
            continue
        except UnsatisfiableError as e:
            logger.error(
                'BUILD ERROR: '
                'could not determine dependencies for recipe %s: %s', recipe,
                e)
            failed.append(recipe)
            for n in nx.algorithms.descendants(subdag, name):
                skip_dependent[n].append(recipe)
            continue
        if not pkg_paths:
            logger.info("Nothing to be done for recipe %s", recipe)
            continue

        # If a recipe depends on conda, it means it must be installed in
        # the root env, which is not compatible with mulled-build tests. In
        # that case, we temporarily disable the mulled-build tests for the
        # recipe.
        deps = []
        deps += utils.get_deps(recipe, orig_config, build=True)
        deps += utils.get_deps(recipe, orig_config, build=False)
        keep_mulled_test = True
        if 'conda' in deps or 'conda-build' in deps:
            keep_mulled_test = False
            if mulled_test:
                logger.info(
                    'TEST SKIP: '
                    'skipping mulled-build test for %s because it '
                    'depends on conda or conda-build', recipe)

        res = build(
            recipe=recipe,
            recipe_folder=recipe_folder,
            pkg_paths=pkg_paths,
            testonly=testonly,
            mulled_test=mulled_test and keep_mulled_test,
            force=force,
            channels=config['channels'],
            docker_builder=docker_builder,
            lint_args=lint_args,
        )

        all_success &= res.success
        recipe_success &= res.success

        if not res.success:
            failed.append(recipe)
            for n in nx.algorithms.descendants(subdag, name):
                skip_dependent[n].append(recipe)
        elif not testonly:
            for pkg in pkg_paths:
                # upload build
                if anaconda_upload:
                    if not upload.anaconda_upload(pkg, label=label):
                        failed_uploads.append(pkg)
            if mulled_upload_target and keep_mulled_test:
                for img in res.mulled_images:
                    upload.mulled_upload(img, mulled_upload_target)

        # remove traces of the build
        purge()

        if recipe_success:
            built_recipes.append(recipe)

    if failed or failed_uploads:
        logger.error(
            'BUILD SUMMARY: of %s recipes, '
            '%s failed and %s were skipped. '
            'Details of recipes and environments follow.', len(recipes),
            len(failed), len(skipped_recipes))

        if len(built_recipes) > 0:
            logger.error(
                'BUILD SUMMARY: although the build as a whole failed, '
                'the following recipes were built successfully:\n%s',
                '\n'.join(built_recipes))

        for recipe in failed:
            logger.error('BUILD SUMMARY: FAILED recipe %s', recipe)

        for name, dep in skip_dependent.items():
            logger.error(
                'BUILD SUMMARY: SKIPPED recipe %s '
                'due to failed dependencies %s', name, dep)

        if failed_uploads:
            logger.error(
                'UPLOAD SUMMARY: the following packages failed to upload:\n%s',
                '\n'.join(failed_uploads))

        return False

    logger.info("BUILD SUMMARY: successfully built %s of %s recipes",
                len(built_recipes), len(recipes))

    return all_success
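
A minimal, self-contained sketch of the worker-splitting pattern used above: connected components are topologically sorted, then distributed round-robin across workers via extended slicing. The helper name split_round_robin is illustrative, not part of the original code.

import networkx as nx

def split_round_robin(dag, n_workers):
    # one subdag per weakly-connected component, in build order
    subdags = [
        list(nx.topological_sort(dag.subgraph(cc)))
        for cc in nx.connected_components(dag.to_undirected())
    ]
    if n_workers < len(subdags):
        # worker i gets subdags i, i + n_workers, i + 2 * n_workers, ...
        return [[n for sub in subdags[i::n_workers] for n in sub]
                for i in range(n_workers)]
    return subdags

dag = nx.DiGraph([("a", "b"), ("c", "d"), ("e", "f")])
print(split_round_robin(dag, 2))  # e.g. [['a', 'b', 'e', 'f'], ['c', 'd']]
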
コード例 #57
0
    def initialize(self):
        """
        Initialize Graph class instance.

        Initialization includes: create NetworkX DiGraph,
        populate it with input and step nodes, and directed edges.

        Args:
            None.

        Returns:
            On failure: Raises WorkflowDAGException.

        """
        for context in self._exec_contexts | self._data_contexts:
            # set default empty values for context options
            if context not in self._context_options:
                self._context_options[context] = {}

        # references to step classes for each context
        try:
            self._load_context_classes()
        except WorkflowDAGException as err:
            msg = 'cannot load context-specific step classes'
            Log.an().error(msg)
            raise WorkflowDAGException(str(err)+'|'+msg) from err

        # flatten parameters
        self._parameters = {
            param_name: param['value']
            for param_name, param in self._workflow['parameters'].items()
        }

        # init DAG object with structure and empty nodes
        self._graph = nx.DiGraph()

        try:
            self._init_graph_structure()
        except WorkflowDAGException as err:
            msg = 'cannot initialize graph structure'
            Log.an().error(msg)
            raise WorkflowDAGException(str(err)+'|'+msg) from err

        # validate that graph is a DAG
        if not nx.is_directed_acyclic_graph(self._graph):
            msg = 'graph contains cycles, check step dependencies'
            Log.an().error(msg)
            raise WorkflowDAGException(msg)

        # topological sort of graph nodes
        self._topo_sort = list(nx.topological_sort(self._graph))

        # create URIs for each input and step for all contexts
        try:
            self._init_context_uris()
        except WorkflowDAGException as err:
            msg = 'cannot initialize context uris'
            Log.an().error(msg)
            raise WorkflowDAGException(str(err)+'|'+msg) from err

        # initialize input nodes
        try:
            self._init_inputs()
        except WorkflowDAGException as err:
            msg = 'cannot initialize workflow inputs'
            Log.an().error(msg)
            raise WorkflowDAGException(str(err)+'|'+msg) from err

        # initialize step nodes
        try:
            self._init_steps()
        except WorkflowDAGException as err:
            msg = 'cannot initialize workflow steps'
            Log.an().error(msg)
            raise WorkflowDAGException(str(err)+'|'+msg) from err
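
コード例 #57 follows a common validate-then-sort pattern: build the DiGraph, reject it if it contains a cycle, then cache a topological order for later traversal. A minimal sketch of that pattern, assuming a simple step-to-dependencies mapping (the names below are illustrative):

import networkx as nx

def build_and_sort(steps):
    """steps: dict mapping step name -> list of dependency names."""
    graph = nx.DiGraph()
    for step, deps in steps.items():
        graph.add_node(step)
        for dep in deps:
            graph.add_edge(dep, step)  # dependency must run before step
    if not nx.is_directed_acyclic_graph(graph):
        raise ValueError('graph contains cycles, check step dependencies')
    return list(nx.topological_sort(graph))

print(build_and_sort({'align': [], 'call': ['align'], 'report': ['call']}))
# ['align', 'call', 'report']
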
コード例 #58
0
ファイル: workflows.py プロジェクト: Raniac/NEURO-LEARN
    def export(self,
               filename=None,
               prefix="output",
               format="python",
               include_config=False):
        """Export object into a different format

        Parameters
        ----------
        filename: string
           file to save the code to; overrides prefix
        prefix: string
           prefix to use for output file
        format: string
           one of "python"
        include_config: boolean
           whether to include node and workflow config values

        """
        import networkx as nx
        formats = ["python"]
        if format not in formats:
            raise ValueError('format must be one of: %s' % '|'.join(formats))
        flatgraph = self._create_flat_graph()
        nodes = nx.topological_sort(flatgraph)

        all_lines = None
        lines = ['# Workflow']
        importlines = [
            'from nipype.pipeline.engine import Workflow, '
            'Node, MapNode'
        ]
        functions = {}
        if format == "python":
            connect_template = '%s.connect(%%s, %%s, %%s, "%%s")' % self.name
            connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' \
                                % self.name
            wfdef = '%s = Workflow("%s")' % (self.name, self.name)
            lines.append(wfdef)
            if include_config:
                lines.append('%s.config = %s' % (self.name, self.config))
            for idx, node in enumerate(nodes):
                nodename = node.fullname.replace('.', '_')
                # write nodes
                nodelines = format_node(node,
                                        format='python',
                                        include_config=include_config)
                for line in nodelines:
                    if line.startswith('from'):
                        if line not in importlines:
                            importlines.append(line)
                    else:
                        lines.append(line)
                # write connections
                for u, _, d in flatgraph.in_edges(nbunch=node, data=True):
                    for cd in d['connect']:
                        if isinstance(cd[0], tuple):
                            args = list(cd[0])
                            if args[1] in functions:
                                funcname = functions[args[1]]
                            else:
                                func = create_function_from_source(args[1])
                                funcname = [
                                    name for name in func.__globals__
                                    if name != '__builtins__'
                                ][0]
                                functions[args[1]] = funcname
                            args[1] = funcname
                            args = tuple([arg for arg in args if arg])
                            line_args = (u.fullname.replace('.', '_'), args,
                                         nodename, cd[1])
                            line = connect_template % line_args
                            line = line.replace("'%s'" % funcname, funcname)
                            lines.append(line)
                        else:
                            line_args = (u.fullname.replace('.', '_'), cd[0],
                                         nodename, cd[1])
                            lines.append(connect_template2 % line_args)
            functionlines = ['# Functions']
            for function in functions:
                functionlines.append(pickle.loads(function).rstrip())
            all_lines = importlines + functionlines + lines

            if not filename:
                filename = '%s%s.py' % (prefix, self.name)
            with open(filename, 'wt') as fp:
                fp.writelines('\n'.join(all_lines))
        return all_lines
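
The export above leans on the same ordering guarantee for code generation: emitting nodes in topological order ensures every node is defined before any connect line that references it. A toy version of the idea (the Workflow/Node/connect strings are illustrative output, not the real nipype API surface):

import networkx as nx

g = nx.DiGraph([('load', 'filter'), ('filter', 'save')])
lines = ["wf = Workflow('wf')"]
for node in nx.topological_sort(g):
    lines.append("%s = Node('%s')" % (node, node))
for u, v in g.edges():
    lines.append('wf.connect(%s, %s)' % (u, v))
print('\n'.join(lines))
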
コード例 #59
0
def hierarchy_pos(G,
                  root=None,
                  width=1.,
                  vert_gap=0.2,
                  vert_loc=0,
                  xcenter=0.5):
    '''
    From Joel's answer at https://stackoverflow.com/a/29597209/2966723 

    If the graph is a tree this will return the positions to plot this in a 
    hierarchical layout.

    G: the graph (must be a tree)

    root: the root node of current branch 
    - if the tree is directed and this is not given, the root will be found and used
    - if the tree is directed and this is given, then the positions will be just for the descendants of this node.
    - if the tree is undirected and not given, then a random choice will be used.

    width: horizontal space allocated for this branch - avoids overlap with other branches

    vert_gap: gap between levels of hierarchy

    vert_loc: vertical location of root

    xcenter: horizontal location of root
    '''
    if not nx.is_tree(G):
        raise TypeError(
            'cannot use hierarchy_pos on a graph that is not a tree')

    if root is None:
        if isinstance(G, nx.DiGraph):
            root = next(iter(nx.topological_sort(
                G)))  #allows back compatibility with nx version 1.11
        else:
            root = random.choice(list(G.nodes))

    def _hierarchy_pos(G,
                       root,
                       width=1.,
                       vert_gap=0.2,
                       vert_loc=0,
                       xcenter=0.5,
                       pos=None,
                       parent=None):
        '''
        see hierarchy_pos docstring for most arguments

        pos: a dict saying where all nodes go if they have been assigned
        parent: parent of this branch. - only affects it if non-directed

        '''

        if pos is None:
            pos = {root: (xcenter, vert_loc)}
        else:
            pos[root] = (xcenter, vert_loc)
        children = list(G.neighbors(root))
        if not isinstance(G, nx.DiGraph) and parent is not None:
            children.remove(parent)
        if len(children) != 0:
            dx = width / len(children)
            nextx = xcenter - width / 2 - dx / 2
            for child in children:
                nextx += dx
                pos = _hierarchy_pos(G,
                                     child,
                                     width=dx,
                                     vert_gap=vert_gap,
                                     vert_loc=vert_loc - vert_gap,
                                     xcenter=nextx,
                                     pos=pos,
                                     parent=root)
        return pos

    return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter)
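
A usage sketch for hierarchy_pos, assuming matplotlib is available and a recent networkx (2.x) where balanced_tree accepts a graph class via create_using:

import networkx as nx
import matplotlib.pyplot as plt

G = nx.balanced_tree(2, 3, create_using=nx.DiGraph)  # binary tree, depth 3
pos = hierarchy_pos(G)  # root found via topological_sort since G is directed
nx.draw(G, pos=pos, with_labels=True)
plt.show()
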
コード例 #60
0
def build_recipes(recipe_list,
                  check_channels,
                  force=False,
                  debug=False,
                  n_workers=1,
                  worker_offset=0):
    """
    build_recipes
    =============
    Controller method used to prepare, check, build, and process each recipe in the recipe list. It
     builds a DAG whose nodes are recipes and their dependencies, with edges connecting each recipe
     to its dependencies, removes any cyclic nodes that depend on each other, and identifies new or
     updated recipes that need to be built and builds them. (A sketch of the failure-propagation
     pattern used here follows this example.)

    Parameters:
    -----------
    1) recipe_list: (list) A list of recipes to check (the directory path of each recipe)
    2) check_channels: (list) A list of channels to check against
    3) force: (bool) Whether or not to force the recipe to be built even if the same version and build exists in a channel being checked against (Default = False)
    4) debug: (bool) Whether or not to run 'conda build' in the debug phase. (Default = False)
    5) n_workers: (int) The number of workers to use to create subdags. (Default = 1)
    6) worker_offset: (int) The offset used to select which of the n_workers subdags this worker builds. (Default = 0)

    Return:
    +++++++
    1) True if all recipes are checked and built without errors. False otherwise.
    """

    if not recipe_list:
        print(":ggd:build recipes: Nothing to be done")
        return True

    ## create a dag
    dag, name2recipe, recipe2name = build_recipe_dag(recipe_list)

    if not dag:
        print(":ggd:build recipes: Nothing to be done")
        return True

    ## Remove cyclic dependencies in the build job
    ### If current build jobs depend on each other, can't build them
    skip_dependent = defaultdict(list)
    dag = remove_dag_cycles(dag, name2recipe, skip_dependent)

    ## Create subdag workers
    subdag = get_subdags(dag, n_workers, worker_offset)

    if not subdag:
        print(":ggd:build recipes: Nothing to be done")
        return True

    print(":ggd:build recipes: {} recipes to build and test: \n{}".format(
        len(subdag), "\n".join(subdag.nodes())))

    ## Filter recipes
    filtered_recipes = [(recipe, recipe2name[recipe])
                        for package in nx.topological_sort(subdag)
                        for recipe in name2recipe[package]]

    ## Get the Repodata for each channel
    repodata_by_channel, actualname_to_idname = get_repodata(check_channels)

    ## Remove defaults channel for now
    if "defaults" in check_channels:
        check_channels.remove("defaults")

    ## Check each recipe
    built_recipes = []
    skipped_recipes = []
    failed_recipes = []

    for recipe, name in filtered_recipes:

        ## Check if the recipe is being skipped
        if name in skip_dependent:
            print((
                ":ggd:build recipes: SKIPPING BUILD: skipping {} because it depends on {} "
                " which failed build").format(recipe, skip_dependent[name]))
            skipped_recipes.append(recipe)
            continue

        print(
            ":ggd:build recipes: Determining expected packages for {}".format(
                recipe))

        ## Check whether the recipe already appears in any other channel's repodata, and if so, whether its version/build is greater than what the repodata contains
        predicted_path = check_recipe_for_build(
            recipe,
            check_channels,
            repodata_by_channel,
            actualname_to_idname,
            force=force,
        )

        ## if no predicted path, skip building this recipe
        if not predicted_path:
            print(":ggd:build recipes: Nothing to be done for recipe '{}'".
                  format(recipe))
            continue

        ## Build the recipe
        success = conda_build_recipe(recipe, check_channels, predicted_path,
                                     debug)

        ## Check for a successful recipe build
        if success:
            built_recipes.append(recipe)
            print(":ggd:build recipes: Package recipe located at {}".format(
                ",".join(predicted_path)))

        else:
            failed_recipes.append(recipe)
            for pkg in nx.algorithms.descendants(subdag, name):
                skip_dependent[pkg].append(recipe)

    ## Check for failed recipes
    if failed_recipes:
        print(
            (":ggd:build recipes: BUILD SUMMARY: of {} recipes, "
             "{} failed and {} were skipped. ").format(len(filtered_recipes),
                                                       len(failed_recipes),
                                                       len(skipped_recipes)))

        if built_recipes:
            print((":ggd:build recipes: BUILD SUMMARY: Although "
                   "the build process failed, there were {} recipes "
                   "built successfully.").format(len(built_recipes)))

        for frecipe in failed_recipes:
            print(":ggd:build recipes: BUILD SUMMARY: FAILED recipe {}".format(
                frecipe))

        ## Purge the builds
        sp.check_call(["conda", "build", "purge"],
                      stderr=sys.stderr,
                      stdout=sys.stdout)

        return False

    ## If no recipes failed, report a successful build
    print(
        ":ggd:build recipes: BUILD SUMMARY: SUCCESSFULLY BUILT {} of {} recipes"
        .format(len(built_recipes), len(filtered_recipes)))

    return True
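
The failure-handling pattern shared by コード例 #56 and #60 in one place: when a package fails to build, every descendant in the dependency DAG is recorded in skip_dependent and skipped instead of rebuilt. A minimal sketch with illustrative package names:

import networkx as nx
from collections import defaultdict

dag = nx.DiGraph([('libA', 'toolB'), ('toolB', 'toolC')])
skip_dependent = defaultdict(list)

failed = 'libA'
for pkg in nx.algorithms.descendants(dag, failed):
    skip_dependent[pkg].append(failed)

print(dict(skip_dependent))  # e.g. {'toolB': ['libA'], 'toolC': ['libA']}
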