def test_topological_sort_on_very_deep_graph(self):
     gr = pygraph.classes.graph.graph()
     gr.add_nodes(list(range(0,20001)))
     for i in range(0,20000):
         gr.add_edge((i,i+1))
     recursionlimit = getrecursionlimit()
     topological_sorting(gr)
     assert getrecursionlimit() == recursionlimit
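A minimal, self-contained version of the same check (the imports and helper name are assumptions based on the pygraph 1.8 layout used throughout these examples; the original test class is not shown here):

from sys import getrecursionlimit

from pygraph.classes.graph import graph
from pygraph.algorithms.sorting import topological_sorting

def check_deep_chain(depth=20000):
    # Build one long undirected chain 0 - 1 - 2 - ... - depth.
    gr = graph()
    gr.add_nodes(list(range(depth + 1)))
    for i in range(depth):
        gr.add_edge((i, i + 1))
    # Sorting a very deep graph must leave the interpreter recursion limit untouched.
    limit_before = getrecursionlimit()
    order = topological_sorting(gr)
    assert getrecursionlimit() == limit_before
    assert len(order) == depth + 1
    return order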
Example #2
def sort_nodes_topologically(graph, nodeLs):
    """
    Get a topological sort of a subset of the nodes of a graph
    
    @type  graph: graph_wrapper.GraphWrapper
    @param graph: a graph in which the nodes reside

    @type  nodeLs: list [node]
    @param nodeLs: a list of nodes from which to generate the sorting. No two nodes in the list may be mutually reachable (the induced ordering must be acyclic)!
    
    @rtype:  list [node]
    @return: topological sort of the nodes
    """
    # uid_dic = dict([(node.uid,node) for node in nodeLs])
    # helperNodes = uid_dic.keys()
    helperGraph = graph.__class__(originalSentence="")  # TODO: efficiency - this is done this way to avoid circular dependency
    helperGraph.add_nodes(nodeLs)
    acc = accessibility(graph)
    
    for node1 in nodeLs:
        for node2 in acc[node1]:
            if node2 in nodeLs:
                if node1.uid != node2.uid:  # TODO: efficiency
                    helperGraph.add_edge((node1, node2))
    
    sorted_nodes = topological_sorting(helperGraph)
    return sorted_nodes
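A sketch of the same idea without the project-specific GraphWrapper, using a plain pygraph digraph for the helper graph (the function and argument names below are made up for illustration):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.accessibility import accessibility
from pygraph.algorithms.sorting import topological_sorting

def sort_subset_topologically(full_graph, subset):
    # Helper graph over the chosen subset only.
    helper = digraph()
    helper.add_nodes(subset)
    reachable = accessibility(full_graph)
    # Connect node1 -> node2 whenever node2 is reachable from node1 in the full graph,
    # so the helper preserves the original ordering constraints between subset members.
    for node1 in subset:
        for node2 in reachable[node1]:
            if node2 in subset and node2 != node1:
                helper.add_edge((node1, node2))
    return topological_sorting(helper)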
 def testDigraph(self):
     
     def has_parent(node, remaining):
         # A node is correctly placed if some node still on the list is one of
         # its parents, or if there is nothing left on the list to check against.
         for each in remaining:
             if gr.has_edge(each, node):
                 return True
         return (remaining == [])
         
     gr = pygraph.digraph()
     gr.add_nodes([0,1,2,3,4,5,6,7,8])
     gr.add_edge(0,1)
     gr.add_edge(0,2)
     gr.add_edge(1,3)
     gr.add_edge(1,4)
     gr.add_edge(2,5)
     gr.add_edge(2,6)
     gr.add_edge(3,7)
     gr.add_edge(8,0)
     gr.add_edge(7,5)
     gr.add_edge(3,0)
     gr.add_edge(4,3)
     gr.add_edge(2,7)
     gr.add_edge(6,0)
     ts = topological_sorting(gr)
     while (ts):
         x = ts.pop()
         assert has_parent(x, ts)
Example #4
	def _invalidate_caches(self):
		'invalidate the downstream caches of updated nodes'
		
		if len(self.updated) == 0:
			return
			
		# Sort the nodes in worklist and remove duplicates
		sg = topological_sorting(self.digraph) # sorted graph
		
		worklist = []
		# insert nodes into worklist in sorted order
		for node in sg:
			if node in self.updated:
				worklist.append(node)
		self.updated.clear()
		
		# iterate through worklist
		while worklist:
			node = worklist.pop() # one item at a time
			downstream = breadth_first_search(self.digraph, root=node)[1] # get all downstream labels
			for n in downstream:
				if n in worklist:
					# remove labels that will already be done
					worklist.remove(n)
				# remove cache entries
				self.cache[n] = None
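For reference, a toy illustration of the two library calls this method combines (hypothetical node labels, not the original class):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.sorting import topological_sorting
from pygraph.algorithms.searching import breadth_first_search

dg = digraph()
dg.add_nodes(['a', 'b', 'c', 'd'])
for edge in [('a', 'b'), ('b', 'c'), ('b', 'd')]:
    dg.add_edge(edge)

# Worklist order: updated nodes are visited upstream-first.
print(topological_sorting(dg))                # one valid order: ['a', 'b', 'c', 'd']
# Everything downstream of 'b' (including 'b' itself) gets its cache cleared.
print(breadth_first_search(dg, root='b')[1])  # e.g. ['b', 'c', 'd']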
Example #5
    def resolve_plugin_dependencies(self):
        graph = digraph()
        problems = defaultdict(list)

        def check_plugin_dependencies(plugin_id):
            result = True

            def add_problem(problem_type, plugin_id, dependency):
                nonlocal result  # Python 3: let the flag set here reach check_plugin_dependencies
                problems[plugin_id].append(problem_type(plugin_id, dependency))
                result = False

            for dependency in self.plugin_dependencies(plugin_id):
                if dependency.id not in self.manifests:
                    add_problem(MissingDependency, plugin_id, dependency)
                elif dependency.version:
                    if self.manifests[dependency.id].version not in dependency.version:
                        add_problem(IncorrectVersion, plugin_id, dependency)
                elif dependency.id not in graph:
                    if dependency.id in self.enabled_plugins:
                        add_problem(IndirectDependency, plugin_id, dependency)
                    else:
                        add_problem(DisabledDependency, plugin_id, dependency)

            return result

        def remove_dependents(plugin_id):
            for node in traversal(graph, plugin_id, 'pre'):
                for dependent in graph[node]:
                    edge = node, dependent
                    problems[dependent].append(IndirectDependency(dependent, 
                        graph.get_edge_properties(edge)['dependency']))
                graph.del_node(node)

        graph.add_nodes(self.enabled_plugins)
        for plugin_id in self.enabled_plugins:
            if check_plugin_dependencies(plugin_id):
                for dependency in self.plugin_dependencies(plugin_id):
                    edge = dependency.id, plugin_id
                    graph.add_edge(edge)
                    graph.set_edge_properties(edge, dependency=dependency)
            else:
                remove_dependents(plugin_id)

        transitive_deps = accessibility(graph)
        cycle_nodes = [
            node
            for node in graph
            if any(
                (node in transitive_deps[dependent])
                for dependent in transitive_deps[node]
                if dependent != node)]
        for node in cycle_nodes:
            problems[node].append(CyclicDependency(node))
            graph.del_node(node)

        self.dependency_graph = graph
        self._dependency_problems = problems
        self._load_order = topological_sorting(graph)
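The load order works because every edge is added as (dependency.id, plugin_id), so topological_sorting places a plugin after everything it depends on. A toy check (hypothetical plugin ids):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.sorting import topological_sorting

g = digraph()
g.add_nodes(['core', 'ui', 'editor'])
g.add_edge(('core', 'ui'))       # 'ui' depends on 'core'
g.add_edge(('ui', 'editor'))     # 'editor' depends on 'ui'

load_order = topological_sorting(g)
assert load_order.index('core') < load_order.index('ui') < load_order.index('editor')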
 def allOff(self):
     '''Turn all servers off ungracefully
     '''
     nodeList = topological_sorting(self.graph)
     for node in nodeList:
         server = self.graph.node_attributes(node)
         for outlet in server.getOutlets():
             outlet.setState(False)
     return
Example #7
	def evaluate_dag(self):
		'Force every node to be evaluated'
		# First invalidate all changed caches
		self._invalidate_caches()
		
		# Then call update on every node
		sg = topological_sorting(self.digraph) # sorted graph
		for label in sg:
			self._update_node(label)
 def getSortedNodeList(self):
     '''
     returns a list of the nodes topologically sorted
     
     :return: a list of the nodes topologically sorted
     '''
     nodeList = topological_sorting(self.graph)
     servers=[]
     for node in nodeList:
         servers.append(self.graph.node_attributes(node))
     return servers
Example #9
def order_seq_only(graph):
    """
    A topological sort is performed on the graph. All actions are
    enclosed into a Sequence ISE structure.
    """
    _prepare(graph)
    nodes = topological_sorting(graph.reverse())
    actions = []
    for node in nodes:
        actions.extend(_create_actions_from(node, graph, create_deps=False))

    return _create_final_xml_from(actions, SEQ)
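The reverse() call flips every edge before sorting, which flips the resulting order; a quick demonstration on a two-node digraph (a sketch using the pygraph classes seen in the other examples):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.sorting import topological_sorting

g = digraph()
g.add_nodes(['build', 'deploy'])
g.add_edge(('build', 'deploy'))

print(topological_sorting(g))            # ['build', 'deploy']
print(topological_sorting(g.reverse()))  # ['deploy', 'build']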
Example #10
 def process(self):
     """ Process image processing flow for IPFGraph """
     
     # Invalidate all input ports in __blocks
     for block in self.__blocks.values():
         for iport in block.input_ports.values():
             iport.invalidate()
     
     graph = self._make_flow_graph()
     
     # Apply topological sorting and execute processing blocks in
     # topological order 
     sorted_graph = topological_sorting(graph)
     for node in sorted_graph:
         node().process()
 def topo_up_down(graph):
     """
     Yield the nodes of the graph, starting with the leaf-most
     (latest topological) node, running up to the root-most
     (earliest topological) node, and pushing back down to the leaves,
     excepting the leaf-most node.
     
     Undefined behavior if the graph is not a DAG.
     
     graph: A->B->C
     yields: (C, B, A, B)
     """
     tsort = topological_sorting(graph)
     for node in reversed(tsort):
         yield node
     for node in tsort[1 : -1]:
         yield node
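The docstring's A->B->C example, spelled out (assuming the function above is callable as a free function and that topological_sorting is imported from pygraph as in the other examples):

from pygraph.classes.digraph import digraph

g = digraph()
g.add_nodes(['A', 'B', 'C'])
g.add_edge(('A', 'B'))
g.add_edge(('B', 'C'))

print(list(topo_up_down(g)))   # ['C', 'B', 'A', 'B']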
    def test_topological_sorting_on_tree(self):
        gr = testlib.new_graph()
        st, pre, post = depth_first_search(gr)
        tree = pygraph.classes.digraph.digraph()

        
        for each in st:
            if st[each]:
                if (each not in tree.nodes()):
                    tree.add_node(each)
                if (st[each] not in tree.nodes()):
                    tree.add_node(st[each])
                tree.add_edge((st[each], each))
        
        ts = topological_sorting(tree)
        for each in ts:
            if (st[each]):
                assert ts.index(each) > ts.index(st[each])
Example #13
    def topo_sort_work(self):
        my_work = self.work

        work_dict = {}
        for w in my_work:
            work_dict[w.work_id] = w

        graph = digraph()
        graph.add_nodes(my_work)

        for w in my_work:
            for p in w.prereqs:
                if p not in work_dict:
                    continue
                if work_dict[p]:
                    graph.add_edge((work_dict[p], w))
        self.work = topological_sorting(graph)
        return
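A runnable sketch of the same prerequisite wiring, using a hypothetical WorkItem tuple in place of self.work:

from collections import namedtuple

from pygraph.classes.digraph import digraph
from pygraph.algorithms.sorting import topological_sorting

WorkItem = namedtuple('WorkItem', ['work_id', 'prereqs'])

items = [
    WorkItem('compile', ()),
    WorkItem('test', ('compile',)),
    WorkItem('package', ('test',)),
]
by_id = {w.work_id: w for w in items}

g = digraph()
g.add_nodes(items)
for w in items:
    for p in w.prereqs:
        if p in by_id:
            g.add_edge((by_id[p], w))   # prerequisite -> dependent work item

print([w.work_id for w in topological_sorting(g)])   # ['compile', 'test', 'package']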
Example #14
def find_connected_resources(resource, dependency_graph=None):
    """
    Collects all resources connected to the given resource and returns a
    dictionary mapping member resource classes to new collections containing
    the members found.
    """
    # Build a resource_graph.
    resource_graph = \
                build_resource_graph(resource,
                                     dependency_graph=dependency_graph)
    entity_map = OrderedDict()
    for mb in topological_sorting(resource_graph):
        mb_cls = get_member_class(mb)
        ents = entity_map.get(mb_cls)
        if ents is None:
            ents = []
            entity_map[mb_cls] = ents
        ents.append(mb.get_entity())
    return entity_map
 def test_topological_sorting_on_digraph(self):
     
     def is_ordered(node, list):
         # Has parent on list
         for each in list:
             if gr.has_edge((each, node)):
                 return True
         # Has no possible ancestors on list
         st, pre, post = depth_first_search(gr, node)
         for each in list:
             if (each in st):
                 return False
         return True
         
     gr = testlib.new_digraph()
     ts = topological_sorting(gr)
     
     while (ts):
         x = ts.pop()
         assert is_ordered(x, ts)
 def testTree(self):
     gr = pygraph.digraph()
     gr.add_nodes([0,1,2,3,4,5,6,7,8])
     gr.add_edge(0,1)
     gr.add_edge(0,2)
     gr.add_edge(1,3)
     gr.add_edge(1,4)
     gr.add_edge(2,5)
     gr.add_edge(2,6)
     gr.add_edge(3,7)
     gr.add_edge(8,0)
     ts = topological_sorting(gr)
     assert ts.index(8) < ts.index(0)
     assert ts.index(1) > ts.index(0)
     assert ts.index(2) > ts.index(0)
     assert ts.index(3) > ts.index(1)
     assert ts.index(4) > ts.index(1)
     assert ts.index(5) > ts.index(2)
     assert ts.index(6) > ts.index(2)
     assert ts.index(7) > ts.index(3)
Example #17
def find_connected_resources(resource, dependency_graph=None):
    """
    Collects all resources connected to the given resource and returns a 
    dictionary mapping member resource classes to new collections containing
    the members found.
    """
    # Build a resource_graph.
    resource_graph = \
                build_resource_graph(resource,
                                     dependency_graph=dependency_graph)
    # Build an ordered dictionary of collections.
    collections = OrderedDict()
    for mb in topological_sorting(resource_graph):
        mb_cls = get_member_class(mb)
        coll = collections.get(mb_cls)
        if coll is None:
            # Create new collection.
            coll = create_staging_collection(mb)
            collections[mb_cls] = coll
        coll.add(mb)
    return collections
Example #18
def transitive_edges(graph):
    """
    Return a list of transitive edges.
    
    Example of transitivity within graphs: A -> B, B -> C, A ->  C
    in this case the transitive edge is: A -> C
    
    @attention: This function is only meaningful for directed acyclic graphs.
    
    @type graph: digraph
    @param graph: Digraph
    
    @rtype: List
    @return: List containing tuples with transitive edges (or an empty array if the digraph
        contains a cycle) 
    """
    # if the graph contains a cycle we return an empty list
    if find_cycle(graph):
        return []
    
    tranz_edges = [] # create an empty array that will contain all the tuples
    
    # run through all the nodes in the graph
    for start in topological_sorting(graph):
        #find all the successors on the path for the current node
        successors = [] 
        for a in traversal(graph,start,'pre'):
            successors.append(a)
        del successors[0]  # we need all the nodes on its path except the start node itself
        
        for next in successors:
            #look for an intersection between all the neighbors of the 
            #given node and all the neighbors from the given successor
            intersect_array = _intersection(graph.neighbors(next), graph.neighbors(start) )
            for a in intersect_array:
                if graph.has_edge((start, a)):
                    ##check for the detected edge and append it to the returned array   
                    tranz_edges.append( (start,a) )      
    return tranz_edges # return the final array
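The code above appears to be pygraph's own pygraph.algorithms.critical.transitive_edges, so the packaged version can be exercised directly; a small usage sketch:

from pygraph.classes.digraph import digraph
from pygraph.algorithms.critical import transitive_edges

g = digraph()
g.add_nodes(['A', 'B', 'C'])
g.add_edge(('A', 'B'))
g.add_edge(('B', 'C'))
g.add_edge(('A', 'C'))          # the shortcut implied by A -> B -> C

print(transitive_edges(g))      # [('A', 'C')]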
    def get_levels(self):
        '''arrange the nodes in layers according to degree
        level 0 will have no outgoing edges, last level will have no incoming edges'''
        # Topological order: any strict order which satisfies the partial order
        # as established by the outgoing edges
        graph = self._graph
        ordered = topological_sorting(graph)
        ordered.reverse()
        # print 'ordered %s'%ordered
        result = []
        for elem in ordered:
            deps = graph.neighbors(elem)
            maximo = -1
            for i, level in enumerate(result):
                for d in deps:
                    if d in level and i > maximo:
                        maximo = i

            if maximo + 1 >= len(result):
                result.append(set())
            result[maximo + 1].add(elem)

        return result
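A toy check of the layering contract (a sketch; here an edge x -> y is read as "x depends on y", matching the neighbors() lookup above):

from pygraph.classes.digraph import digraph
from pygraph.algorithms.sorting import topological_sorting

g = digraph()
g.add_nodes(['app', 'libA', 'libB', 'base'])
for e in [('app', 'libA'), ('app', 'libB'), ('libA', 'base'), ('libB', 'base')]:
    g.add_edge(e)

order = topological_sorting(g)
order.reverse()
print(order[0])   # 'base': no outgoing edges, so it lands in level 0
# On this graph get_levels() would produce [{'base'}, {'libA', 'libB'}, {'app'}].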
Example #20
def calculate_dependencies(configurers):
    """Given a dictionary of name -> DependencyConfigurer objects, returns a
    list with the leaf nodes of the implied dependencies first.
    """
    
    # We do this via graph theory rather than hard-coding a particular startup
    # order.
    config_graph = digraph()
    for configurer in configurers.values():
        config_graph.add_node(configurer)
    for configurer_name, configurer in configurers.items():
        # Add outbound dependencies for every node.
        for name in configurer.depends_on_names:
            _log.debug("%s depends on %s", configurer_name, name)
            config_graph.add_edge(configurer, configurers[name])
        # Add inbound dependencies for every node.
        for name in configurer.depended_on_names:
            _log.debug("%s depends on %s", name, configurer_name)
            config_graph.add_edge(configurers[name], configurer)
    
    # Reverse here because in topological sorting, nodes with no inbound are first
    # and nodes with no outbound edges are last. We actually want to do things the
    # other way around.
    return topological_sorting(config_graph.reverse())
Example #21
def main():
    """Everybody's favorite starting function"""
    parser = argparse.ArgumentParser(
        description="Run a series of json specified jobs to exercise pipeline and test results."
    )
    parser.add_argument(
        "-r",
        "--replace",
        help="Replace macro strings in commands with value in CURRENT=REPLACE format.",
        action="append",
        default=[],
    )
    parser.add_argument(
        "--environment", help="Generic additions to the environment variables (path, etc.)", default="DUMMY=DUMMY"
    )
    parser.add_argument("--type", help="Types of jobs to run.", default="all")
    parser.add_argument("--not_type", help="Types of jobs not to run.", default="all")
    parser.add_argument("--jobs", help="json file with jobs to run")
    parser.add_argument("--output", help="Directory for results and intermediate files.")
    parser.add_argument("--jobnames", help="Name of particular jobs to run (e.g. 'job1|job2|job3').", default=None)
    parser.add_argument("--update_file", help="Name of particular job to run.", default=None)
    parser.add_argument("--update_decision", help="Name of particular job to run.", default=2)
    parser.add_argument("--update_root", help="Path to gold data to update.", default=None)
    parser.add_argument(
        "--ts_base", help="Base of torrent suite checkout for auto setting of environment", default=None
    )
    parser.add_argument("--gold", help="Path to base of gold data root", default=None)
    parser.add_argument("--data", help="Path to base of data to be used", default=None)
    parser.add_argument("--failures", help="Failures log", default=None)
    args = parser.parse_args()
    args_ok = check_args(args)
    # Put the output root in our regular expressions to be replaced
    for i in range(len(args.replace)):
        if re.match("__OUTPUT_ROOT__", args.replace[i]) is not None:
            print "__OUTPUT_ROOT__ is specified from --output, not manually."
            raise ValueError("__OUTPUT_ROOT__ must not be set via --replace")
    args.replace.append("__OUTPUT_ROOT__=%s" % args.output)

    # Put the gold root in our regular expressions to be replaced
    if args.gold != None:
        args.replace.append("__GOLD_ROOT__=%s" % args.gold)
        args.update_root = args.gold

    # Put the data root if specified in the regular expressions to be replaced
    if args.data != None:
        args.replace.append("__DATA__=%s" % args.data)

    # If specified, set up the paths based on specified torrent server code root
    if args.ts_base != None:
        args.environment = (
            args.environment
            + " PATH=%s/build/Analysis:%s/build/Analysis/TMAP:%s/pipeline/bin:%s ION_CONFIG=%s/Analysis/config"
            % (args.ts_base, args.ts_base, args.ts_base, os.getenv("PATH"), args.ts_base)
        )

    if args_ok[0] == False:
        sys.exit(args_ok[1])
    log_file = args.output + "/log.txt"
    json_results = args.output + "/results.json"
    mkdir_p(args.output)

    # Main logger at the debug level
    logging.basicConfig(
        filename=log_file, filemode="w", level=logging.DEBUG, format="%(asctime)s\t%(levelname)s\t%(message)s"
    )

    # Add a logger for the console at the info level
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(message)s")
    console_logger.setFormatter(formatter)
    logging.getLogger("").addHandler(console_logger)

    if args.failures != None:
        fail_log = open(args.failures, "w")

    # Read our json
    logging.info("Reading json file: " + args.jobs)
    jobs_file = open(args.jobs)
    jobs = json.loads(jobs_file.read())
    jobs_file.close()
    logging.info("Got %d jobs from file" % len(jobs))

    report_file = os.path.join(args.output, "report.txt")
    runner = JobRunner(args, report_file)
    job_ids = jobs.keys()
    job_values = jobs.values()

    # Create our graph for dependency checking
    gr = digraph()
    # Add all of the nodes
    for i in range(len(job_ids)):
        logging.debug("Adding: %s" % job_values[i]["job_name"])
        if gr.has_node(job_values[i]["job_name"]):
            logging.info("Already here?? %s" % job_values[i]["job_name"])
        gr.add_node(job_values[i]["job_name"])

    # Add all the edges
    for i in range(len(job_ids)):
        logging.debug("Adding edges for: %s" % job_values[i]["job_name"])
        if "dependencies" in job_values[i]:
            for j in range(len(job_values[i]["dependencies"])):
                logging.debug("Adding edge: %s %s" % (job_values[i]["dependencies"][j], job_values[i]["job_name"]))
                gr.add_edge((job_values[i]["dependencies"][j], job_values[i]["job_name"]))

    order = topological_sorting(gr)
    logging.debug("Order is: %s" % order)
    selector = re.compile(args.type)
    neg_selector = re.compile(args.not_type)
    jobnames = None
    if args.jobnames != None:
        jobnames = re.compile(args.jobnames)
    dorun = True
    start_time = time.time()
    for i in range(len(order)):
        # See if we're running this class of jobs
        dorun = args.type == "all"
        job = jobs[order[i]]
        if dorun == False:
            for tag_ix in range(len(job["tags"])):
                if selector.match(job["tags"][tag_ix]):
                    dorun = True
                    break
            for tag_ix in range(len(job["tags"])):
                if neg_selector.match(job["tags"][tag_ix]):
                    dorun = False
                    break

        # Check to see if we're running a particular job which trumps tags
        if jobnames != None and jobnames.match(job["job_name"]):
            dorun = True
        elif jobnames != None:
            dorun = False

        # Run our job if necessary
        if dorun:
            logging.info("Running job %d %s" % (i, jobs[order[i]]["job_name"]))
            runner.run_job(job["job_name"], job)
            if job["ret_val"] == 0:
                logging.info("Passed in %.2f seconds" % job["elapsed_time"])
            else:
                logging.info("Failed in %.2f seconds" % job["elapsed_time"])
                if args.failures != None:
                    fail_log.write("Job %s failed\n" % job["job_name"])
                    fail_log.write("stdout file: %s\n" % job["stdout"])
                    sys.stdout.write("stdout file: %s\n" % job["stdout"])
                    out = open(job["stdout"])
                    print_head_tail(out, fail_log, alt_out=sys.stdout)
                    out.close()
                    fail_log.write("stderr file: %s\n" % job["stderr"])
                    sys.stdout.write("stderr file: %s\n" % job["stderr"])
                    out = open(job["stderr"])
                    print_head_tail(out, fail_log, alt_out=sys.stdout)
                    out.close()
        else:
            logging.info("Skipping job %d %s" % (i, job["job_name"]))
    elapsed_time = time.time() - start_time
    logging.info("All tests took: %.2f seconds" % elapsed_time)
    if args.failures != None:
        fail_log.write("Ran %d jobs, %d passed and %d failed\n" % (runner.run, runner.passed, runner.failed))
        fail_log.close()
    json_out = open(json_results, mode="w")
    json_out.write(json.dumps(jobs, sort_keys=True, indent=4, separators=(",", ": ")))
    logging.info("Ran %d jobs, %d passed and %d failed" % (runner.run, runner.passed, runner.failed))
    logging.info("Full run took: %.3f seconds" % (elapsed_time))

    # Look to see if we need to update the gold data
    if args.update_file != None and args.update_root != None:
        ufile = open(os.path.join(args.output, args.update_file))
        ustats = json.loads(ufile.read())
        if float(ustats["quantile_min"]) > float(args.update_decision) and runner.failed == 0:
            gold_backup = "%s_%s" % (args.update_root, datetime.datetime.now().isoformat())
            print "Moving %s to %s and %s to new gold" % (args.update_root, gold_backup, args.output)
            os.rename(args.update_root, gold_backup)
            os.symlink(args.output, args.update_root)
        else:
            print "Skipping update as metric is: %.2f and threshold is %.2f or numfailed > 0 (%d)" % (
                ustats["quantile_min"],
                float(args.update_decision),
                runner.failed,
            )
    else:
        print "Skipping update as file not specified"

    logging.info("Done.")
    if runner.failed > 0:
        print "Failed."
        sys.exit(1)
    else:
        print "Success."
Example #22
        "zonesystem",
        "rawspeed",
    ]
)

add_edges(gr)

# make sure we don't have cycles:
cycle_list = find_cycle(gr)
if cycle_list:
    print "cycles:"
    print cycle_list
    exit(1)

# get us some sort order!
sorted_nodes = topological_sorting(gr)
length = len(sorted_nodes)
priority = 1000
for n in sorted_nodes:
    # now that should be the priority in the c file:
    print "%d %s" % (priority, n)
    filename = "../src/iop/%s.c" % n
    if not os.path.isfile(filename):
        filename = "../src/iop/%s.cc" % n
    if not os.path.isfile(filename):
        if not n == "rawspeed":
            print "could not find file `%s', maybe you're not running inside tools/?" % filename
        continue
    replace_all(
        filename,
        "( )*?(module->priority)( )*?(=).*?(;).*\n",
    def get_bootschedule(self):
        """ Calculate a Single Appearance Schedule to boot peeking tokens.
        """

        sdfgraph = self.sdfgraph
        edges = sdfgraph.edges()
        peek_tokens = sdfgraph.peek_tokens
        strong_connected = self.strong_connected
        repetition = sdfgraph.repetition
        boot_schedule = []

        if self.boot_schedule != []:
            return self.boot_schedule

        PEEK_FOUND = False
        for edge in edges:
            if len(peek_tokens[edge]) > 0:
                PEEK_FOUND = True
                break

        if not PEEK_FOUND:
            return []

        # calculate repetitions of each to fill up peeks only
        rep={}

        nodes=topological_sorting(sdfgraph.graph)
        nodes.reverse()         # reversed
        for node in nodes:
            if node in rep:   # the node is backedge
                continue
            rep[node] = 0
            backedge = [];
            for succ in sdfgraph.out_nodes(node):
                edge = (node, succ)
                if succ not in rep:   # the node is backedge
                    backedge.append(succ)   # process later
                    continue
                rep[node]=max(rep[node],int(ceil((rep[succ]*sdfgraph.consumption[edge]+len(peek_tokens[edge]))/float(sdfgraph.production[edge]))))
            for u in backedge:
                rep[u]=rep[node]

            # calculate max level difference
            for succ in sdfgraph.out_nodes(node):
                edge = (node,succ)
                diff = fabs(int(ceil(float(rep[node])/repetition[node]))-int(ceil(float(rep[succ])/repetition[succ])))
                if self.max_lev_diff < diff:
                    self.max_lev_diff = diff

        nodes.reverse()         # ordered

        # initialize number of actor invocations required
        schedule_len = 0
        for node in nodes:
            schedule_len = schedule_len + rep[node]

        # initialize fillstate
        fillstate = {}
        for edge in sdfgraph.edges():
            fillstate[edge]=sdfgraph.delay[edge]
        
        while schedule_len > 0:
            init_sched_len = schedule_len
            for node in nodes:
                if rep[node] > 0:
                    # check whether node is fireable, i.e. enough tokens on the input channel
                    firable = True
                    for in_node in sdfgraph.in_nodes(node):
                        edge=(in_node,node)
                        pop_rate = sdfgraph.consumption[edge]
                        peek_rate = len(sdfgraph.peek_tokens[edge])
                        if fillstate[edge]-(pop_rate+peek_rate) < 0:
                            firable = False
                    # If so, add to bootschedule and update fillstate
                    if firable:
                        for in_node in sdfgraph.in_nodes(node):
                            edge=(in_node,node)
                            fillstate[edge]=fillstate[edge]-sdfgraph.consumption[edge]
                        for out_node in sdfgraph.out_nodes(node):
                            edge=(node,out_node)
                            fillstate[edge]=fillstate[edge]+sdfgraph.production[edge]
                        rep[node] = rep[node] - 1
                        boot_schedule.append(node)
                        schedule_len = schedule_len - 1
            if init_sched_len == schedule_len:
                print 'ERROR: Boot schedule calculation failed.'
                sys.exit(1)
        self.boot_schedule = boot_schedule
        
        # test automatically if both of schedules are computed
        #self.check_schedules()
        return self.boot_schedule
Example #24
def shortest_greedy_generator(tasks_graph,platform_graph,SE_graph,ALU_graph):
    """
    Generator using a randomized greedy algorithm
    TODO: clean the code!!!
    """
    task_nb = len(tasks_graph.nodes())
    topological_order = topological_sorting(tasks_graph)
    roots_list = roots_filter(tasks_graph)

    routed_platform_graph = copy.deepcopy(platform_graph)
    routed_SE_graph = copy.deepcopy(SE_graph)
    routed_ALU_graph = copy.deepcopy(ALU_graph)

    root_subgraphs = rootgraphs(topological_order,roots_list)
    random.shuffle(root_subgraphs)

    random_topological_order = [item for sublist in root_subgraphs for item in sublist] # flattening the list

    edges_list_sorted = []
    for node in random_topological_order:
        for edge in tasks_graph.edges():
            if edge[0] == node:
                edges_list_sorted.append(edge)

    mapping = [0 for i in range(task_nb)]
    placed = [False for i in range(len(random_topological_order))]
    used_nodes = []
    for (source_node, target_node) in edges_list_sorted:
        if placed[source_node] == False and placed[target_node] == False:
            if source_node in roots_list:
                # DEPTH = 2
                local_depth = DEPTH+1
                available_start_nodes = []
                while available_start_nodes == []:
                    for node in routed_platform_graph.nodes():
                        if 'ALU' in node and node not in used_nodes and int(node.split('_')[1]) < local_depth:
                            available_start_nodes.append(node)
                    local_depth += 1

                chosen_source_node = available_start_nodes[random.randint(0,len(available_start_nodes)-1)]
                used_nodes.append(chosen_source_node)
                mapping[source_node] = (int(re.sub('\D', '', chosen_source_node.split('_')[0])),int(chosen_source_node.split('_')[1]))
                placed[source_node] = True
            # else:
                # source_node_location = 'ALU' + mapping[source_node][0] + '_' + mapping[source_node][1]
                closest_nodes = shortest_path(routed_platform_graph,chosen_source_node)[1]
                shortest_path_list = list(filter(lambda x: 'ALU' in x[0], sorted(closest_nodes.items(), key=lambda x: (x[1],x[0]))))[1:]
                # DEPTH = 2
                local_depth = DEPTH
                chosable_nodes = []
                while chosable_nodes == []:
                    for node, dist in shortest_path_list:
                        if node not in used_nodes and dist <= local_depth:
                            chosable_nodes.append(node)
                    local_depth += 1
                chosen_target_node = chosable_nodes[random.randint(0,len(chosable_nodes)-1)]
                used_nodes.append(chosen_target_node)
                mapping[target_node] = (int(re.sub('\D', '', chosen_target_node.split('_')[0])),int(chosen_target_node.split('_')[1]))    # coordinates
                placed[target_node] = True
                # routed_platform_graph.del_node(chosen_source_node)
                # routed_platform_graph.del_node(chosen_target_node)

        elif placed[source_node] == True and placed[target_node] == False:
            source_node_location = 'ALU' + str(mapping[source_node][0]) + '_' + str(mapping[source_node][1])
            closest_nodes = shortest_path(routed_platform_graph,source_node_location)[1]
            shortest_path_list = list(filter(lambda x: 'ALU' in x[0], sorted(closest_nodes.items(), key=lambda x: (x[1],x[0]))))[1:]
            # DEPTH = 2
            local_depth = DEPTH
            chosable_nodes = []
            while chosable_nodes == []:
                for node, dist in shortest_path_list:
                    if node not in used_nodes and dist <= local_depth:
                        chosable_nodes.append(node)
                local_depth += 1
            chosen_target_node = chosable_nodes[random.randint(0,len(chosable_nodes)-1)]
            used_nodes.append(chosen_target_node)
            mapping[target_node] = (int(re.sub('\D', '', chosen_target_node.split('_')[0])),int(chosen_target_node.split('_')[1]))    # coordinates
            # routed_platform_graph.del_node()
            placed[target_node] = True

        elif placed[source_node] == False and placed[target_node] == True:
            (target_x, target_y) = (mapping[target_node][0], mapping[target_node][1])
            local_depth = DEPTH
            chosable_nodes = []
            while chosable_nodes == []:
                chosable_nodes_coord = []
                delta = list(range(-local_depth,local_depth+1))
                for i in delta:
                    for j in delta:
                        (new_x, new_y) = (target_x+i, target_y+j)
                        if 0 <= new_x <= PE_col-1 and 0 <= new_y <= PE_row-1:
                            chosable_nodes_coord.append((new_x,new_y))

                chosable_nodes_coord.remove((target_x,target_y))

                for x,y in chosable_nodes_coord:
                    node_location = 'ALU' + str(x) + '_' + str(y)
                    closest_nodes = shortest_path(routed_platform_graph,node_location)[1]
                    filtered_closest_nodes = list(filter(lambda x: 'ALU' in x[0] and x[1] <= local_depth, sorted(closest_nodes.items(), key=lambda x: (x[1],x[0]))))[1:]
                    for node, _ in filtered_closest_nodes:
                        if node not in used_nodes and node not in chosable_nodes:
                            chosable_nodes.append(node)

                local_depth += 1

            chosen_source_node = chosable_nodes[random.randint(0,len(chosable_nodes)-1)]
            used_nodes.append(chosen_source_node)
            mapping[source_node] = (int(re.sub('\D', '', chosen_source_node.split('_')[0])), int(chosen_source_node.split('_')[1]))  # use the node just chosen for the source
            placed[source_node] = True

    # shift_mapping(mapping)

    # if len(list(set(mapping))) != len(tasks):
        # print('WTF')

    return mapping
Example #25
def astar_tasks_routing(tasks_graph,platform_graph,SE_graph,individual):
    """
    Routing using a shortest path algorithm (A*) and some BFS
    """
    # roots_list, sinks_list = rootsinks_filter(tasks_graph)
    topological_order = topological_sorting(tasks_graph)
    edges_list_sorted = []
    for node in topological_order:
        for edge in tasks_graph.edges():
            if edge[0] == node:
                edges_list_sorted.append(edge)

    weighted_tasks_graph = copy.deepcopy(tasks_graph)
    routed_platform_graph = copy.deepcopy(platform_graph)
    routed_SE_graph = copy.deepcopy(SE_graph)
    # SE_search_graph = euclidean()  # chow not working!?
    # platform_search_graph = euclidean()
    # # Optimizing all the original graphs
    # SE_search_graph.optimize(routed_SE_graph)
    # platform_search_graph.optimize(routed_platform_graph)
    # search_graph.optimize(routed_platform_graph)
    routable = True
    paths_list = []
    for edge in edges_list_sorted:
        source_node = 'ALU' + str(individual[edge[0]][0]) + '_' + str(individual[edge[0]][1])
        target_node = 'ALU' + str(individual[edge[1]][0]) + '_' + str(individual[edge[1]][1])

        # TODO: add stupid constraint for SE not routing west if coming from same ALU...

        if (source_node,target_node) not in routed_platform_graph.edges():
            routable_SE_graph = clean_unroutable_nodes(routed_SE_graph,source_node,target_node)
            # search_graph.optimize(routable_SE_graph)
            try:
                route_path = heuristic_search(routable_SE_graph,source_node,target_node,SE_search_graph)
                paths_list.append(route_path)
            except:
                routable = False
                weight = 10000  # high weight penalty to not consider this individual
                # TODO: some repair mechanism?
                weighted_tasks_graph.set_edge_weight(edge,weight)
                return weighted_tasks_graph, edges_list_sorted, paths_list

        else:
            # search_graph.optimize(routed_platform_graph)
            try:
                route_path = heuristic_search(routed_platform_graph,source_node,target_node,platform_search_graph)
                # routed_platform_graph.del_edge((source_node,target_node))
                paths_list.append(route_path)
            except:
                routable = False
                weight = 10000  # high weight penalty to not consider this individual
                # TODO: some repair mechanism?
                weighted_tasks_graph.set_edge_weight(edge,weight)
                return weighted_tasks_graph, edges_list_sorted, paths_list

        if routable:
            for i in range(len(route_path)-1):
                if 'ALU' in route_path[i] and 'ALU' in route_path[i+1]:
                    routed_platform_graph.del_edge((route_path[i],route_path[i+1]))
                else:
                    routed_platform_graph.del_edge((route_path[i],route_path[i+1]))
                    routed_SE_graph.del_edge((route_path[i],route_path[i+1]))

            weight = path_evaluation(platform_graph,route_path)
            weighted_tasks_graph.set_edge_weight(edge,weight)
        else:
            pass
            # weight = 10000  # high weight penalty to not consider this individual
            # # TODO: some repair mechanism?
            # weighted_tasks_graph.set_edge_weight(edge,weight)

    return weighted_tasks_graph, edges_list_sorted, paths_list
Example #26
def ccm_fast_export(releases, graphs):
    global acn_ancestors
    global users
    users = users()
    logger.basicConfig(filename='ccm_fast_export.log',level=logger.DEBUG)

    commit_lookup = {}

    # Get the  initial release
    for k, v in releases.iteritems():
        if k == 'delimiter':
            continue
        if k == 'ccm_types':
            continue
        if v['previous'] is None:
            release = k
            break
    logger.info("Starting at %s as initial release" % release)

    if 'created' not in releases[release]:
        initial_release_time = 0.0 # epoch for now since releases[release] has no 'created' key :(
    else:
        initial_release_time = time.mktime(releases[release]['created'].timetuple())
    mark = 0

    files = []
    # Create the initial release
    # get all the file objects:
    file_objects = [ccm_cache.get_object(o) for o in releases[release]['objects']]
    project_obj = ccm_cache.get_object(releases[release]['fourpartname'])
    paths = project_obj.get_members()
    for o in file_objects:
        if o.get_type() != 'dir':
            object_mark, mark = create_blob(o, mark)
            for p in paths[o.get_object_name()]:
                files.append('M ' + releases['ccm_types']['permissions'][o.get_type()] + ' :'+str(object_mark) + ' ' + p)

    empty_dirs = releases[release]['empty_dirs']
    logger.info("Empty dirs for release %s\n%s" %(release, empty_dirs))
    mark = create_blob_for_empty_dir(get_mark(mark))

    #file_list = create_file_list(objects, object_lookup, releases['ccm_types'], empty_dirs=empty_dirs, empty_dir_mark=mark)
    if empty_dirs:
        for d in empty_dirs:
            if mark:
                path = d + '/.gitignore'
                files.append('M 100644 :' + str(mark) + ' ' + path)

    mark = get_mark(mark)

    commit_info = ['reset refs/tags/' + release, 'commit refs/tags/' + release, 'mark :' + str(mark),
                   'author Nokia <*****@*****.**> ' + str(int(initial_release_time)) + " +0000",
                   'committer Nokia <*****@*****.**> ' + str(int(initial_release_time)) + " +0000", 'data 15',
                   'Initial commit', '\n'.join(files), '']
    print '\n'.join(commit_info)

    logger.info("git-fast-import:\n%s" %('\n'.join(commit_info)))

    tag_msg = 'Release: %s' %release
    annotated_tag = ['tag %s' % release,
               'from :%s' % str(mark),
               'tagger Nokia <*****@*****.**> ' + str(int(initial_release_time)) + " +0000",
               'data %s' % len(tag_msg),
               tag_msg]
    print '\n'.join(annotated_tag)
    
    commit_lookup[release] = mark
    # do the following releases (graphs)
    release_queue = deque(releases[release]['next'])
    while release_queue:
        release = release_queue.popleft()
        previous_release = releases[release]['previous']

        logger.info("Next release: %s" % release)
        commit_graph = graphs[release]['commit']
        commit_graph = fix_orphan_nodes(commit_graph, previous_release)

        commit_graph = ch.spaghettify_digraph(commit_graph, previous_release, release)

        #htg.commit_graph_to_image(commit_graph, releases[release], graphs[release]['task'], name=releases[release]['name']+'_after' )

        # Find the cutting nodes
        logger.info("Finding the cutting nodes")
        undirected = graph()
        undirected.add_nodes(commit_graph.nodes())
        [undirected.add_edge(edge) for edge in commit_graph.edges()]
        cutting_nodes = cut_nodes(undirected)
        del undirected

        # Create the reverse commit graph
        logger.info("Building the reverse commit graph")
        reverse_commit_graph = commit_graph.reverse()

        # Compute the accessibility matrix of the reverse commit graph
        logger.info("Compute the ancestors")
        ancestors = accessibility(reverse_commit_graph)
        del reverse_commit_graph

        logger.info("Ancestors of the release: %s" % str(ancestors[release]))

        # Clean up the ancestors matrix
        for k, v in ancestors.iteritems():
            if k in v:
                v.remove(k)

        # Get the commits order
        commits = topological_sorting(commit_graph)

        # Fix the commits order list
        commits.remove(previous_release)
        commits.remove(release)

        last_cutting_node = None

        # Check if the release (Synergy project) has changed name; if it has, the
        # 'base' directory needs to be renamed
        if releases.has_key('delimiter'):
            delim = releases['delimiter']
        else:
            delim = '-'
        previous_name = previous_release.split(delim)[0]
        current_name = release.split(delim)[0]
        if current_name != previous_name:
            logger.info("Name changed: %s -> %s" %(previous_name, current_name))
            from_mark = commit_lookup[previous_release]
            mark, commit = rename_toplevel_dir(previous_name, current_name, release, releases, mark, from_mark)
            print '\n'.join(commit)
            # adjust the commit lookup
            commit_lookup[previous_release] = mark

        for counter, commit in enumerate(commits):
            logger.info("Commit %i/%i" % (counter+1, len(commits)))

            acn_ancestors = []
            if last_cutting_node is not None:
                acn_ancestors = ancestors[last_cutting_node]

            # Create the references lists. It lists the parents of the commit
            #reference = [commit_lookup[parent] for parent in ancestors[commit] if parent not in acn_ancestors]
            reference = [commit_lookup[parent] for parent in commit_graph.incidents(commit)]

            if len(reference) > 1:
                # Merge commit
                mark = create_merge_commit(commit, release, releases, mark, reference, graphs, set(ancestors[commit]) - set(acn_ancestors))
            else:
                # Normal commit
                mark = create_commit(commit, release, releases, mark, reference, graphs)

            # Update the lookup table
            commit_lookup[commit] = mark

            # Update the last cutting edge if necessary
            if commit in cutting_nodes:
                last_cutting_node = commit

        if last_cutting_node is not None:
            acn_ancestors = ancestors[last_cutting_node]

        reference = [commit_lookup[parent] for parent in ancestors[release] if parent not in acn_ancestors]
        logger.info("Reference %s" %str([parent for parent in ancestors[release] if parent not in acn_ancestors]))
        if not reference:
            logger.info("Reference previous %s, mark: %d" % (releases[release]['previous'], commit_lookup[releases[release]['previous']]))
            reference = [commit_lookup[ releases[release]['previous'] ] ]

        mark, merge_commit = create_release_merge_commit(releases, release, get_mark(mark), reference, graphs, set(ancestors[release]) - set(acn_ancestors))
        print '\n'.join(merge_commit)
        annotated_tag = create_annotated_tag(releases, release, mark)
        print '\n'.join(annotated_tag)

        commit_lookup[release] = mark
        release_queue.extend(releases[release]['next'])
        #release = releases[release]['next']
        #release = None

    #reset to master
    master = get_master_tag()
    reset = ['reset refs/heads/master', 'from :' + str(commit_lookup[master])]
    logger.info("git-fast-import:\n%s" %('\n'.join(reset)))
    print '\n'.join(reset)
Example #28
def critical_path(graph):
    """
    Compute and return the critical path in an acyclic directed weighted graph.
    
    @attention: This function is only meaningful for directed weighted acyclic graphs
    
    @type graph: digraph 
    @param graph: Digraph
    
    @rtype: List
    @return: List containing all the nodes in the path (or an empty list if the graph
        contains a cycle)
    """
    # if the graph contains a cycle we return an empty list
    if find_cycle(graph):
        return []
    
    #this empty dictionary will contain a tuple for every single node
    #the tuple contains the information about the most costly predecessor 
    #of the given node and the cost of the path to this node
    #(predecessor, cost)
    node_tuples = {}
        
    topological_nodes = topological_sorting(graph)
    
    # all the tuples must be set to a default value for every node in the graph
    for node in topological_nodes: 
        node_tuples.update( {node :(None, 0)}  )
    
    # run through all the nodes in topological order
    for node in topological_nodes:
        predecessors =[]
        #we must check all the predecessors
        for pre in graph.incidents(node):
            max_pre = node_tuples[pre][1]
            predecessors.append( (pre, graph.edge_weight( (pre, node) ) + max_pre )   )
        
        max = 0; max_tuple = (None, 0)
        for i in predecessors:#look for the most costly predecessor
            if i[1] >= max:
                max = i[1]
                max_tuple = i
        #assign the maximum value to the given node in the node_tuples dictionary
        node_tuples[node] = max_tuple                 
    
    #find the critical node
    max = 0; critical_node = None
    for k,v in list(node_tuples.items()):
        if v[1] >= max:
            max= v[1]
            critical_node = k
            
    
    path = []
    # find the critical path by backtracking through the dictionary
    def mid_critical_path(end):
        if node_tuples[end][0] != None:
            path.append(end)
            mid_critical_path(node_tuples[end][0])
        else:
            path.append(end)
    #call the recursive function
    mid_critical_path(critical_node)
    
    path.reverse()
    return path #return the array containing the critical path
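The code above appears to be pygraph's own pygraph.algorithms.critical.critical_path; a short usage sketch with the packaged version on a tiny weighted DAG:

from pygraph.classes.digraph import digraph
from pygraph.algorithms.critical import critical_path

g = digraph()
g.add_nodes(['start', 'mid', 'end'])
g.add_edge(('start', 'mid'), wt=2)
g.add_edge(('mid', 'end'), wt=3)
g.add_edge(('start', 'end'), wt=1)   # cheaper shortcut, not on the critical path

print(critical_path(g))              # ['start', 'mid', 'end'] (total weight 5)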
Example #29
    'watermark',
    'zonesystem',
    'rawspeed'
])

add_edges(gr)

# make sure we don't have cycles:
cycle_list = find_cycle(gr)
if cycle_list:
    print "cycles:"
    print cycle_list
    exit(1)

# get us some sort order!
sorted_nodes = topological_sorting(gr)
length = len(sorted_nodes)
priority = 1000
for n in sorted_nodes:
    # now that should be the priority in the c file:
    print "%d %s" % (priority, n)
    filename = "../src/iop/%s.c" % n
    if not os.path.isfile(filename):
        filename = "../src/iop/%s.cc" % n
    if not os.path.isfile(filename):
        if not n == "rawspeed":
            print "could not find file `%s', maybe you're not running inside tools/?" % filename
        continue
    replace_all(
        filename, "( )*?(module->priority)( )*?(=).*?(;).*\n",
        "  module->priority = %d; // module order created by iop_dependencies.py, do not edit!\n"
Example #30
 def nodes(self):
     """ Return list of nodes in alphabetical order
     """
     return topological_sorting(self.graph)
 def getSortedNodeListIndex(self):
     ''' returns a list of the node names topologically sorted
     
     :return: a list of the node names topologically sorted
     '''
     return topological_sorting(self.graph)