Example #1
import os

import dag
from conda_build.metadata import MetaData  # assumed origin of MetaData

def buildDAG(args, modified_files, formula_dir):
    oDag = dag.DAG()
    dMap = {}
    common_names = set()
    # Collect every recipe under formula_dir along with its declared
    # build- and run-time requirements
    for directory, d_list, f_list in os.walk(formula_dir):
        if 'meta.yaml' in f_list:
            meta = MetaData(os.path.join(directory, 'meta.yaml'))
            reqs = meta.meta['requirements']
            combined_deps = set(reqs.get('build', [])).union(reqs.get('run', []))
            common_names.add(meta.name())
            dMap[meta.meta_path] = (meta.name(), combined_deps, meta)

    # Populate the DAG with one node per recipe
    for meta_path in dMap:
        oDag.add_node(meta_path)

    # Create edges
    for ind_node, name, dependencies, meta, dag_node in _walkMapAndDag(dMap, oDag):
        controlled_dependencies = set(dependencies).intersection(common_names)
        if dMap[dag_node][0] in controlled_dependencies:
            oDag.add_edge(dag_node, ind_node)

    # Remove nodes for recipes that are skipped or unmodified
    for ind_node, name, dependencies, meta, dag_node in _walkMapAndDag(dMap, oDag):
        controlled_dependencies = set(dependencies).intersection(common_names)
        if ind_node not in modified_files and controlled_dependencies and args.dependencies:
            continue
        elif ind_node not in modified_files:
            oDag.delete_node_if_exists(ind_node)

    return oDag
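
Both passes over the map rely on a _walkMapAndDag helper that this example does not include. A plausible sketch, inferred from the call sites above (the tuple shape and the pairing of every recipe with every node are assumptions, not the original helper):

def _walkMapAndDag(dMap, oDag):
    # Hypothetical reconstruction: yield each recipe's recorded metadata
    # alongside every node in the map so the caller can test candidate edges.
    # oDag is accepted only to mirror the call sites above.
    for ind_node, (name, dependencies, meta) in dMap.items():
        for dag_node in dMap:
            yield ind_node, name, dependencies, meta, dag_node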
Example #2
    def schedule(self, testers):
        """
        Schedule supplied list of testers for execution.
        """
        # If any threads caused an exception, we have already closed down the queue and
        # must not schedule any more jobs
        if self.run_pool._state:
            return

        # Instantiate the DAG class so we can share it amongst all the TesterData containers
        job_dag = dag.DAG()

        non_runnable_jobs = set()
        name_to_job_container = {}

        # Increment our simple queue count with the number of testers the scheduler received
        with self.slot_lock:
            self.job_queue_count += len(testers)

        # Create a local dictionary of tester names to job containers, and record each
        # container in tester_datas so we retain access to its methods.
        for tester in testers:
            name_to_job_container[tester.getTestName()] = TesterData(tester, job_dag, self.options)
            self.tester_datas.add(name_to_job_container[tester.getTestName()])

        # Populate job_dag with testers. This method will also return any testers which caused failures
        # while building the DAG.
        skipped_or_failed_testers = self.buildDAG(name_to_job_container, job_dag)

        # Create a set of failing job containers
        for failed_tester in skipped_or_failed_testers:
            non_runnable_jobs.add(name_to_job_container[failed_tester.getTestName()])

        # Iterate over the jobs in our non_runnable_jobs and handle any downstream jobs affected by
        # 'job'. These will be our 'skipped dependency' tests.
        for job in non_runnable_jobs.copy():
            additionally_skipped = self.processDownstreamTests(job)
            non_runnable_jobs.update(additionally_skipped)
            job_dag.delete_node_if_exists(job)

        # Get a count of all the items still in the DAG. These will be the jobs that ultimately are queued
        runnable_jobs = job_dag.size()

        # Make sure we didn't drop a tester somehow
        if len(non_runnable_jobs) + runnable_jobs != len(testers):
            raise SchedulerError('Runnable tests plus skipped tests do not match the total scheduled test count!')

        # Assign a status thread to begin work on any skipped/failed jobs
        self.queueJobs(status_jobs=non_runnable_jobs)

        # Build our list of runnable jobs and set the tester's status to queued
        job_list = []
        if runnable_jobs:
            job_list = job_dag.ind_nodes()
            for job_container in job_list:
                tester = job_container.getTester()
                tester.setStatus('QUEUED', tester.bucket_pending)

        # Queue runnable jobs
        self.queueJobs(run_jobs=job_list)
Example #3
    def removeAllDependencies(self):
        """ Flatten current DAG so that it no longer contains any dependency information """
        if self.__name_to_job and self.__job_dag.size():
            tmp_job_dag = dag.DAG()
            for job in self.__job_dag.topological_sort():
                tmp_job_dag.add_node(job)
            self.__job_dag = tmp_job_dag
        return self.__job_dag
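
The flattening idiom above (copy the nodes in topological order into a fresh DAG and leave the edges behind) is easy to exercise in isolation. A minimal sketch, assuming the py-dag style API these examples use:

import dag

d = dag.DAG()
for node in ('a', 'b', 'c'):
    d.add_node(node)
d.add_edge('a', 'b')  # 'b' depends on 'a'
d.add_edge('b', 'c')  # 'c' depends on 'b'

# Flatten: keep every node, drop every dependency edge.
flat = dag.DAG()
for node in d.topological_sort():
    flat.add_node(node)

assert flat.size() == d.size()
assert sorted(flat.ind_nodes()) == ['a', 'b', 'c']  # every node is now independent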
Example #4
def buildDAG(template_dir, args):
    package_dag = dag.DAG()
    for template_file in os.listdir(template_dir):
        tmp_job = Job(args,
                      package_file=os.path.join(template_dir, template_file),
                      name=template_file)
        package_dag.add_node(tmp_job)
    return buildEdges(package_dag, args)
Example #5
    def schedule(self, dag_object):
        """
        Account for all the jobs in the DAG and submit the DAG for
        processing.
        """
        if not isinstance(dag_object, dag.DAG):
            raise SchedulerError('schedule method requires a DAG object')

        if self.worker_pool._state:
            raise SchedulerError('scheduler is no longer accepting jobs')

        with self.thread_lock:
            self.job_queue_count += dag_object.size()
            for job in dag_object.topological_sort():
                # create pointer to DAG for every job
                self.shared_dags[job] = JobContainer(dag_object)

        self.queueDAG(dag_object)
Example #6
    def schedule(self, testers):
        """
        Schedule supplied list of testers for execution.
        """
        # If any threads caused an exception, we have already closed down the queue and
        # must not schedule any more jobs
        if self.error_state:
            return

        # Instantiate the DAG class so we can share it amongst all the Job containers
        job_dag = dag.DAG()

        runnable_jobs = set()
        non_runnable_jobs = set()
        name_to_job_container = {}

        # Increment our simple queue count with the number of testers the scheduler received
        with self.job_queue_lock:
            self.job_queue_count += len(testers)

        # Create a local dictionary of tester names to job containers, and record each
        # container in tester_datas so we retain access to its methods.
        for tester in testers:
            if tester.getTestName() in name_to_job_container:
                tester.addCaveats('duplicate test')
                tester.setStatus(tester.bucket_skip.status, tester.bucket_skip)
                non_runnable_jobs.add(Job(tester, job_dag, self.options))
            else:
                name_to_job_container[tester.getTestName()] = Job(tester, job_dag, self.options)
                self.tester_datas.add(name_to_job_container[tester.getTestName()])

        # Populate job_dag with testers. This method will also return any testers which caused failures
        # while building the DAG.
        skipped_or_failed_testers = self.buildDAG(name_to_job_container, job_dag)

        # Create a set of failing job containers
        for failed_tester in skipped_or_failed_testers:
            non_runnable_jobs.add(name_to_job_container[failed_tester.getTestName()])

        # Iterate over the jobs in our non_runnable_jobs and handle any downstream jobs affected by
        # 'job'. These will be our 'skipped dependency' tests.
        for job in non_runnable_jobs.copy():
            additionally_skipped = self.processDownstreamTests(job)
            non_runnable_jobs.update(additionally_skipped)
            job_dag.delete_node_if_exists(job)

        # Make sure we didn't drop a tester somehow
        if len(non_runnable_jobs) + job_dag.size() != len(testers):
            raise SchedulerError('Runnable tests plus skipped tests do not match the total scheduled test count!')

        # Inform derived schedulers of the jobs we are skipping immediately
        self.reportSkipped(non_runnable_jobs)

        # Allow derived schedulers to modify the dag before we launch
        # TODO: We don't like this, and this will change when we move to better DAG handling.
        if job_dag.size():
            self.preLaunch(job_dag)

        # Build our list of runnable jobs and set the tester's status to queued
        if job_dag.size():
            runnable_jobs = job_dag.ind_nodes()
            for job in runnable_jobs:
                tester = job.getTester()
                tester.setStatus('QUEUED', tester.bucket_pending)

        # Queue generated jobs
        self.queueJobs(run_jobs=runnable_jobs, status_jobs=non_runnable_jobs)
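
Examples #2 and #6 both delegate to a processDownstreamTests method that is not shown. A hedged sketch of what it plausibly does, using py-dag's all_downstream traversal and the tester status calls visible above; job.getDAG() is a hypothetical accessor for the DAG shared with every Job container:

    def processDownstreamTests(self, job):
        # Illustrative sketch, not the project's implementation: every job
        # downstream of a non-runnable job inherits a 'skipped dependency'
        # caveat and is returned so the caller can mark it non-runnable.
        skipped = set()
        for downstream_job in job.getDAG().all_downstream(job):  # getDAG() assumed
            tester = downstream_job.getTester()
            tester.addCaveats('skipped dependency')
            tester.setStatus(tester.bucket_skip.status, tester.bucket_skip)
            skipped.add(downstream_job)
        return skipped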
Example #7
    def __init__(self, options):
        self.__job_dag = dag.DAG()
        self.options = options
Example #8
    def __init__(self, options):
        self.__job_dag = dag.DAG()
        self.__parallel_scheduling = None
        self.options = options
Example #9
def buildDAG(args, modified_files, formula_dir):
    formula_dag = dag.DAG()
    for formula_file in os.listdir(formula_dir):
        if formula_file.endswith(".rb"):
            # The node name is the formula name: the file name minus its ".rb" suffix
            formula_dag.add_node(os.path.basename(formula_file)[:-3])
    return buildEdges(args, modified_files, formula_dir, formula_dag)
Example #10
def buildDAG(args, modified_files, formula_dir):
    formula_dag = dag.DAG()
    for directory, d_list, f_list in os.walk(formula_dir):
        if 'meta.yaml' in f_list:
            formula_dag.add_node(directory)
    return buildEdges(args, modified_files, formula_dir, formula_dag)
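
Examples #9 and #10 both finish in a buildEdges helper that is not shown. For the Homebrew-style layout of Example #9, a hypothetical sketch of what it could look like, assuming each formula declares its dependencies with depends_on "name" lines (the regex and file scanning are illustrative, not the original helper):

import os
import re

DEPENDS_ON = re.compile(r'depends_on\s+[\'"]([^\'"]+)[\'"]')

def buildEdges(args, modified_files, formula_dir, formula_dag):
    # Hypothetical sketch: connect each formula to the formulas it declares
    # via depends_on, so dependencies sort ahead of their dependents.
    known = set(formula_dag.topological_sort())
    for formula_file in os.listdir(formula_dir):
        if not formula_file.endswith('.rb'):
            continue
        name = formula_file[:-3]
        with open(os.path.join(formula_dir, formula_file)) as fh:
            for dep in DEPENDS_ON.findall(fh.read()):
                if dep in known:
                    formula_dag.add_edge(dep, name)  # the dependency must come first
    return formula_dag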