Example #1
0
    def _finish_jobs(self):
        # must be called from within lock
        # Drain the shared to-finish queue so that concurrent callers do not
        # process the same jobs twice.
        for finished in self._tofinish:
            if self.handle_job_success:
                try:
                    self.get_executor(finished).handle_job_success(finished)
                except (RuleException, WorkflowError) as exc:
                    # A failure while post-processing job output is treated
                    # exactly like a failure during execution.
                    print_exception(exc, self.workflow.linemaps)
                    self._handle_error(finished)
                    continue

            if self.update_resources:
                # a group job contributes len(job) members, a normal job one
                self.finished_jobs += len(finished)
                self.running.remove(finished)
                self._free_resources(finished)

            if self.print_progress:
                members = list(finished) if finished.is_group() else [finished]
                for member in members:
                    logger.job_finished(jobid=member.jobid)
                self.progress()

            self.dag.finish(finished, update_dynamic=self.update_dynamic)
        self._tofinish.clear()
Example #2
0
    def _proceed(
        self,
        job,
        update_dynamic=True,
        print_progress=False,
        update_resources=True,
        handle_job_success=True,
    ):
        """Handle a finished job: success hook, DAG bookkeeping, scheduling."""
        with self._lock:
            if handle_job_success:
                try:
                    # executed under the lock to avoid race conditions
                    self.get_executor(job).handle_job_success(job)
                except (RuleException, WorkflowError) as exc:
                    # an error while processing job output is treated like
                    # an error during execution
                    print_exception(exc, self.workflow.linemaps)
                    self._handle_error(job)
                    return

            try:
                potential_new_ready_jobs = self.dag.finish(
                    job, update_dynamic=update_dynamic
                )
            except (RuleException, WorkflowError) as exc:
                # same policy for failures while finalizing the DAG node
                print_exception(exc, self.workflow.linemaps)
                self._handle_error(job)
                return

            if update_resources:
                # a group job contributes len(job) finished jobs, a normal job one
                self.finished_jobs += len(job)
                self.running.remove(job)
                self._free_resources(job)

            if print_progress:
                members = list(job) if job.is_group() else [job]
                for member in members:
                    logger.job_finished(jobid=member.jobid)
                self.progress()

            if self.dryrun:
                # During dryrun, wake the scheduler only once everything has
                # finished; this keeps the (expensive) evaluation of
                # self.open_jobs to a minimum.
                wake_scheduler = not self.running
            else:
                wake_scheduler = (
                    not self.running
                    or potential_new_ready_jobs
                    or self.workflow.immediate_submit
                )
            if wake_scheduler:
                # resume scheduling: jobs may be ready or nothing runs anymore
                self._open_jobs.release()
Example #3
0
    def _proceed(self, job,
                 update_dynamic=True,
                 print_progress=False,
                 update_resources=True):
        """Perform bookkeeping for a job that has just finished."""
        with self._lock:
            if update_resources:
                # release the job's slot and its claimed resources
                self.finished_jobs += 1
                self.running.remove(job)
                self._free_resources(job)

            self.dag.finish(job, update_dynamic=update_dynamic)
            logger.job_finished(jobid=self.dag.jobid(job))

            if print_progress:
                self.progress()

            ready = any(self.open_jobs)
            if ready or not self.running:
                # wake the scheduler: jobs are ready or nothing runs anymore
                self._open_jobs.set()
Example #4
0
    def _proceed(self, job,
                 update_dynamic=True,
                 print_progress=False,
                 update_resources=True):
        """Post-completion handling: update accounting and wake the scheduler."""
        with self._lock:
            if update_resources:
                self.finished_jobs += 1
                # drop the job from the running set before freeing resources
                self.running.remove(job)
                self._free_resources(job)

            self.dag.finish(job, update_dynamic=update_dynamic)

            logger.job_finished(jobid=self.dag.jobid(job))
            if print_progress:
                self.progress()

            if any(self.open_jobs) or not self.running:
                # resume scheduling: work is ready or nothing is left running
                self._open_jobs.set()
Example #5
0
    def _proceed(self,
                 job,
                 update_dynamic=True,
                 print_progress=False,
                 update_resources=True,
                 handle_job_success=True):
        """Handle a finished job: success hook, DAG update, accounting."""
        with self._lock:
            if handle_job_success:
                try:
                    # run under the lock to avoid race conditions
                    self.get_executor(job).handle_job_success(job)
                except (RuleException, WorkflowError) as exc:
                    # failures while handling job output are treated exactly
                    # like failures during execution
                    print_exception(exc, self.workflow.linemaps)
                    self._handle_error(job)
                    return

            self.dag.finish(job, update_dynamic=update_dynamic)

            if update_resources:
                # a group job contributes len(job) finished jobs, a normal job one
                self.finished_jobs += len(job)
                self.running.remove(job)
                self._free_resources(job)

            if print_progress:
                for member in (job if job.is_group() else (job,)):
                    logger.job_finished(jobid=member.jobid)
                self.progress()

            wake = (any(self.open_jobs)
                    or not self.running
                    or self.workflow.immediate_submit)
            if wake:
                # resume scheduling when jobs are ready or none are running
                self._open_jobs.release()