def wrapper(self, *args, **kw):
    # Set the task name in the logger
    from flexget import logger
    logger.set_task(self.name)
    if self.output:
        # Hook up our log and stdout to give back to the requester
        old_stdout, old_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = Tee(self.output, sys.stdout), Tee(self.output, sys.stderr)
        # TODO: Use a filter to capture only the logging for this execution?
        streamhandler = logging.StreamHandler(self.output)
        streamhandler.setFormatter(FlexGetFormatter())
        logging.getLogger().addHandler(streamhandler)
    old_loglevel = logging.getLogger().getEffectiveLevel()
    new_loglevel = logging.getLevelName(self.options.loglevel.upper())
    if old_loglevel != new_loglevel:
        log.info('Setting loglevel to `%s` for this execution.' % self.options.loglevel)
        logging.getLogger().setLevel(new_loglevel)
    try:
        return func(self, *args, **kw)
    finally:
        logger.set_task('')
        if self.output:
            sys.stdout, sys.stderr = old_stdout, old_stderr
            logging.getLogger().removeHandler(streamhandler)
        if old_loglevel != new_loglevel:
            log.debug('Returning loglevel to `%s` after task execution.' % logging.getLevelName(old_loglevel))
            logging.getLogger().setLevel(old_loglevel)
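The wrapper above depends on a Tee helper that is not shown here: it replaces sys.stdout and sys.stderr with objects that forward every write both to the requester's output and to the original stream. A minimal sketch of such a wrapper, assuming it only needs to duplicate write and flush calls, might look like this (illustrative only, not FlexGet's actual implementation):

class Tee(object):
    """Duplicate writes to every wrapped stream (sketch, assumed semantics)."""

    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        # Forward each write to all underlying streams.
        for stream in self.streams:
            stream.write(data)

    def flush(self):
        for stream in self.streams:
            stream.flush()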
def run(self):
    from flexget.ui.webui import manager
    while True:
        kwargs = self.queue.get() or {}
        opts = kwargs.pop('options', None)
        parsed_options = kwargs.pop('parsed_options', None)
        output = kwargs.pop('output', None)
        if opts:
            # make copy of original options and apply options from opts
            old_opts = copy(manager.options)
            self._apply_options(manager.options, opts)
        if parsed_options:
            old_opts = manager.options
            manager.options = parsed_options
        if output:
            old_stdout = sys.stdout
            old_stderr = sys.stderr
            sys.stdout = output
            sys.stderr = output
            # TODO: Use a filter to capture only the logging for this execution
            streamhandler = logging.StreamHandler(output)
            streamhandler.setFormatter(FlexGetFormatter())
            logging.getLogger().addHandler(streamhandler)
        try:
            manager.execute(**kwargs)
        finally:
            # Inform queue we are done processing this item.
            self.queue.task_done()
            # Restore manager's previous options and stdout
            if opts:
                manager.options = old_opts
            if output:
                # Write EOF to the output, so that a listener knows when the output is over
                output.write('EOF')
                sys.stdout = old_stdout
                sys.stderr = old_stderr
                logging.getLogger().removeHandler(streamhandler)
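A requester drives this executor by queueing a dict whose 'output' value is a file-like object, then consuming that object until the 'EOF' sentinel written above arrives. The sketch below is hypothetical: BufferQueue and request_execution are illustrative names, not part of FlexGet, and it assumes an empty kwargs dict makes manager.execute() run with default options.

import Queue


class BufferQueue(Queue.Queue):
    """File-like object whose writes can be consumed chunk by chunk (sketch)."""

    def write(self, text):
        self.put(text)

    def flush(self):
        # Nothing to flush; writes are delivered immediately via the queue.
        pass


def request_execution(exec_thread):
    # Hypothetical caller: enqueue a job with an output buffer, then stream
    # the captured log/stdout back until the executor writes its 'EOF' marker.
    output = BufferQueue()
    exec_thread.queue.put({'output': output})
    while True:
        chunk = output.get()
        if chunk == 'EOF':
            break
        yield chunk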
def run(self):
    from flexget.task import Task, TaskAbort
    while not self._shutdown_now:
        if self.run_schedules:
            self.queue_pending_jobs()
        # Grab the first job from the run queue and do it
        try:
            job = self.run_queue.get(timeout=0.5)
        except Queue.Empty:
            if self._shutdown_when_finished:
                self._shutdown_now = True
            continue
        if job.output:
            # Hook up our log and stdout to give back to the requester
            old_stdout, old_stderr = sys.stdout, sys.stderr
            sys.stdout, sys.stderr = Tee(job.output, sys.stdout), Tee(job.output, sys.stderr)
            # TODO: Use a filter to capture only the logging for this execution?
            streamhandler = logging.StreamHandler(job.output)
            streamhandler.setFormatter(FlexGetFormatter())
            logging.getLogger().addHandler(streamhandler)
        try:
            Task(self.manager, job.task, options=job.options).execute()
        except TaskAbort as e:
            log.debug('task %s aborted: %r' % (job.task, e))
        finally:
            self.run_queue.task_done()
            job.finished_event.set()
            if job.output:
                sys.stdout, sys.stderr = old_stdout, old_stderr
                logging.getLogger().removeHandler(streamhandler)
    remaining_jobs = self.run_queue.qsize()
    if remaining_jobs:
        log.warning('Scheduler shut down with %s jobs remaining in the queue to run.' % remaining_jobs)
    log.debug('scheduler shut down')
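The scheduler loop only touches a few attributes of each queued job: the task name, the parsed options handed to Task(), an optional output stream for capture, and a finished_event the requester can wait on. A minimal container matching those assumptions might look like this (attribute names are taken from the loop above; the class itself is a sketch, not FlexGet's actual Job implementation):

import threading


class Job(object):
    """Minimal job record assumed by the scheduler loop (sketch)."""

    def __init__(self, task, options=None, output=None):
        self.task = task                        # name of the task to execute
        self.options = options                  # options passed through to Task()
        self.output = output                    # optional file-like stream for log/stdout capture
        self.finished_event = threading.Event() # set when execution finishes, so callers can wait on it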