def _dispatch_job(self, job):
    """Run the engine for ``job`` and decide whether to consume the job.

    :param job: the claimed job to execute.
    :returns: a completed future wrapping ``consume`` (bool) -- ``True``
        when the job should be consumed, ``False`` when every failure is
        one of ``NO_CONSUME_EXCEPTIONS`` (so the job is left for retry).
    """
    engine = self._engine_from_job(job)
    consume = True
    with logging_listener.LoggingListener(engine, log=LOG):
        LOG.debug("Dispatching engine %s for job: %s", engine, job)
        try:
            engine.run()
        except excp.WrappedFailure as e:
            # Only skip consumption when *every* wrapped failure is a
            # known no-consume exception type.
            if all(f.check(*NO_CONSUME_EXCEPTIONS) for f in e):
                consume = False
            if LOG.isEnabledFor(logging.WARNING):
                # BUG FIX: the two messages were swapped -- "skipped"
                # must correspond to consume=False (as in the
                # NO_CONSUME_EXCEPTIONS branch below); also use
                # warning() instead of the deprecated warn() alias.
                if consume:
                    LOG.warning("Job execution failed (consumption"
                                " proceeding): %s [%s failures]",
                                job, len(e))
                else:
                    LOG.warning("Job execution failed (consumption being"
                                " skipped): %s [%s failures]",
                                job, len(e))
                # Show the failure/s + traceback (if possible)...
                for i, f in enumerate(e):
                    LOG.warning("%s. %s", i + 1,
                                f.pformat(traceback=True))
        except NO_CONSUME_EXCEPTIONS:
            LOG.warning("Job execution failed (consumption being"
                        " skipped): %s", job, exc_info=True)
            consume = False
        except Exception:
            LOG.warning("Job execution failed (consumption proceeding): %s",
                        job, exc_info=True)
        else:
            LOG.info("Job completed successfully: %s", job)
    return async_utils.make_completed_future(consume)
def execute(self, retry):
    """Run ``retry`` and record its terminal state.

    Moves the retry to RUNNING, invokes it with the arguments gathered
    from storage, then records SUCCESS (with the result) or FAILURE
    (with a captured failure) before returning a completed future of
    ``(retry, ex.EXECUTED, result)``.
    """
    self.change_state(retry, states.RUNNING)
    retry_kwargs = self._get_retry_args(retry)
    try:
        outcome = retry.execute(**retry_kwargs)
    except Exception:
        # Capture the active exception as a failure object so it can be
        # persisted alongside the FAILURE state.
        outcome = misc.Failure()
        self.change_state(retry, states.FAILURE, result=outcome)
    else:
        self.change_state(retry, states.SUCCESS, result=outcome)
    return async_utils.make_completed_future((retry, ex.EXECUTED, outcome))
def _dispatch_job(self, job):
    """Run the staged engine lifecycle for ``job`` under its listeners.

    Each stage (compilation, preparation, validation, running) is
    bracketed by ``<stage>_start`` / ``<stage>_end`` notifications that
    carry the job, engine and conductor.

    :param job: the claimed job to execute.
    :returns: a completed future wrapping ``consume`` (bool) -- ``True``
        when the job should be consumed, ``False`` when every failure is
        one of ``NO_CONSUME_EXCEPTIONS`` (so the job is left for retry).
    """
    engine = self._engine_from_job(job)
    listeners = self._listeners_from_job(job, engine)
    with ExitStack() as stack:
        # Activate all listeners for the duration of the dispatch.
        for listener in listeners:
            stack.enter_context(listener)
        LOG.debug("Dispatching engine for job '%s'", job)
        consume = True
        try:
            for stage_func, event_name in [(engine.compile, 'compilation'),
                                           (engine.prepare, 'preparation'),
                                           (engine.validate, 'validation'),
                                           (engine.run, 'running')]:
                self._notifier.notify("%s_start" % event_name, {
                    'job': job,
                    'engine': engine,
                    'conductor': self,
                })
                stage_func()
                self._notifier.notify("%s_end" % event_name, {
                    'job': job,
                    'engine': engine,
                    'conductor': self,
                })
        except excp.WrappedFailure as e:
            # Only skip consumption when *every* wrapped failure is a
            # known no-consume exception type.
            if all(f.check(*NO_CONSUME_EXCEPTIONS) for f in e):
                consume = False
            if LOG.isEnabledFor(logging.WARNING):
                # BUG FIX: the two messages were swapped -- "skipped"
                # must correspond to consume=False (as in the
                # NO_CONSUME_EXCEPTIONS branch below); also use
                # warning() instead of the deprecated warn() alias.
                if consume:
                    LOG.warning(
                        "Job execution failed (consumption"
                        " proceeding): %s [%s failures]", job, len(e))
                else:
                    LOG.warning(
                        "Job execution failed (consumption being"
                        " skipped): %s [%s failures]", job, len(e))
                # Show the failure/s + traceback (if possible)...
                for i, f in enumerate(e):
                    LOG.warning("%s. %s", i + 1,
                                f.pformat(traceback=True))
        except NO_CONSUME_EXCEPTIONS:
            LOG.warning(
                "Job execution failed (consumption being"
                " skipped): %s", job, exc_info=True)
            consume = False
        except Exception:
            LOG.warning("Job execution failed (consumption proceeding): %s",
                        job, exc_info=True)
        else:
            LOG.info("Job completed successfully: %s", job)
    return async_utils.make_completed_future(consume)
def revert(self, retry):
    """Revert ``retry`` and record its terminal state.

    Moves the retry to REVERTING, invokes ``retry.revert`` with the
    stored arguments plus the flow's accumulated failures, then records
    REVERTED (or FAILURE on error) before returning a completed future
    of ``(retry, ex.REVERTED, result)``.
    """
    self.change_state(retry, states.REVERTING)
    revert_kwargs = self._get_retry_args(retry)
    revert_kwargs['flow_failures'] = self._storage.get_failures()
    try:
        outcome = retry.revert(**revert_kwargs)
    except Exception:
        outcome = misc.Failure()
        # NOTE(review): unlike execute(), the failure is not passed as
        # result= to change_state here -- confirm this asymmetry is
        # intentional before "fixing" it.
        self.change_state(retry, states.FAILURE)
    else:
        self.change_state(retry, states.REVERTED)
    return async_utils.make_completed_future((retry, ex.REVERTED, outcome))
def _dispatch_job(self, job):
    """Run the engine for ``job`` with its listeners registered.

    :param job: the claimed job to execute.
    :returns: a completed future wrapping ``consume`` (bool) -- ``True``
        when the job should be consumed, ``False`` when every failure is
        one of ``NO_CONSUME_EXCEPTIONS`` (so the job is left for retry).
    """
    engine = self._engine_from_job(job)
    listeners = self._listeners_from_job(job, engine)
    with misc.ListenerStack(LOG) as stack:
        stack.register(listeners)
        LOG.debug("Dispatching engine %s for job: %s", engine, job)
        consume = True
        try:
            engine.run()
        except excp.WrappedFailure as e:
            # Only skip consumption when *every* wrapped failure is a
            # known no-consume exception type.
            if all(f.check(*NO_CONSUME_EXCEPTIONS) for f in e):
                consume = False
            if LOG.isEnabledFor(logging.WARNING):
                # BUG FIX: the two messages were swapped -- "skipped"
                # must correspond to consume=False (as in the
                # NO_CONSUME_EXCEPTIONS branch below); also use
                # warning() instead of the deprecated warn() alias.
                if consume:
                    LOG.warning(
                        "Job execution failed (consumption"
                        " proceeding): %s [%s failures]", job, len(e))
                else:
                    LOG.warning(
                        "Job execution failed (consumption being"
                        " skipped): %s [%s failures]", job, len(e))
                # Show the failure/s + traceback (if possible)...
                for i, f in enumerate(e):
                    LOG.warning("%s. %s", i + 1,
                                f.pformat(traceback=True))
        except NO_CONSUME_EXCEPTIONS:
            LOG.warning(
                "Job execution failed (consumption being"
                " skipped): %s", job, exc_info=True)
            consume = False
        except Exception:
            LOG.warning("Job execution failed (consumption proceeding): %s",
                        job, exc_info=True)
        else:
            LOG.info("Job completed successfully: %s", job)
    return async_utils.make_completed_future(consume)
def test_make_completed_future(self):
    """A future built from a value is already done and holds that value."""
    payload = object()
    fut = au.make_completed_future(payload)
    self.assertTrue(fut.done())
    self.assertIs(fut.result(), payload)
def test_make_completed_future(self):
    """A future built from a value is already done and holds that value."""
    sentinel = object()
    completed = au.make_completed_future(sentinel)
    self.assertTrue(completed.done())
    self.assertIs(completed.result(), sentinel)
def test_make_completed_future_exception(self):
    """A future built with exception=True raises that error on result()."""
    error = IOError("broken")
    fut = au.make_completed_future(error, exception=True)
    self.assertTrue(fut.done())
    self.assertRaises(IOError, fut.result)
    self.assertIsNotNone(fut.exception())
def revert_task(self, task, task_uuid, arguments, result, failures,
                progress_callback=None):
    """Revert ``task`` synchronously and wrap the outcome in a done future."""
    outcome = _revert_task(task, arguments, result, failures,
                           progress_callback)
    return async_utils.make_completed_future(outcome)
def execute_task(self, task, task_uuid, arguments, progress_callback=None):
    """Execute ``task`` synchronously and wrap the outcome in a done future."""
    outcome = _execute_task(task, arguments, progress_callback)
    return async_utils.make_completed_future(outcome)
def test_make_completed_future_exception(self):
    """A future built with exception=True raises that error on result()."""
    failure = IOError("broken")
    completed = au.make_completed_future(failure, exception=True)
    self.assertTrue(completed.done())
    self.assertRaises(IOError, completed.result)
    self.assertIsNotNone(completed.exception())
def revert_task(self, task, task_uuid, arguments, result, failures,
                progress_callback=None):
    """Run the task's revert inline, returning an already-completed future."""
    reverted = _revert_task(task, arguments, result, failures,
                            progress_callback)
    return async_utils.make_completed_future(reverted)
def execute_task(self, task, task_uuid, arguments, progress_callback=None):
    """Run the task inline, returning an already-completed future."""
    executed = _execute_task(task, arguments, progress_callback)
    return async_utils.make_completed_future(executed)