Example #1
 def test_basic_failure(self):
     flow = lf.Flow("test")
     flow.add(test_utils.TaskWithFailure("test-1"))
     e = self._make_engine(flow)
     log, handler = self._make_logger()
     with logging_listeners.LoggingListener(e, log=log):
         self.assertRaises(RuntimeError, e.run)
     # some DEBUG records are expected to have been captured by the handler
     self.assertGreater(0, handler.counts[logging.DEBUG])
     for levelno in _LOG_LEVELS - set([logging.DEBUG]):
         self.assertEqual(0, handler.counts[levelno])
     self.assertEqual(1, len(handler.exc_infos))
Example #2
 def test_basic(self):
     flow = lf.Flow("test")
     flow.add(test_utils.TaskNoRequiresNoReturns("test-1"))
     e = self._make_engine(flow)
     log, handler = self._make_logger()
     with logging_listeners.LoggingListener(e, log=log):
         e.run()
     # some DEBUG records are expected to have been captured by the handler
     self.assertGreater(0, handler.counts[logging.DEBUG])
     for levelno in _LOG_LEVELS - set([logging.DEBUG]):
         self.assertEqual(0, handler.counts[levelno])
     self.assertEqual([], handler.exc_infos)
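
The two tests above lean on helpers that are not shown: self._make_engine builds an engine for the flow, and self._make_logger returns a logger plus a capturing handler exposing counts and exc_infos. Below is a minimal sketch of what such a handler could look like, assuming only the attributes the assertions use; CountingHandler is a hypothetical name, not taskflow's actual test utility.

import collections
import logging


class CountingHandler(logging.Handler):
    """Hypothetical capturing handler, shown only for illustration.

    It mimics what the tests appear to rely on: a per-level record
    count and a list of captured exc_info tuples.
    """

    def __init__(self, level=logging.DEBUG):
        super(CountingHandler, self).__init__(level=level)
        self.counts = collections.defaultdict(int)
        self.exc_infos = []

    def emit(self, record):
        self.counts[record.levelno] += 1
        if record.exc_info:
            self.exc_infos.append(record.exc_info)
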
Example #3
    def _listeners_from_job(self, job, engine):
        def task_transition(state, details):
            LOG.info("Taskflow transitioning to state {0}."
                     " Details: {1}".format(state, details))

        def flow_transition(state, details):
            LOG.info("Taskflow transitioning to state {0}."
                     " Details: {1}".format(state, details))

        engine.atom_notifier.register(Notifier.ANY, task_transition)
        engine.notifier.register(Notifier.ANY, flow_transition)

        listeners = super(NotifyingConductor,
                          self)._listeners_from_job(job, engine)

        listeners.append(logging_listener.LoggingListener(engine, log=LOG))
        return listeners
Example #4
 def _dispatch_job(self, job):
     engine = self._engine_from_job(job)
     consume = True
     with logging_listener.LoggingListener(engine, log=LOG):
         LOG.debug("Dispatching engine %s for job: %s", engine, job)
         try:
             engine.run()
         except excp.WrappedFailure as e:
             if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)):
                 consume = False
             if LOG.isEnabledFor(logging.WARNING):
                 if consume:
                     LOG.warn(
                         "Job execution failed (consumption being"
                         " skipped): %s [%s failures]", job, len(e))
                 else:
                     LOG.warn(
                         "Job execution failed (consumption"
                         " proceeding): %s [%s failures]", job, len(e))
                 # Show the failure/s + traceback (if possible)...
                 for i, f in enumerate(e):
                     LOG.warn("%s. %s", i + 1, f.pformat(traceback=True))
         except NO_CONSUME_EXCEPTIONS:
             LOG.warn(
                 "Job execution failed (consumption being"
                 " skipped): %s",
                 job,
                 exc_info=True)
             consume = False
         except Exception:
             LOG.warn("Job execution failed (consumption proceeding): %s",
                      job,
                      exc_info=True)
         else:
             LOG.info("Job completed successfully: %s", job)
     return async_utils.make_completed_future(consume)

 def _listeners_from_job(self, job, engine):
     listeners = super(ExecutorConductor,
                       self)._listeners_from_job(job, engine)
     listeners.append(
         logging_listener.LoggingListener(engine, log=self._log))
     return listeners

 def _listeners_from_job(self, job, engine):
     listeners = super(BlockingConductor, self)._listeners_from_job(job,
                                                                    engine)
     listeners.append(logging_listener.LoggingListener(engine, log=LOG))
     return listeners
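
All of the examples share one pattern: build an engine, then run it while a LoggingListener is registered so task and flow state transitions are emitted to the chosen logger. Below is a minimal standalone sketch of that pattern, assuming taskflow's public engines.load API; HelloTask and the "demo" flow name are made up for illustration.

import logging

from taskflow import engines
from taskflow import task
from taskflow.listeners import logging as logging_listeners
from taskflow.patterns import linear_flow as lf

LOG = logging.getLogger(__name__)


class HelloTask(task.Task):
    # Made-up task so the flow has something to execute.
    def execute(self):
        LOG.info("hello from %s", self.name)


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    flow = lf.Flow("demo").add(HelloTask("demo-1"))
    engine = engines.load(flow)
    # While the listener is registered, engine and task transitions are
    # logged to LOG (at DEBUG level by default) as the flow runs.
    with logging_listeners.LoggingListener(engine, log=LOG):
        engine.run()

The conductor examples (#3 and #4) apply the same idea automatically: they append the listener in _listeners_from_job so every dispatched job has its transitions logged without each caller wiring it up by hand.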