Example #1
class QueueConsumer(ConsumerMixin):
    def __init__(self, connection, queues, handler):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._queues = queues
        self._handler = handler

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=self._queues, accept=['pickle'], callbacks=[self.process])

        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)

        return [consumer]

    def process(self, body, message):
        try:
            self._dispatcher.dispatch(self._process_message, body)
        finally:
            message.ack()

    def _process_message(self, body):
        try:
            if not isinstance(body, self._handler.message_type):
                raise TypeError('Received an unexpected type "%s" for payload.' % type(body))

            self._handler.process(body)
        except Exception:
            LOG.exception('%s failed to process message: %s', self.__class__.__name__, body)
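
The consumers in these examples are kombu ConsumerMixin subclasses, so they are started by constructing them with a live connection and calling run(). A minimal wiring sketch, assuming a local AMQP broker; the broker URL, queue name, and EchoHandler below are illustrative placeholders, not part of the original project:

from kombu import Connection, Exchange, Queue


class EchoHandler(object):
    # QueueConsumer checks isinstance(body, handler.message_type) before dispatching.
    message_type = dict

    def process(self, body):
        print('handled: %s' % body)


work_q = Queue('demo.work', Exchange('demo', type='direct'), routing_key='demo.work')

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    consumer = QueueConsumer(connection=conn, queues=[work_q], handler=EchoHandler())
    try:
        consumer.run()       # blocks and consumes messages until interrupted
    finally:
        consumer.shutdown()  # drain the BufferedDispatcher worker pool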
Example #2
class LiveActionUpdateQueueConsumer(ConsumerMixin):
    def __init__(self, connection, notifier):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._notifier = notifier

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONUPDATE_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        LOG.debug('process_task')
        LOG.debug('     body: %s', body)
        LOG.debug('     message.properties: %s', message.properties)
        LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            if body.status in ACTION_COMPLETE_STATES:
                self._notifier.handle_action_complete(body)
        except Exception:
            LOG.exception('Sending notifications/action trigger failed. Message body : %s', body)
Example #3
File: worker.py Project: timff/st2
class Worker(ConsumerMixin):
    def __init__(self, connection):
        self.connection = connection
        self.rules_engine = RulesEngine()
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[RULESENGINE_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body['trigger'],
                                      body['payload'])
        finally:
            message.ack()

    def _do_process_task(self, trigger, payload):
        trigger_instance = container_utils.create_trigger_instance(
            trigger, payload or {}, datetime.datetime.utcnow())

        if trigger_instance:
            self.rules_engine.handle_trigger_instance(trigger_instance)
Example #4
 def test_dispatch_simple(self):
     dispatcher = BufferedDispatcher(dispatch_pool_size=10)
     mock_handler = mock.MagicMock()
     expected = []
     for i in range(10):
         dispatcher.dispatch(mock_handler, i, i + 1)
         expected.append((i, i + 1))
     while mock_handler.call_count < 10:
         eventlet.sleep(0.01)
     dispatcher.shutdown()
     call_args_list = [(args[0][0], args[0][1]) for args in mock_handler.call_args_list]
     self.assertItemsEqual(expected, call_args_list)
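
The test only relies on BufferedDispatcher exposing dispatch(handler, *args) and shutdown(). A simplified stand-in with the same surface, sketched on eventlet for illustration (this is not the st2 implementation; the class and attribute names are made up):

import eventlet
from eventlet.queue import Queue


class SimpleBufferedDispatcher(object):
    """Buffer work items and hand them to a bounded green-thread pool."""

    def __init__(self, dispatch_pool_size=10, monitor_sleep_time=0.01):
        self._pool = eventlet.GreenPool(dispatch_pool_size)
        self._work_queue = Queue()
        self._sleep_time = monitor_sleep_time
        self._running = True
        self._monitor = eventlet.spawn(self._drain)

    def dispatch(self, handler, *args):
        # Never blocks the caller; items wait in the buffer until a worker frees up.
        self._work_queue.put((handler, args))

    def _drain(self):
        while self._running or not self._work_queue.empty():
            if self._work_queue.empty():
                eventlet.sleep(self._sleep_time)
                continue
            handler, args = self._work_queue.get()
            self._pool.spawn_n(handler, *args)  # blocks here, not in dispatch(), when the pool is full

    def shutdown(self):
        # Let the monitor loop exit once the buffer is drained, then wait for
        # in-flight handlers to finish.
        self._running = False
        self._monitor.wait()
        self._pool.waitall()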
Example #5
    def __init__(self, connection, queues, handler):
        self.connection = connection

        self._queues = queues
        self._handler = handler

        workflows_pool_size = cfg.CONF.actionrunner.workflows_pool_size
        actions_pool_size = cfg.CONF.actionrunner.actions_pool_size
        self._workflows_dispatcher = BufferedDispatcher(dispatch_pool_size=workflows_pool_size,
                                                        name='workflows-dispatcher')
        self._actions_dispatcher = BufferedDispatcher(dispatch_pool_size=actions_pool_size,
                                                      name='actions-dispatcher')
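
The pool sizes are read from cfg.CONF, which points at oslo.config. A sketch of how such options could be registered; the defaults and help strings here are assumptions, not the project's actual configuration:

from oslo_config import cfg

action_runner_opts = [
    cfg.IntOpt('workflows_pool_size', default=40,
               help='Maximum number of concurrent workflow action dispatches.'),
    cfg.IntOpt('actions_pool_size', default=60,
               help='Maximum number of concurrent non-workflow action dispatches.'),
]

cfg.CONF.register_opts(action_runner_opts, group='actionrunner')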
Example #6
class ActionsQueueConsumer(QueueConsumer):
    """
    Special Queue Consumer for action runner which uses multiple BufferedDispatcher pools:

    1. One for regular (non-workflow) actions
    2. One for workflow actions

    This way we can ensure workflow actions never block non-workflow actions.
    """

    def __init__(self, connection, queues, handler):
        self.connection = connection

        self._queues = queues
        self._handler = handler

        workflows_pool_size = cfg.CONF.actionrunner.workflows_pool_size
        actions_pool_size = cfg.CONF.actionrunner.actions_pool_size
        self._workflows_dispatcher = BufferedDispatcher(
            dispatch_pool_size=workflows_pool_size, name="workflows-dispatcher"
        )
        self._actions_dispatcher = BufferedDispatcher(
            dispatch_pool_size=actions_pool_size, name="actions-dispatcher"
        )

    def process(self, body, message):
        try:
            if not isinstance(body, self._handler.message_type):
                raise TypeError(
                    'Received an unexpected type "%s" for payload.' % type(body)
                )

            action_is_workflow = getattr(body, "action_is_workflow", False)
            if action_is_workflow:
                # Use workflow dispatcher queue
                dispatcher = self._workflows_dispatcher
            else:
                # Use the dispatcher pool for regular (non-workflow) actions
                dispatcher = self._actions_dispatcher

            LOG.debug('Using BufferedDispatcher pool: "%s"', str(dispatcher))
            dispatcher.dispatch(self._process_message, body)
        except Exception:
            LOG.exception(
                "%s failed to process message: %s", self.__class__.__name__, body
            )
        finally:
            # At this point we will always ack a message.
            message.ack()

    def shutdown(self):
        self._workflows_dispatcher.shutdown()
        self._actions_dispatcher.shutdown()
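
The docstring's point is easy to demonstrate with two independent green pools: even when long-running workflow jobs keep their pool busy, the action pool keeps draining. A small illustration only (not st2 code; pool sizes and sleep times are arbitrary):

import eventlet

workflows_pool = eventlet.GreenPool(2)
actions_pool = eventlet.GreenPool(2)


def slow_workflow(i):
    eventlet.sleep(1)  # simulate a long-running workflow action
    print('workflow %d done' % i)


def fast_action(i):
    print('action %d done' % i)


for i in range(2):
    workflows_pool.spawn_n(slow_workflow, i)  # fills the workflow pool

for i in range(4):
    actions_pool.spawn_n(fast_action, i)      # unaffected by the busy workflow pool

# With a single shared pool of size 2, the fast actions would wait behind the
# sleeping workflows; with separate pools they finish first.
workflows_pool.waitall()
actions_pool.waitall()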
Example #7
    def __init__(self, connection, queues, handler):
        self.connection = connection

        self._queues = queues
        self._handler = handler

        workflows_pool_size = cfg.CONF.actionrunner.workflows_pool_size
        actions_pool_size = cfg.CONF.actionrunner.actions_pool_size
        self._workflows_dispatcher = BufferedDispatcher(dispatch_pool_size=workflows_pool_size,
                                                        name='workflows-dispatcher')
        self._actions_dispatcher = BufferedDispatcher(dispatch_pool_size=actions_pool_size,
                                                      name='actions-dispatcher')
Example #8
 def test_dispatch_starved(self):
     dispatcher = BufferedDispatcher(dispatch_pool_size=2,
                                     monitor_thread_empty_q_sleep_time=0.01,
                                     monitor_thread_no_workers_sleep_time=0.01)
     mock_handler = mock.MagicMock()
     expected = []
     for i in range(10):
         dispatcher.dispatch(mock_handler, i, i + 1)
         expected.append((i, i + 1))
     while mock_handler.call_count < 10:
         eventlet.sleep(0.01)
     dispatcher.shutdown()
     call_args_list = [(args[0][0], args[0][1]) for args in mock_handler.call_args_list]
     self.assertItemsEqual(expected, call_args_list)
Example #9
class ActionsQueueConsumer(QueueConsumer):
    """
    Special Queue Consumer for action runner which uses multiple BufferedDispatcher pools:

    1. One for regular (non-workflow) actions
    2. One for workflow actions

    This way we can ensure workflow actions never block non-workflow actions.
    """

    def __init__(self, connection, queues, handler):
        self.connection = connection

        self._queues = queues
        self._handler = handler

        workflows_pool_size = cfg.CONF.actionrunner.workflows_pool_size
        actions_pool_size = cfg.CONF.actionrunner.actions_pool_size
        self._workflows_dispatcher = BufferedDispatcher(dispatch_pool_size=workflows_pool_size,
                                                        name='workflows-dispatcher')
        self._actions_dispatcher = BufferedDispatcher(dispatch_pool_size=actions_pool_size,
                                                      name='actions-dispatcher')

    def process(self, body, message):
        try:
            if not isinstance(body, self._handler.message_type):
                raise TypeError('Received an unexpected type "%s" for payload.' % type(body))

            action_is_workflow = getattr(body, 'action_is_workflow', False)
            if action_is_workflow:
                # Use workflow dispatcher queue
                dispatcher = self._workflows_dispatcher
            else:
                # Use the dispatcher pool for regular (non-workflow) actions
                dispatcher = self._actions_dispatcher

            LOG.debug('Using BufferedDispatcher pool: "%s"', str(dispatcher))
            dispatcher.dispatch(self._process_message, body)
        except Exception:
            LOG.exception('%s failed to process message: %s', self.__class__.__name__, body)
        finally:
            # At this point we will always ack a message.
            message.ack()

    def shutdown(self):
        self._workflows_dispatcher.shutdown()
        self._actions_dispatcher.shutdown()
Example #10
class QueueConsumer(ConsumerMixin):
    def __init__(self, connection, queues, handler):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._queues = queues
        self._handler = handler

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(
            queues=self._queues, accept=["pickle"], callbacks=[self.process]
        )

        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)

        return [consumer]

    def process(self, body, message):
        try:
            if not isinstance(body, self._handler.message_type):
                raise TypeError(
                    'Received an unexpected type "%s" for payload.' % type(body)
                )

            self._dispatcher.dispatch(self._process_message, body)
        except Exception:
            LOG.exception(
                "%s failed to process message: %s", self.__class__.__name__, body
            )
        finally:
            # At this point we will always ack a message.
            message.ack()

    def _process_message(self, body):
        try:
            self._handler.process(body)
        except Exception:
            LOG.exception(
                "%s failed to process message: %s", self.__class__.__name__, body
            )
Example #11
class ActionStateQueueConsumer(ConsumerMixin):
    def __init__(self, connection, tracker):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._tracker = tracker

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONSTATE_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        LOG.debug('process_task')
        LOG.debug('     body: %s', body)
        LOG.debug('     message.properties: %s', message.properties)
        LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self._add_to_querier(body)
        except Exception:
            LOG.exception('Add query_context failed. Message body : %s', body)

    def _add_to_querier(self, body):
        querier = self._tracker.get_querier(body.query_module)
        context = QueryContext.from_model(body)
        querier.add_queries(query_contexts=[context])
        return
Example #12
class ActionStateQueueConsumer(ConsumerMixin):
    def __init__(self, connection, tracker):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._tracker = tracker

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONSTATE_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self._add_to_querier(body)
        except Exception:
            LOG.exception('Add query_context failed. Message body : %s', body)

    def _add_to_querier(self, body):
        querier = self._tracker.get_querier(body.query_module)
        context = QueryContext.from_model(body)
        querier.add_queries(query_contexts=[context])
        return
Example #13
class LiveActionUpdateQueueConsumer(ConsumerMixin):
    def __init__(self, connection, notifier):
        self.connection = connection
        self._dispatcher = BufferedDispatcher()
        self._notifier = notifier

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONUPDATE_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        LOG.debug('process_task')
        LOG.debug('     body: %s', body)
        LOG.debug('     message.properties: %s', message.properties)
        LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            if body.status in ACTION_COMPLETE_STATES:
                self._notifier.handle_action_complete(body)
        except Exception:
            LOG.exception(
                'Sending notifications/action trigger failed. Message body : %s',
                body)
Example #14
 def test_dispatch_simple(self):
     dispatcher = BufferedDispatcher(dispatch_pool_size=10)
     mock_handler = mock.MagicMock()
     expected = []
     for i in range(10):
         dispatcher.dispatch(mock_handler, i, i + 1)
         expected.append((i, i + 1))
     while mock_handler.call_count < 10:
         eventlet.sleep(0.01)
     dispatcher.shutdown()
     call_args_list = [(args[0][0], args[0][1]) for args in mock_handler.call_args_list]
     self.assertItemsEqual(expected, call_args_list)
Example #15
 def test_dispatch_starved(self):
     dispatcher = BufferedDispatcher(dispatch_pool_size=2,
                                     monitor_thread_empty_q_sleep_time=0.01,
                                     monitor_thread_no_workers_sleep_time=0.01)
     mock_handler = mock.MagicMock()
     expected = []
     for i in range(10):
         dispatcher.dispatch(mock_handler, i, i + 1)
         expected.append((i, i + 1))
     while mock_handler.call_count < 10:
         eventlet.sleep(0.01)
     dispatcher.shutdown()
     call_args_list = [(args[0][0], args[0][1]) for args in mock_handler.call_args_list]
     self.assertItemsEqual(expected, call_args_list)
Example #16
File: worker.py Project: timff/st2
 def __init__(self, connection):
     self.connection = connection
     self.container = RunnerContainer()
     self._dispatcher = BufferedDispatcher()
Example #17
File: worker.py Project: timff/st2
class Worker(ConsumerMixin):

    def __init__(self, connection):
        self.connection = connection
        self.container = RunnerContainer()
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONRUNNER_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self.execute_action(body)
        except Exception:
            LOG.exception('execute_action failed. Message body : %s', body)

    def execute_action(self, liveaction):
        try:
            liveaction_db = get_liveaction_by_id(liveaction.id)
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find liveaction %s in the database.',
                          liveaction.id)
            raise

        # Update liveaction status to "running"
        liveaction_db = update_liveaction_status(status=LIVEACTION_STATUS_RUNNING,
                                                 liveaction_id=liveaction_db.id)
        # Launch action
        LOG.audit('Launching action execution.',
                  extra={'liveaction': liveaction_db.to_serializable_dict()})

        try:
            result = self.container.dispatch(liveaction_db)
            LOG.debug('Runner dispatch produced result: %s', result)
        except Exception:
            liveaction_db = update_liveaction_status(status=LIVEACTION_STATUS_FAILED,
                                                     liveaction_id=liveaction_db.id)
            raise

        if not result:
            raise ActionRunnerException('Failed to execute action.')

        return result
Example #18
 def __init__(self, connection, tracker):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._tracker = tracker
Example #19
 def __init__(self, connection, queues, handler):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._queues = queues
     self._handler = handler
Example #20
 def __init__(self, connection, queues, handler):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._queues = queues
     self._handler = handler
Example #21
class Worker(ConsumerMixin):

    def __init__(self, connection):
        self.connection = connection
        self.container = RunnerContainer()
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONRUNNER_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self.execute_action(body)
        except Exception:
            LOG.exception('execute_action failed. Message body : %s', body)

    def execute_action(self, actionexecution):
        try:
            actionexec_db = get_actionexec_by_id(actionexecution.id)
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find ActionExecution %s in the database.',
                          actionexecution.id)
            raise

        # Update ActionExecution status to "running"
        actionexec_db = update_actionexecution_status(ACTIONEXEC_STATUS_RUNNING,
                                                      actionexec_db.id)
        # Launch action
        LOG.audit('Launching action execution.',
                  extra={'actionexec': actionexec_db.to_serializable_dict()})

        try:
            result = self.container.dispatch(actionexec_db)
            LOG.debug('Runner dispatch produced result: %s', result)
        except Exception:
            actionexec_db = update_actionexecution_status(ACTIONEXEC_STATUS_FAILED,
                                                          actionexec_db.id)
            raise

        if not result:
            raise ActionRunnerException('Failed to execute action.')

        return result
Example #22
class Worker(ConsumerMixin):

    def __init__(self, connection):
        self.connection = connection
        self.container = RunnerContainer()
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONRUNNER_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self.execute_action(body)
        except Exception:
            LOG.exception('execute_action failed. Message body : %s', body)

    def execute_action(self, liveaction):
        # Note: We only want to execute actions which haven't completed yet
        if liveaction.status in [LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED]:
            LOG.info('Ignoring liveaction %s which has already finished', liveaction.id)
            return

        try:
            liveaction_db = get_liveaction_by_id(liveaction.id)
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find liveaction %s in the database.',
                          liveaction.id)
            raise
        # stamp liveaction with process_info
        runner_info = system_info.get_process_info()

        # Update liveaction status to "running"
        liveaction_db = update_liveaction_status(status=LIVEACTION_STATUS_RUNNING,
                                                 runner_info=runner_info,
                                                 liveaction_id=liveaction_db.id)
        action_execution_db = executions.update_execution(liveaction_db)

        # Launch action
        extra = {'action_execution_db': action_execution_db, 'liveaction_db': liveaction_db}
        LOG.audit('Launching action execution.', extra=extra)

        # the extra field will not be shown in non-audit logs so temporarily log at info.
        LOG.info('{~}action_execution: %s / {~}live_action: %s',
                 action_execution_db.id, liveaction_db.id)
        try:
            result = self.container.dispatch(liveaction_db)
            LOG.debug('Runner dispatch produced result: %s', result)
            if not result:
                raise ActionRunnerException('Failed to execute action.')
        except Exception:
            liveaction_db = update_liveaction_status(status=LIVEACTION_STATUS_FAILED,
                                                     liveaction_id=liveaction_db.id)
            raise

        return result
Example #23
 def __init__(self, connection, tracker):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._tracker = tracker
Example #24
class Worker(ConsumerMixin):
    def __init__(self, connection):
        self.connection = connection
        self.container = RunnerContainer()
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        consumer = Consumer(queues=[ACTIONRUNNER_WORK_Q],
                            accept=['pickle'],
                            callbacks=[self.process_task])
        # use prefetch_count=1 for fair dispatch. This way workers that finish an item get the next
        # task and the work does not get queued behind any single large item.
        consumer.qos(prefetch_count=1)
        return [consumer]

    def process_task(self, body, message):
        # LOG.debug('process_task')
        # LOG.debug('     body: %s', body)
        # LOG.debug('     message.properties: %s', message.properties)
        # LOG.debug('     message.delivery_info: %s', message.delivery_info)
        try:
            self._dispatcher.dispatch(self._do_process_task, body)
        finally:
            message.ack()

    def _do_process_task(self, body):
        try:
            self.execute_action(body)
        except Exception:
            LOG.exception('execute_action failed. Message body : %s', body)

    def execute_action(self, liveaction):
        # Note: We only want to execute actions which haven't completed yet
        if liveaction.status in [
                LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
        ]:
            LOG.info('Ignoring liveaction %s which has already finished',
                     liveaction.id)
            return

        try:
            liveaction_db = get_liveaction_by_id(liveaction.id)
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find liveaction %s in the database.',
                          liveaction.id)
            raise
        # stamp liveaction with process_info
        runner_info = system_info.get_process_info()

        # Update liveaction status to "running"
        liveaction_db = update_liveaction_status(
            status=LIVEACTION_STATUS_RUNNING,
            runner_info=runner_info,
            liveaction_id=liveaction_db.id)
        action_execution_db = executions.update_execution(liveaction_db)

        # Launch action
        extra = {
            'action_execution_db': action_execution_db,
            'liveaction_db': liveaction_db
        }
        LOG.audit('Launching action execution.', extra=extra)

        # the extra field will not be shown in non-audit logs so temporarily log at info.
        LOG.info('{~}action_execution: %s / {~}live_action: %s',
                 action_execution_db.id, liveaction_db.id)
        try:
            result = self.container.dispatch(liveaction_db)
            LOG.debug('Runner dispatch produced result: %s', result)
            if not result:
                raise ActionRunnerException('Failed to execute action.')
        except Exception:
            liveaction_db = update_liveaction_status(
                status=LIVEACTION_STATUS_FAILED,
                liveaction_id=liveaction_db.id)
            raise

        return result
Example #25
 def __init__(self, connection, notifier):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._notifier = notifier
Example #26
File: worker.py Project: timff/st2
 def __init__(self, connection):
     self.connection = connection
     self.rules_engine = RulesEngine()
     self._dispatcher = BufferedDispatcher()
Example #27
class Historian(ConsumerMixin):
    def __init__(self, connection, timeout=60, wait=3):
        self.wait = wait
        self.timeout = timeout
        self.connection = connection
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        return [
            Consumer(queues=[QUEUES['create']],
                     accept=['pickle'],
                     callbacks=[self.process_create]),
            Consumer(queues=[QUEUES['update']],
                     accept=['pickle'],
                     callbacks=[self.process_update])
        ]

    def process_create(self, body, message):
        try:
            self._dispatcher.dispatch(self.record_action_execution, body)
        finally:
            message.ack()

    def process_update(self, body, message):
        try:
            self._dispatcher.dispatch(self.update_action_execution_history,
                                      body)
        finally:
            message.ack()

    def record_action_execution(self, body):
        try:
            history_id = bson.ObjectId()
            execution = ActionExecution.get_by_id(str(body.id))
            action_ref = ResourceReference.from_string_reference(
                ref=execution.action)
            action_db, _ = action_utils.get_action_by_dict({
                'name': action_ref.name,
                'pack': action_ref.pack
            })
            runner = RunnerType.get_by_name(action_db.runner_type['name'])

            attrs = {
                'id': history_id,
                'action': vars(ActionAPI.from_model(action_db)),
                'runner': vars(RunnerTypeAPI.from_model(runner)),
                'execution': vars(ActionExecutionAPI.from_model(execution))
            }

            if 'rule' in execution.context:
                rule = reference.get_model_from_ref(
                    Rule, execution.context.get('rule', {}))
                attrs['rule'] = vars(RuleAPI.from_model(rule))

            if 'trigger_instance' in execution.context:
                trigger_instance_id = execution.context.get(
                    'trigger_instance', {})
                trigger_instance_id = trigger_instance_id.get('id', None)
                trigger_instance = TriggerInstance.get_by_id(
                    trigger_instance_id)
                trigger = reference.get_model_by_resource_ref(
                    db_api=Trigger, ref=trigger_instance.trigger)
                trigger_type = reference.get_model_by_resource_ref(
                    db_api=TriggerType, ref=trigger.type)
                trigger_instance = reference.get_model_from_ref(
                    TriggerInstance,
                    execution.context.get('trigger_instance', {}))
                attrs['trigger_instance'] = vars(
                    TriggerInstanceAPI.from_model(trigger_instance))
                attrs['trigger'] = vars(TriggerAPI.from_model(trigger))
                attrs['trigger_type'] = vars(
                    TriggerTypeAPI.from_model(trigger_type))

            parent = ActionExecutionHistory.get(
                execution__id=execution.context.get('parent', ''))
            if parent:
                attrs['parent'] = str(parent.id)
                if str(history_id) not in parent.children:
                    parent.children.append(str(history_id))
                    ActionExecutionHistory.add_or_update(parent)

            history = ActionExecutionHistoryDB(**attrs)
            history = ActionExecutionHistory.add_or_update(history)
        except Exception:
            LOG.exception('An unexpected error occurred while creating the '
                          'action execution history.')
            raise

    def update_action_execution_history(self, body):
        try:
            count = int(self.timeout // self.wait)  # integer count of polling attempts
            # Allow up to 1 minute for the post event to create the history record.
            for i in range(count):
                history = ActionExecutionHistory.get(
                    execution__id=str(body.id))
                if history:
                    execution = ActionExecution.get_by_id(str(body.id))
                    history.execution = vars(
                        ActionExecutionAPI.from_model(execution))
                    history = ActionExecutionHistory.add_or_update(history)
                    return
                if i + 1 >= count:
                    # If wait failed, create the history record regardless.
                    self.record_action_execution(body)
                    return
                eventlet.sleep(self.wait)
        except Exception:
            LOG.exception('An unexpected error occurred while updating the '
                          'action execution history.')
            raise
Example #28
class Historian(ConsumerMixin):

    def __init__(self, connection, timeout=60, wait=3):
        self.wait = wait
        self.timeout = timeout
        self.connection = connection
        self._dispatcher = BufferedDispatcher()

    def shutdown(self):
        self._dispatcher.shutdown()

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[QUEUES['create']], accept=['pickle'],
                         callbacks=[self.process_create]),
                Consumer(queues=[QUEUES['update']], accept=['pickle'],
                         callbacks=[self.process_update])]

    def process_create(self, body, message):
        try:
            self._dispatcher.dispatch(self.record_action_execution, body)
        finally:
            message.ack()

    def process_update(self, body, message):
        try:
            self._dispatcher.dispatch(self.update_action_execution_history, body)
        finally:
            message.ack()

    def record_action_execution(self, body):
        try:
            history_id = bson.ObjectId()
            execution = ActionExecution.get_by_id(str(body.id))
            action_ref = ResourceReference.from_string_reference(ref=execution.action)
            action_db, _ = action_utils.get_action_by_dict(
                {'name': action_ref.name,
                 'pack': action_ref.pack})
            runner = RunnerType.get_by_name(action_db.runner_type['name'])

            attrs = {
                'id': history_id,
                'action': vars(ActionAPI.from_model(action_db)),
                'runner': vars(RunnerTypeAPI.from_model(runner)),
                'execution': vars(ActionExecutionAPI.from_model(execution))
            }

            if 'rule' in execution.context:
                rule = reference.get_model_from_ref(Rule, execution.context.get('rule', {}))
                attrs['rule'] = vars(RuleAPI.from_model(rule))

            if 'trigger_instance' in execution.context:
                trigger_instance_id = execution.context.get('trigger_instance', {})
                trigger_instance_id = trigger_instance_id.get('id', None)
                trigger_instance = TriggerInstance.get_by_id(trigger_instance_id)
                trigger = reference.get_model_by_resource_ref(db_api=Trigger,
                                                              ref=trigger_instance.trigger)
                trigger_type = reference.get_model_by_resource_ref(db_api=TriggerType,
                                                                   ref=trigger.type)
                trigger_instance = reference.get_model_from_ref(
                    TriggerInstance, execution.context.get('trigger_instance', {}))
                attrs['trigger_instance'] = vars(TriggerInstanceAPI.from_model(trigger_instance))
                attrs['trigger'] = vars(TriggerAPI.from_model(trigger))
                attrs['trigger_type'] = vars(TriggerTypeAPI.from_model(trigger_type))

            parent = ActionExecutionHistory.get(execution__id=execution.context.get('parent', ''))
            if parent:
                attrs['parent'] = str(parent.id)
                if str(history_id) not in parent.children:
                    parent.children.append(str(history_id))
                    ActionExecutionHistory.add_or_update(parent)

            history = ActionExecutionHistoryDB(**attrs)
            history = ActionExecutionHistory.add_or_update(history)
        except Exception:
            LOG.exception('An unexpected error occurred while creating the '
                          'action execution history.')
            raise

    def update_action_execution_history(self, body):
        try:
            count = int(self.timeout // self.wait)  # integer count of polling attempts
            # Allow up to 1 minute for the post event to create the history record.
            for i in range(count):
                history = ActionExecutionHistory.get(execution__id=str(body.id))
                if history:
                    execution = ActionExecution.get_by_id(str(body.id))
                    history.execution = vars(ActionExecutionAPI.from_model(execution))
                    history = ActionExecutionHistory.add_or_update(history)
                    return
                if i + 1 >= count:
                    # If wait failed, create the history record regardless.
                    self.record_action_execution(body)
                    return
                eventlet.sleep(self.wait)
        except Exception:
            LOG.exception('An unexpected error occurred while updating the '
                          'action execution history.')
            raise
Example #29
 def __init__(self, connection, notifier):
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
     self._notifier = notifier
Example #30
 def __init__(self, connection, timeout=60, wait=3):
     self.wait = wait
     self.timeout = timeout
     self.connection = connection
     self._dispatcher = BufferedDispatcher()
Example #31
 def __init__(self, connection):
     self.connection = connection
     self.container = RunnerContainer()
     self._dispatcher = BufferedDispatcher()
Example #32
 def __init__(self, connection, timeout=60, wait=3):
     self.wait = wait
     self.timeout = timeout
     self.connection = connection
     self._dispatcher = BufferedDispatcher()