def get_listener():
    global _listener

    if not _listener:
        with Connection(transport_utils.get_messaging_urls()) as conn:
            _listener = Listener(conn)
            eventlet.spawn_n(listen, _listener)

    return _listener
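# Hedged sketch of the listen() helper spawned above; its body is not shown
# in this section, so this is an assumption for illustration: it simply
# drives the listener's consume loop inside the spawned green thread.
def listen(listener):
    # Blocks in this greenlet until the consumer loop exits.
    listener.run()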
def start(self):
    try:
        self.connection = Connection(transport_utils.get_messaging_urls())
        self._updates_thread = eventlet.spawn(self.run)
    except Exception:
        LOG.exception('Failed to start sensor_watcher.')
        # Only release the connection if it was actually established
        # (assumes __init__ initializes self.connection to None).
        if self.connection:
            self.connection.release()
def register_exchanges(): LOG.debug("Registering exchanges...") connection_urls = transport_utils.get_messaging_urls() with transport_utils.get_connection() as conn: # Use ConnectionRetryWrapper to deal with rmq clustering etc. retry_wrapper = ConnectionRetryWrapper( cluster_size=len(connection_urls), logger=LOG ) def wrapped_register_exchanges(connection, channel): for exchange in EXCHANGES: _do_register_exchange( exchange=exchange, connection=connection, channel=channel, retry_wrapper=retry_wrapper, ) retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges) def wrapped_predeclare_queues(connection, channel): for queue in QUEUES: _do_predeclare_queue(channel=channel, queue=queue) retry_wrapper.run(connection=conn, wrapped_callback=wrapped_predeclare_queues)
def main(queue, exchange, routing_key='#'):
    exchange = Exchange(exchange, type='topic')
    queue = Queue(name=queue, exchange=exchange, routing_key=routing_key,
                  auto_delete=True)

    with Connection(transport_utils.get_messaging_urls()) as connection:
        watcher = QueueConsumer(connection=connection, queue=queue)
        watcher.run()
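# Hedged sketch of a minimal QueueConsumer built on kombu's ConsumerMixin;
# the real class is not shown in this section, so the name and behavior here
# are assumptions for illustration only.
from kombu.mixins import ConsumerMixin

class QueueConsumerSketch(ConsumerMixin):
    def __init__(self, connection, queue):
        self.connection = connection
        self.queue = queue

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self.queue], callbacks=[self.process])]

    def process(self, body, message):
        print(body)    # inspect the payload
        message.ack()  # acknowledge so the broker can drop the message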
def get_listener(name):
    global _stream_listener
    global _execution_output_listener

    if name == 'stream':
        if not _stream_listener:
            with Connection(transport_utils.get_messaging_urls()) as conn:
                _stream_listener = StreamListener(conn)
                eventlet.spawn_n(listen, _stream_listener)
        return _stream_listener
    elif name == 'execution_output':
        if not _execution_output_listener:
            with Connection(transport_utils.get_messaging_urls()) as conn:
                _execution_output_listener = ExecutionOutputListener(conn)
                eventlet.spawn_n(listen, _execution_output_listener)
        return _execution_output_listener
    else:
        raise ValueError('Invalid listener name: %s' % (name))
def test_process_message(self):
    with Connection(transport_utils.get_messaging_urls()) as conn:
        tracker = ResultsTracker(conn, [ACTIONSTATE_WORK_Q])
        tracker._bootstrap()
        state = ActionStateConsumerTests.get_state(
            ActionStateConsumerTests.liveactions['liveaction1.yaml'])
        tracker._queue_consumer._process_message(state)
        querier = tracker.get_querier('tests.resources.test_querymodule')
        self.assertEqual(querier._query_contexts.qsize(), 1)
def _cleanup_old_queues(self):
    with Connection(transport_utils.get_messaging_urls()) as connection:
        for q in self.OLD_QS:
            bound_q = q(connection.default_channel)

            try:
                bound_q.delete()
            except Exception:
                print('Failed to delete %s.' % q.name)
                traceback.print_exc()
def __init__(self, urls=None):
    """
    :param urls: Connection URLs to use. If not provided, it uses a default
                 value from the config.
    :type urls: ``list``
    """
    urls = urls or transport_utils.get_messaging_urls()
    connection = transport_utils.get_connection(
        urls=urls, connection_kwargs={"failover_strategy": "round-robin"})
    self.pool = connection.Pool(limit=10)
    self.cluster_size = len(urls)
def __init__(self, urls=None):
    """
    :param urls: Connection URLs to use. If not provided, it uses a default
                 value from the config.
    :type urls: ``list``
    """
    urls = urls or transport_utils.get_messaging_urls()
    connection = transport_utils.get_connection(
        urls=urls, connection_kwargs={'failover_strategy': 'round-robin'})
    self.pool = connection.Pool(limit=10)
    self.cluster_size = len(urls)
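# Hedged usage sketch for the connection pool created above. acquire() is
# kombu's standard pool API; the method name and publish details here are
# assumptions for illustration, not this class's actual interface.
def _publish_sketch(self, payload, exchange, routing_key):
    with self.pool.acquire(block=True) as connection:
        producer = connection.Producer(serializer='json')
        producer.publish(payload, exchange=exchange, routing_key=routing_key)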
def register_exchanges():
    LOG.debug('Registering exchanges...')
    connection_urls = transport_utils.get_messaging_urls()

    with Connection(connection_urls) as conn:
        # Use ConnectionRetryWrapper to deal with rmq clustering etc.
        retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),
                                               logger=LOG)

        def wrapped_register_exchanges(connection, channel):
            for exchange in EXCHANGES:
                _do_register_exchange(exchange=exchange, connection=connection,
                                      channel=channel, retry_wrapper=retry_wrapper)

        retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges)
def __init__(self, exchange):
    urls = transport_utils.get_messaging_urls()
    self._state_publisher = SharedPoolPublishers().get_publisher(urls=urls)
    self._state_exchange = exchange
def get_handler():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return FakeMessageHandler(conn, [FAKE_WORK_Q])
def __init__(self, logger=LOG):
    self._publisher = AnnouncementPublisher(
        urls=transport_utils.get_messaging_urls())
    self._logger = logger
def get_tracker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])
def get_tracker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ResultsTracker(conn, [RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE])
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = FakeModelPublisher(transport_utils.get_messaging_urls())
    return cls.publisher
def __init__(self, logger=LOG):
    self._publisher = TriggerInstancePublisher(
        urls=transport_utils.get_messaging_urls())
    self._logger = logger
def get_notifier():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return Notifier(conn, [ACTIONUPDATE_WORK_Q],
                        trigger_dispatcher=TriggerDispatcher(LOG))
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = transport.liveaction.LiveActionPublisher(
            urls=transport_utils.get_messaging_urls())
    return cls.publisher
def get_scheduler_entrypoint():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return SchedulerEntrypoint(conn, [ACTIONSCHEDULER_REQUEST_QUEUE])
def get_scheduler():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionScheduler(conn, [ACTIONRUNNER_REQUEST_Q])
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = transport.workflow.WorkflowExecutionPublisher(
            urls=transport_utils.get_messaging_urls())
    return cls.publisher
def get_engine():
    with kombu.Connection(txpt_utils.get_messaging_urls()) as conn:
        return WorkflowExecutionHandler(conn, WORKFLOW_EXECUTION_QUEUES)
def main(exchange, routing_key, payload):
    exchange = Exchange(exchange, type='topic')
    publisher = PoolPublisher(urls=transport_utils.get_messaging_urls())
    publisher.publish(payload=payload, exchange=exchange, routing_key=routing_key)
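# Hedged sketch of the same topic publish done with plain kombu, for
# comparison with the PoolPublisher call above; the broker URL is a
# placeholder, not taken from this section.
from kombu import Connection, Exchange

def publish_sketch(payload, exchange_name, routing_key):
    exchange = Exchange(exchange_name, type='topic')
    with Connection('amqp://guest:guest@localhost:5672//') as connection:
        producer = connection.Producer(serializer='json')
        producer.publish(payload, exchange=exchange,
                         routing_key=routing_key, declare=[exchange])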
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return TriggerInstanceDispatcher(conn, [RULESENGINE_WORK_QUEUE])
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = transport.reactor.TriggerCUDPublisher(
            urls=transport_utils.get_messaging_urls())
    return cls.publisher
def __init__(self, logger=LOG):
    self._publisher = TriggerInstancePublisher(urls=transport_utils.get_messaging_urls())
    self._logger = logger
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = transport.execution.ActionExecutionOutputPublisher(
            urls=transport_utils.get_messaging_urls())
    return cls.publisher
def get_scheduler():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionScheduler(conn, [ACTIONSCHEDULER_REQUEST_QUEUE])
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionDispatcher(conn, [ACTIONRUNNER_WORK_Q, ACTIONRUNNER_CANCEL_Q])
def __init__(self, logger=LOG):
    self._publisher = AnnouncementPublisher(urls=transport_utils.get_messaging_urls())
    self._logger = logger
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionDispatcher(conn, ACTIONRUNNER_QUEUES)
def get_engine():
    with kombu.Connection(txpt_utils.get_messaging_urls()) as conn:
        return WorkflowDispatcher(conn, WORKFLOW_EXECUTION_QUEUES)
def _get_publisher(cls):
    if not cls.publisher:
        cls.publisher = transport.actionexecutionstate.ActionExecutionStatePublisher(
            urls=transport_utils.get_messaging_urls())
    return cls.publisher
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ActionExecutionDispatcher(
            conn, [ACTIONRUNNER_WORK_Q, ACTIONRUNNER_CANCEL_Q])
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return TriggerInstanceDispatcher(conn, [RULESENGINE_WORK_Q])
def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ExecutionsExporter(conn, [EXPORTER_WORK_Q])