def test_response(self):
    """Verify a published RESPONSE message reaches the registered handler."""
    got_response = threading_utils.Event()
    response_handler = mock.MagicMock()
    response_handler.side_effect = lambda *args, **kwargs: got_response.set()
    p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE,
                    {pr.RESPONSE: response_handler},
                    transport='memory',
                    transport_options={
                        'polling_interval': POLLING_INTERVAL,
                    })
    runner = threading_utils.daemon_thread(p.start)
    runner.start()
    p.wait()
    response = pr.Response(pr.RUNNING)
    p.publish(response, TEST_TOPIC)
    self.assertTrue(got_response.wait(test_utils.WAIT_TIMEOUT))
    self.assertTrue(got_response.is_set())
    p.stop()
    runner.join()
    self.assertTrue(response_handler.called)
    response_handler.assert_called_with(response.to_dict(), mock.ANY)
def __init__(self, uuid, exchange, topics,
             transition_timeout=pr.REQUEST_TIMEOUT,
             url=None, transport=None, transport_options=None,
             retry_options=None, worker_expiry=pr.EXPIRES_AFTER):
    """Sets up the proxy, worker finder and message handlers.

    :param uuid: identity of this executor (used as its topic)
    :param exchange: exchange to communicate over
    :param topics: topics that workers are expected to listen on
    :param transition_timeout: how long a request may sit between state
        transitions before being considered expired
    :param worker_expiry: how long before a silent worker is forgotten
    """
    self._uuid = uuid
    self._ongoing_requests = {}
    self._ongoing_requests_lock = threading.RLock()
    self._transition_timeout = transition_timeout
    self._proxy = proxy.Proxy(uuid, exchange,
                              on_wait=self._on_wait, url=url,
                              transport=transport,
                              transport_options=transport_options,
                              retry_options=retry_options)
    # NOTE(harlowja): this finder has no external dependencies beyond what
    # the engine already requires, at the cost of periodic polling traffic
    # to workers (and the need to know their topics ahead of time) in order
    # to learn which tasks each worker can perform.
    self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics,
                                        worker_expiry=worker_expiry)
    # RESPONSE messages are handled locally; NOTIFY messages are routed to
    # the finder so it can keep its view of available workers fresh.
    handlers = {
        pr.RESPONSE: dispatcher.Handler(self._process_response,
                                        validator=pr.Response.validate),
        pr.NOTIFY: dispatcher.Handler(
            self._finder.process_response,
            validator=functools.partial(pr.Notify.validate,
                                        response=True)),
    }
    self._proxy.dispatcher.type_handlers.update(handlers)
    # Thread that will run the message dispatching (and periodically
    # call the on_wait callback to do various things) loop...
    self._helper = None
    self._messages_processed = {
        'finder': self._finder.messages_processed,
    }
def test_notify(self):
    """Verify a published NOTIFY message reaches the registered handler."""
    got_notify = threading_utils.Event()
    notify_handler = mock.MagicMock()
    notify_handler.side_effect = lambda *args, **kwargs: got_notify.set()
    p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE,
                    {pr.NOTIFY: notify_handler},
                    transport='memory',
                    transport_options={
                        'polling_interval': POLLING_INTERVAL,
                    })
    runner = threading_utils.daemon_thread(p.start)
    runner.start()
    p.wait()
    p.publish(pr.Notify(), TEST_TOPIC)
    self.assertTrue(got_notify.wait(test_utils.WAIT_TIMEOUT))
    p.stop()
    runner.join()
    self.assertTrue(notify_handler.called)
    notify_handler.assert_called_with({}, mock.ANY)
def __init__(self, topic, exchange, executor, endpoints,
             url=None, transport=None, transport_options=None,
             retry_options=None):
    """Sets up the server proxy and its message type handlers.

    :param topic: topic (and queue name) this server listens on
    :param exchange: exchange to communicate over
    :param executor: executor used to run notify/request processing
        asynchronously (via the ``delayed`` wrapper)
    :param endpoints: endpoint objects this server can dispatch to
    """
    type_handlers = {
        pr.NOTIFY: [
            delayed(executor)(self._process_notify),
            functools.partial(pr.Notify.validate, response=False),
        ],
        pr.REQUEST: [
            delayed(executor)(self._process_request),
            pr.Request.validate,
        ],
    }
    self._proxy = proxy.Proxy(topic, exchange,
                              type_handlers=type_handlers,
                              url=url, transport=transport,
                              transport_options=transport_options,
                              retry_options=retry_options)
    self._topic = topic
    # Index endpoints by name for direct lookup when serving requests
    # (dict comprehension instead of dict([...]) per flake8 C404).
    self._endpoints = {endpoint.name: endpoint for endpoint in endpoints}
def proxy(self, reset_master_mock=False, **kwargs):
    """Build a proxy wired to this fixture's topic/exchange/broker url.

    Any keyword arguments override the fixture defaults; optionally the
    master mock is reset after construction so tests see a clean slate.
    """
    params = {
        'topic': self.topic,
        'exchange_name': self.exchange_name,
        'url': self.broker_url,
        'type_handlers': {},
    }
    params.update(kwargs)
    built = proxy.Proxy(**params)
    if reset_master_mock:
        self.resetMasterMock()
    return built
def proxy(self, reset_master_mock=False, **kwargs):
    """Build a proxy wired to this fixture's uuid/exchange/broker url.

    Any keyword arguments override the fixture defaults; optionally the
    master mock is reset after construction so tests see a clean slate.
    """
    params = {
        'uuid': self.uuid,
        'exchange_name': self.exchange_name,
        'on_message': self.on_message_mock,
        'url': self.broker_url,
    }
    params.update(kwargs)
    built = proxy.Proxy(**params)
    if reset_master_mock:
        self._reset_master_mock()
    return built
def __init__(self, uuid, exchange, workers_info, **kwargs):
    """Sets up the proxy and the static task -> topic routing table.

    :param uuid: identity of this executor
    :param exchange: exchange to communicate over
    :param workers_info: mapping of topic -> list of task names that the
        workers on that topic can perform
    """
    self._uuid = uuid
    self._proxy = proxy.Proxy(uuid, exchange,
                              self._on_message, self._on_wait, **kwargs)
    self._proxy_thread = None
    self._remote_tasks = {}
    # TODO(skudriashev): This data should be collected from workers
    # using broadcast messages directly.
    #
    # Invert the provided mapping into task -> topic; if the same task
    # appears under several topics the last one seen wins (same as the
    # equivalent nested-loop assignment would do).
    self._workers_info = {task: topic
                          for topic, tasks in workers_info.items()
                          for task in tasks}
def __init__(self, uuid, exchange, topics,
             transition_timeout=pr.REQUEST_TIMEOUT,
             url=None, transport=None, transport_options=None,
             retry_options=None):
    """Sets up the proxy, worker finder and background helper threads.

    :param uuid: identity of this executor (used as its topic)
    :param exchange: exchange to communicate over
    :param topics: topics that workers are expected to listen on
    :param transition_timeout: how long a request may sit between state
        transitions before being considered expired
    """
    self._uuid = uuid
    self._requests_cache = wt.RequestsCache()
    self._transition_timeout = transition_timeout
    # Only RESPONSE messages are handled here directly; NOTIFY traffic is
    # consumed by the worker finder created below.
    type_handlers = {
        pr.RESPONSE: dispatcher.Handler(self._process_response,
                                        validator=pr.Response.validate),
    }
    self._proxy = proxy.Proxy(uuid, exchange,
                              type_handlers=type_handlers,
                              on_wait=self._on_wait, url=url,
                              transport=transport,
                              transport_options=transport_options,
                              retry_options=retry_options)
    # NOTE(harlowja): This is the most simplest finder impl. that
    # doesn't have external dependencies (outside of what this engine
    # already requires); it though does create periodic 'polling' traffic
    # to workers to 'learn' of the tasks they can perform (and requires
    # pre-existing knowledge of the topics those workers are on to gather
    # and update this information).
    self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics)
    self._finder.notifier.register(wt.WorkerFinder.WORKER_ARRIVED,
                                   self._on_worker)
    self._helpers = tu.ThreadBundle()
    # Proxy thread: after starting, block until the proxy reports it is
    # running; stop it before joining so the thread can exit.
    self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
                       after_start=lambda t: self._proxy.wait(),
                       before_join=lambda t: self._proxy.stop())
    p_worker = periodics.PeriodicWorker.create([self._finder])
    if p_worker:
        # Periodic thread (only when the finder exposes periodic
        # callbacks): reset around start/join so it can be restarted.
        self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
                           before_join=lambda t: p_worker.stop(),
                           after_join=lambda t: p_worker.reset(),
                           before_start=lambda t: p_worker.reset())
def __init__(self, topic, exchange, executor, endpoints, **kwargs):
    """Sets up the server proxy and its message type handlers.

    :param topic: topic (and queue name) this server listens on
    :param exchange: exchange to communicate over
    :param executor: executor used to run notify/request processing
        asynchronously (via the ``delayed`` wrapper)
    :param endpoints: endpoint objects this server can dispatch to
    :param kwargs: passed through to the underlying proxy
    """
    handlers = {
        pr.NOTIFY: [
            delayed(executor)(self._process_notify),
            functools.partial(pr.Notify.validate, response=False),
        ],
        pr.REQUEST: [
            delayed(executor)(self._process_request),
            pr.Request.validate,
        ],
    }
    self._proxy = proxy.Proxy(topic, exchange, handlers,
                              on_wait=None, **kwargs)
    self._topic = topic
    # Index endpoints by name for direct lookup when serving requests
    # (dict comprehension instead of dict([...]) per flake8 C404).
    self._endpoints = {endpoint.name: endpoint for endpoint in endpoints}
def __init__(self, uuid, exchange, topics,
             transition_timeout=pr.REQUEST_TIMEOUT,
             url=None, transport=None, transport_options=None,
             retry_options=None):
    """Sets up the proxy, periodic notifier and helper threads.

    :param uuid: identity of this executor (used as its topic)
    :param exchange: exchange to communicate over
    :param topics: topics that workers are expected to listen on
    :param transition_timeout: how long a request may sit between state
        transitions before being considered expired
    """
    self._uuid = uuid
    self._topics = topics
    self._requests_cache = wt.RequestsCache()
    self._workers = wt.TopicWorkers()
    self._transition_timeout = transition_timeout
    # NOTIFY messages (validated as responses) feed worker discovery;
    # RESPONSE messages carry task execution results.
    type_handlers = {
        pr.NOTIFY: [
            self._process_notify,
            functools.partial(pr.Notify.validate, response=True),
        ],
        pr.RESPONSE: [
            self._process_response,
            pr.Response.validate,
        ],
    }
    self._proxy = proxy.Proxy(uuid, exchange, type_handlers,
                              on_wait=self._on_wait, url=url,
                              transport=transport,
                              transport_options=transport_options,
                              retry_options=retry_options)
    # Periodically broadcasts to the configured topics (every
    # pr.NOTIFY_PERIOD) so workers can announce themselves.
    self._periodic = wt.PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD),
                                       [self._notify_topics])
    self._helpers = tu.ThreadBundle()
    # Proxy thread: after starting, block until the proxy reports it is
    # running; stop it before joining so the thread can exit.
    self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
                       after_start=lambda t: self._proxy.wait(),
                       before_join=lambda t: self._proxy.stop())
    # Periodic thread: reset around start/join so it can be restarted.
    self._helpers.bind(lambda: tu.daemon_thread(self._periodic.start),
                       before_join=lambda t: self._periodic.stop(),
                       after_join=lambda t: self._periodic.reset(),
                       before_start=lambda t: self._periodic.reset())
def __init__(self, uuid, exchange, topics,
             transition_timeout=pr.REQUEST_TIMEOUT, **kwargs):
    """Sets up the proxy, caches and the periodic topic notifier.

    :param uuid: identity of this executor (used as its topic)
    :param exchange: exchange to communicate over
    :param topics: topics that workers are expected to listen on
    :param transition_timeout: how long a request may sit between state
        transitions before being considered expired
    :param kwargs: passed through to the underlying proxy
    """
    self._uuid = uuid
    self._topics = topics
    self._transition_timeout = transition_timeout
    self._requests_cache = cache.RequestsCache()
    self._workers_cache = cache.WorkersCache()
    self._workers_arrival = threading.Condition()
    # NOTIFY messages (validated as responses) feed worker discovery;
    # RESPONSE messages carry task execution results.
    type_handlers = {
        pr.NOTIFY: [
            self._process_notify,
            functools.partial(pr.Notify.validate, response=True),
        ],
        pr.RESPONSE: [
            self._process_response,
            pr.Response.validate,
        ],
    }
    self._proxy = proxy.Proxy(uuid, exchange, type_handlers,
                              self._on_wait, **kwargs)
    self._proxy_thread = None
    # Periodically broadcasts to the configured topics so workers can
    # announce themselves.
    self._periodic = PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD),
                                    [self._notify_topics])
    self._periodic_thread = None
def test_multi_message(self):
    """Each of the three message types is dispatched to its own handler.

    Publishes 30 messages cycling through NOTIFY/RESPONSE/REQUEST and
    verifies every handler saw exactly its third of them.
    """
    message_count = 30
    barrier = latch.Latch(message_count)

    # Named function instead of an assigned lambda (PEP 8 E731); the
    # handler signature is (data, message).
    def countdown(data, message):
        barrier.countdown()

    on_notify = mock.MagicMock()
    on_notify.side_effect = countdown
    on_response = mock.MagicMock()
    on_response.side_effect = countdown
    on_request = mock.MagicMock()
    on_request.side_effect = countdown
    handlers = {
        pr.NOTIFY: on_notify,
        pr.RESPONSE: on_response,
        pr.REQUEST: on_request,
    }
    p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers,
                    transport='memory',
                    transport_options={
                        'polling_interval': POLLING_INTERVAL,
                    })
    t = threading_utils.daemon_thread(p.start)
    t.start()
    p.wait()
    # Cycle evenly through the three message kinds.
    for i in range(message_count):
        j = i % 3
        if j == 0:
            p.publish(pr.Notify(), TEST_TOPIC)
        elif j == 1:
            p.publish(pr.Response(pr.RUNNING), TEST_TOPIC)
        else:
            p.publish(
                pr.Request(test_utils.DummyTask("dummy_%s" % i),
                           uuidutils.generate_uuid(),
                           pr.EXECUTE, [], None),
                TEST_TOPIC)
    self.assertTrue(barrier.wait(test_utils.WAIT_TIMEOUT))
    self.assertEqual(0, barrier.needed)
    p.stop()
    t.join()
    self.assertTrue(on_notify.called)
    self.assertTrue(on_response.called)
    self.assertTrue(on_request.called)
    self.assertEqual(10, on_notify.call_count)
    self.assertEqual(10, on_response.call_count)
    self.assertEqual(10, on_request.call_count)
    call_count = sum([
        on_notify.call_count,
        on_response.call_count,
        on_request.call_count,
    ])
    self.assertEqual(message_count, call_count)
def __init__(self, uuid, exchange, executor, endpoints, **kwargs):
    """Sets up the server proxy, executor and endpoint lookup table.

    :param uuid: identity of this server (used as its topic/queue)
    :param exchange: exchange to communicate over
    :param executor: executor used later to run endpoint work
    :param endpoints: endpoint objects this server can dispatch to
    :param kwargs: passed through to the underlying proxy
    """
    self._proxy = proxy.Proxy(uuid, exchange, self._on_message, **kwargs)
    self._executor = executor
    # Index endpoints by name for direct lookup when serving requests
    # (dict comprehension instead of dict([...]) per flake8 C404).
    self._endpoints = {endpoint.name: endpoint for endpoint in endpoints}