def test_service_disconnect_with_active_async_worker(
        container_factory, rabbit_manager, rabbit_config):
    """ Break the connection between a service's queue consumer and rabbit
    while the service has an active async worker (e.g. event handler).

    Verifies that the un-ack'd message is redelivered after the disconnect
    and that the broken connection is replaced by a new one.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    # get the service's queue consumer connection while we know it's the
    # only active connection
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert len(connections) == 1
    queue_consumer_conn = connections[0]['name']

    # disconnect the service's queue consumer while it's running the worker
    eventlet.spawn(disconnect_on_event, rabbit_manager, queue_consumer_conn)

    # dispatch an event (random payload so redelivery is unambiguous)
    data = uuid.uuid4().hex
    dispatch = event_dispatcher(rabbit_config)
    dispatch('srcservice', 'exampleevent', data)

    # `handle` will have been called twice with the same `data`, because
    # rabbit will have redelivered the un-ack'd message from the first call
    def event_handled_twice():
        assert handle_called.call_args_list == [call(data), call(data)]
    # retried because redelivery is asynchronous
    assert_stops_raising(event_handled_twice)

    # the original (killed) connection must have been replaced
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert queue_consumer_conn not in [conn['name'] for conn in connections]
def test_deadlock_due_to_slow_workers(
    self, service_cls, container_factory, config
):
    """ Deadlock will occur if the unack'd messages grows beyond the
    size of the worker pool at any point. The QueueConsumer will block
    waiting for a worker and pending RPC replies will not be ack'd.
    Any running workers therefore never complete, and the worker pool
    remains exhausted.
    """
    container = container_factory(service_cls, config)
    container.start()

    count = 2

    # dispatch `count` pairs of events to saturate the worker pool
    dispatch = event_dispatcher(config)
    for _ in range(count):
        dispatch("service", "event1", 1)
        dispatch("service", "event2", 1)

    counter = itertools.count(start=1)

    def cb(worker_ctx, res, exc_info):
        # signal the waiter to exit only once all `count` workers
        # for `handle_event1` have completed
        if next(counter) == count:
            return True

    # if the deadlock occurs this waiter times out after 5 seconds
    with entrypoint_waiter(
        container, 'handle_event1', timeout=5, callback=cb
    ):
        pass
def test_runner_with_duplicate_services(
    runner_factory, rabbit_config, service_cls, tracker
):
    """ Adding the same service class to a runner twice should host it
    exactly once; events and rpc both reach the single container.
    """
    # host Service multiple times
    runner = runner_factory(rabbit_config)
    runner.add_service(service_cls)
    runner.add_service(service_cls)  # no-op
    runner.start()

    # it should only be hosted once
    assert len(runner.containers) == 1
    container = list(runner.containers)[0]

    # test events (only one service is hosted)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)

    with entrypoint_waiter(container, "handle"):
        dispatch('srcservice', "testevent", event_data)
    assert tracker.call_args_list == [call(event_data)]

    # test rpc
    arg = "arg"
    with ServiceRpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [call(event_data), call(arg)]
def test_webhook_timeout_retry(web_container_config, container_factory,
                               rmock=None):
    """ A webhook POST that times out should be retried; the second
    (successful) attempt delivers the crawled-url payload.

    NOTE(review): `rmock=None` suggests this test is wrapped by a
    requests-mock decorator that injects the mock positionally — confirm,
    since calling `rmock.post` on the default None would fail.
    """
    test_url = 'http://example.org'
    test_cb_url = 'http://example.org/cb'
    # minimal delay/backoff so the retry happens quickly in the test
    web_container_config['WEBHOOK_DELAY_INTERVAL'] = 1
    web_container_config['WEBHOOK_BACKOFF_FACTOR'] = 1

    container = container_factory(WebhookService, web_container_config)
    storage = replace_dependencies(container, 'storage')
    storage.get_webhooks_for_url = lambda url: [test_cb_url]
    container.start()

    dispatch = event_dispatcher(web_container_config)

    # 1 failed response and then a valid one
    rmock.post(test_cb_url, [{
        'exc': requests.exceptions.ConnectTimeout
    }, {
        'status_code': 200
    }])

    with entrypoint_waiter(container, 'send_response'):
        dispatch('url_crawler', 'url_crawled', {'checked-url': test_url})

    # two attempts were made: the timeout, then the retry
    requests_l = filter_mock_requests(test_cb_url, rmock.request_history)
    assert len(requests_l) == 2

    # the final (successful) request carries the expected payload
    request = requests_l[-1]
    assert request.method == 'POST'
    assert request.url == test_cb_url
    assert request.json() == {'data': {'checked-url': test_url}}
def test_runner_with_duplicate_services(runner_factory, rabbit_config,
                                        service_cls, tracker):
    """ Adding the same service class twice is a no-op: only one container
    is hosted, and both events and rpc reach it.
    """
    # host Service multiple times
    runner = runner_factory(rabbit_config)
    runner.add_service(service_cls)
    runner.add_service(service_cls)  # no-op
    runner.start()

    # it should only be hosted once
    assert len(runner.containers) == 1
    container = list(runner.containers)[0]

    # test events (only one service is hosted)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)

    with entrypoint_waiter(container, "handle"):
        dispatch('srcservice', "testevent", event_data)
    assert tracker.call_args_list == [call(event_data)]

    # test rpc
    arg = "arg"
    with ServiceRpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [call(event_data), call(arg)]
def test_entrypoint_waiter(container_factory, rabbit_config):
    """ The waiter should block until the "handle" entrypoint has
    processed a dispatched event.
    """
    dispatch_event = event_dispatcher(rabbit_config)

    container = container_factory(Service, rabbit_config)
    container.start()

    with entrypoint_waiter(container, "handle"):
        dispatch_event("srcservice", "eventtype", "")
def broadcast_event(service, event, payload, confirms=False):
    """ Dispatch `event` with `payload` on behalf of `service`.

    :param confirms: enable AMQP publish confirms when True
    :return: True once the event has been dispatched
    """
    dispatch = event_dispatcher(
        {'AMQP_URI': BROKER_URL}, use_confirms=confirms
    )
    dispatch(service, event, payload)
    return True
def test_entrypoint_waiter(container_factory, rabbit_config):
    """ The waiter should return once 'handle' has processed the event. """
    send_event = event_dispatcher(rabbit_config)

    container = container_factory(Service, rabbit_config)
    container.start()

    with entrypoint_waiter(container, 'handle'):
        send_event('srcservice', 'eventtype', "")
def test_runner_with_duplicate_services(runner_factory, rabbit_config):
    """ Adding the same service class twice hosts it once (legacy
    Event-class API); events and rpc are both received exactly once.
    """
    # host Service multiple times
    runner = runner_factory(rabbit_config)
    runner.add_service(Service)
    runner.add_service(Service)  # no-op
    runner.start()

    # it should only be hosted once
    assert len(runner.containers) == 1

    # test events (only one service is hosted)
    event_data = "msg"
    with event_dispatcher('srcservice', rabbit_config) as dispatch:
        dispatch(TestEvent(event_data))

    # poll until the handler records the event (bounded by the timeout)
    with eventlet.Timeout(1):
        while len(received) == 0:
            eventlet.sleep()
    assert received == [event_data]

    # test rpc
    arg = "msg"
    del received[:]  # reset the shared tracker before the rpc phase

    with RpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)

    with eventlet.Timeout(1):
        while len(received) == 0:
            eventlet.sleep()
    assert received == [arg]
def test_service_disconnect_with_active_async_worker(container_factory,
                                                     rabbit_manager,
                                                     rabbit_config):
    """ Break the connection between a service's queue consumer and rabbit
    while the service has an active async worker (e.g. event handler).

    Legacy Event-class API variant: verifies redelivery of the un-ack'd
    message and replacement of the broken connection.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    # get the service's queue consumer connection while we know it's the
    # only active connection
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert len(connections) == 1
    queue_consumer_conn = connections[0]['name']

    # disconnect the service's queue consumer while it's running the worker
    eventlet.spawn(disconnect_on_event, rabbit_manager, queue_consumer_conn)

    # dispatch an event (random payload so redelivery is unambiguous)
    data = uuid.uuid4().hex
    with event_dispatcher('srcservice', rabbit_config) as dispatch:
        dispatch(ExampleEvent(data))

    # `handle` will have been called twice with the same `data`, because
    # rabbit will have redelivered the un-ack'd message from the first call
    def event_handled_twice():
        assert handle_called.call_args_list == [call(data), call(data)]
    # retried because redelivery is asynchronous
    assert_stops_raising(event_handled_twice)

    # the original (killed) connection must have been replaced
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert queue_consumer_conn not in [conn['name'] for conn in connections]
def test_handle_order_created(config, products, redis_client,
                              service_container):
    """ An order_created event should decrement in_stock for the ordered
    products and leave un-ordered products untouched.
    """
    dispatch = event_dispatcher(config)

    # order 2 x LZ129 and 4 x LZ127; LZ130 is not ordered
    payload = {
        'order': {
            'order_details': [
                {
                    'product_id': 'LZ129',
                    'quantity': 2
                },
                {
                    'product_id': 'LZ127',
                    'quantity': 4
                },
            ]
        }
    }
    with entrypoint_waiter(service_container, 'handle_order_created'):
        dispatch('orders', 'order_created', payload)

    product_one, product_two, product_three = [
        redis_client.hgetall('products:{}'.format(id_))
        for id_ in ('LZ127', 'LZ129', 'LZ130')
    ]
    # redis returns bytes; stock decremented by the ordered quantities
    assert b'6' == product_one[b'in_stock']
    assert b'9' == product_two[b'in_stock']
    assert b'12' == product_three[b'in_stock']
def test_runner_with_duplicate_services(runner_factory, rabbit_config):
    """ Adding the same service class twice hosts it once; events and rpc
    are each received exactly once.
    """
    # host Service multiple times
    runner = runner_factory(rabbit_config)
    runner.add_service(Service)
    runner.add_service(Service)  # no-op
    runner.start()

    # it should only be hosted once
    assert len(runner.containers) == 1

    # test events (only one service is hosted)
    event_data = "msg"
    dispatch = event_dispatcher(rabbit_config)
    dispatch('srcservice', 'testevent', event_data)

    # poll until the handler records the event (bounded by the timeout)
    with eventlet.Timeout(1):
        while len(received) == 0:
            eventlet.sleep()
    assert received == [event_data]

    # test rpc
    arg = "msg"
    del received[:]  # reset the shared tracker before the rpc phase

    with ServiceRpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)
    assert received == [arg]
def test_handle_order_created(config, products, redis_client,
                              service_container):
    """ An order_created event should decrement in_stock for the ordered
    products and leave un-ordered products untouched.

    NOTE(review): comparisons use `str` keys/values, which implies the
    redis client was created with decode_responses=True — confirm against
    the fixture.
    """
    dispatch = event_dispatcher(config)

    # order 2 x LZ129 and 4 x LZ127; LZ130 is not ordered
    payload = {
        "order": {
            "order_details": [
                {
                    "product_id": "LZ129",
                    "quantity": 2
                },
                {
                    "product_id": "LZ127",
                    "quantity": 4
                },
            ]
        }
    }
    with entrypoint_waiter(service_container, "handle_order_created"):
        dispatch("orders", "order_created", payload)

    product_one, product_two, product_three = [
        redis_client.hgetall("products:{}".format(id_))
        for id_ in ("LZ127", "LZ129", "LZ130")
    ]
    # stock decremented by the ordered quantities
    assert "6" == product_one["in_stock"]
    assert "9" == product_two["in_stock"]
    assert "12" == product_three["in_stock"]
def test_event_interface(container_factory, rabbit_config):
    """ The MailingService 'handle_event' entrypoint should receive a
    dispatched payment_received event.
    """
    send_event = event_dispatcher(rabbit_config)

    container = container_factory(MailingService, rabbit_config)
    container.start()

    with entrypoint_waiter(container, 'handle_event'):
        send_event("payments", "payment_received", "payload")
def test_will_update_cache(self, indexer_service, config, data):
    """ A product_updated event should be written into the local CACHE
    keyed by the product id.
    """
    payload = {'price': 101.0, 'name': 'Tesla', 'id': 1, 'quantity': 99}

    send_event = event_dispatcher(config)
    target = indexer_service.container

    with entrypoint_waiter(target, 'handle_product_updated'):
        send_event('products', 'product_updated', payload)

    assert CACHE[payload['id']] == payload
def test_confirms_disabled(self, warnings, rabbit_config):
    """ With publish confirms disabled, mandatory delivery cannot be
    verified: no exception is raised, but a warning is emitted.
    """
    dispatch = event_dispatcher(
        rabbit_config, mandatory=True, use_confirms=False
    )
    dispatch("srcservice", "bogus", "payload")

    # the dispatcher warns that mandatory delivery is unverifiable
    assert warnings.warn.called
def test_entrypoint_waiter(container_factory, rabbit_config):
    """ The waiter should return once 'handle' has processed a dispatched
    event (legacy Event-class dispatcher API).
    """
    container = container_factory(Service, rabbit_config)
    container.start()

    class ExampleEvent(Event):
        # event type routed to the 'handle' entrypoint
        type = "eventtype"

    with event_dispatcher('srcservice', rabbit_config) as dispatch:
        with entrypoint_waiter(container, 'handle'):
            dispatch(ExampleEvent(""))
def test_service_integration(container_factory, rabbit_config):
    """ Service4 should receive a number_published event dispatched on
    behalf of service3, using the configured redis backend.
    """
    rabbit_config['REDIS_URIS'] = MY_REDIS_URIS

    container = container_factory(Service4, rabbit_config)
    container.start()

    send_event = event_dispatcher(rabbit_config)
    payload = '776223'

    with entrypoint_waiter(container, 'receive_publication'):
        send_event('service3', 'number_published', payload)
def get_dependency(self, worker_ctx):
    """ Inject a dispatch method onto the service instance.

    The returned callable dispatches (event_type, event_data) pairs on
    behalf of this service, propagating the worker's message headers.
    """
    send = event_dispatcher(
        self.config,
        headers=self.get_message_headers(worker_ctx),
        **self.kwargs
    )

    def dispatch(event_type, event_data):
        # source service name is fixed to the hosting service
        send(self.service_name, event_type, event_data)

    return dispatch
def test_restrict_entrypoints(container_factory, rabbit_config):
    """ restrict_entrypoints should disable all entrypoints except the
    named ones (legacy EntrypointProvider/Event API).
    """
    method_called = Mock()

    class OnceProvider(EntrypointProvider):
        """ Entrypoint that spawns a worker exactly once, as soon as
        the service container started.
        """
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

        def start(self):
            self.container.spawn_worker(self, self.args, self.kwargs)

    @entrypoint
    def once(*args, **kwargs):
        # factory producing the OnceProvider dependency
        return DependencyFactory(OnceProvider, args, kwargs)

    class ExampleEvent(Event):
        type = "eventtype"

    class Service(object):
        @rpc
        @once("assert not seen")
        def handler_one(self, arg):
            method_called(arg)

        @event_handler('srcservice', 'eventtype')
        def handler_two(self, msg):
            method_called(msg)

    container = container_factory(Service, rabbit_config)

    # disable the entrypoints on handler_one
    restrict_entrypoints(container, "handler_two")
    container.start()

    # verify the rpc entrypoint on handler_one is disabled
    with RpcProxy("service", rabbit_config) as service_proxy:
        with pytest.raises(MethodNotFound) as exc_info:
            service_proxy.handler_one("msg")
        assert exc_info.value.message == "handler_one"

    # dispatch an event to handler_two
    msg = "msg"
    with event_dispatcher('srcservice', rabbit_config) as dispatch:
        with entrypoint_waiter(container, 'handler_two'):
            dispatch(ExampleEvent(msg))

    # method_called should have exactly one call, derived from the event
    # handler and not from the disabled @once entrypoint
    method_called.assert_called_once_with(msg)
def test_event_interface(container_factory, rabbit_config):
    """ ServiceB's 'handle_event' entrypoint should receive the event
    dispatched on behalf of service_a.
    """
    container = container_factory(ServiceB, rabbit_config)
    container.start()

    send_event = event_dispatcher(rabbit_config)

    # prints "service b received payload" before "exited"
    with entrypoint_waiter(container, 'handle_event'):
        send_event("service_a", "event_type", "payload")
    print("exited")
def test_regular_parameters(self, parameter, mock_container, producer):
    """ Verify that most parameters can be specified at instantiation
    time and are forwarded to the underlying publisher.
    """
    value = Mock()
    dispatcher_kwargs = {parameter: value}

    dispatch = event_dispatcher(
        {'AMQP_URI': 'memory://localhost'}, **dispatcher_kwargs
    )
    dispatch("service-name", "event-type", "event-data")

    # the parameter given at construction reaches producer.publish
    assert producer.publish.call_args[1][parameter] == value
def test_dispatch(container_factory, rabbit_config):
    """ A dispatched event should invoke the 'handler' entrypoint exactly
    once with the event payload.
    """
    container = container_factory(Service, rabbit_config)
    container.start()

    send_event = event_dispatcher(rabbit_config)
    msg = "msg"

    with entrypoint_waiter(container, 'handler', timeout=1):
        send_event('srcservice', 'testevent', msg)

    handler_called.assert_called_once_with(msg)
def test_update_is_called_when_battle_finishes(rabbit_config):
    """ A battle_finished event should invoke update_players_score once
    with the event payload.
    """
    container = ServiceContainer(ScoreService, rabbit_config)
    container.start()

    dispatch = event_dispatcher(rabbit_config)

    # patch the handler so we can assert on its call without side effects
    with mock.patch.object(ScoreService, 'update_players_score') as mock_method:
        with entrypoint_waiter(container, 'update_players_score'):
            dispatch('battle_service', 'battle_finished', [0, 1, 2])
        mock_method.assert_called_once_with([0, 1, 2])

    container.stop()
def get_single_news(news_type, news_id):
    """Fetch a single news item over RPC, notify the recommendation
    service, and return the news as a JSON response.
    """
    try:
        response_object = rpc_get_news(news_type, news_id)
        dispatcher = event_dispatcher(BROKER_CONFIG)
        # notify the recommendation service which user viewed which news;
        # user_id comes from a request cookie
        dispatcher('recommendation_sender', 'receiver', {
            'user_id': request.cookies.get('user_id'),
            'news': response_object['news'],
        })
        return jsonify(response_object), 200
    # NOTE(review): broad catch-all converts any failure (rpc, dispatch,
    # missing 'news' key) into a 500 — consider narrowing
    except Exception as e:
        return error_response(e, 500)
def test_crawler_triggers_webhook(runner_factory, web_container_config):
    """Is crawler_container dispatching to webhook_container?"""
    runner = runner_factory(
        web_container_config, CrawlerService, WebhookService
    )
    webhook_container = get_container(runner, WebhookService)
    storage_w = replace_dependencies(webhook_container, 'storage')

    send_event = event_dispatcher(web_container_config)
    runner.start()

    payload = ['http://example.org/test_crawling_group', 'datagouvfr', None]
    with entrypoint_waiter(webhook_container, 'send_response'):
        send_event('http_server', 'url_to_check', payload)

    # the webhook service looked up webhooks for the crawled url
    assert storage_w.get_webhooks_for_url.call_count == 1
def test_crawler_triggers_webhook(runner_factory, web_container_config):
    """Is crawler_container dispatching to webhook_container?"""
    runner = runner_factory(
        web_container_config, CrawlerService, WebhookService
    )
    webhook_container = get_container(runner, WebhookService)
    storage_w = replace_dependencies(webhook_container, 'storage')

    dispatch_event = event_dispatcher(web_container_config)
    runner.start()

    url_payload = ['http://example.org/test_crawling_group', 'datagouvfr', None]
    with entrypoint_waiter(webhook_container, 'send_response'):
        dispatch_event('http_server', 'url_to_check', url_payload)

    # the webhook service looked up webhooks for the crawled url
    assert storage_w.get_webhooks_for_url.call_count == 1
def get_single_news(news_type, news_id):
    """Fetch a single news item over RPC, notify the recommendation
    service, and return the news as a JSON response.

    Returns a (json, status) tuple: the news payload with 200 on
    success, or an error response with 500 on failure.
    """
    try:
        response_object = rpc_get_news(news_type, news_id)
        dispatcher = event_dispatcher(BROKER_CONFIG)
        # The message sent to the Recommendation Service is composed of the
        # ID of the user viewing the news (read from a request cookie, as
        # if simulating a logged-in user) and the full news payload.
        dispatcher('recommendation_sender', 'receiver', {
            'user_id': request.cookies.get('user_id'),
            'news': response_object['news'],
        })
        return jsonify(response_object), 200
    except Exception as e:
        # Fix: the error response was built but never returned, so the
        # view fell through and implicitly returned None on failure.
        return error_response(e, 500)
def test_dispatch(container_factory, rabbit_config):
    """ A dispatched event should invoke the handler exactly once with
    the event payload (legacy Event-class dispatcher API).
    """
    config = rabbit_config

    container = container_factory(Service, config)
    container.start()

    msg = "msg"
    with event_dispatcher('srcservice', config) as dispatch:
        dispatch(TestEvent(msg))

    # poll (up to 1s) until the handler has been called
    with wait_for_call(1, handler_called):
        handler_called.assert_called_once_with(msg)
def test_multiple_runners_coexist(runner_factory, rabbit_config,
                                  rabbit_manager):
    """ Two runners hosting the same service should both consume events
    (broadcast) while sharing the rpc queue (exactly one responder).
    """
    runner1 = runner_factory(rabbit_config, Service)
    runner1.start()

    runner2 = runner_factory(rabbit_config, Service)
    runner2.start()

    vhost = rabbit_config['vhost']

    # verify there are two event queues with a single consumer each
    def check_consumers():
        evt_queues = [
            queue for queue in rabbit_manager.get_queues(vhost)
            if queue['name'].startswith('evt-srcservice-testevent')
        ]
        assert len(evt_queues) == 2
        for queue in evt_queues:
            assert queue['consumers'] == 1

    # rabbit's management API seems to lag
    assert_stops_raising(check_consumers)

    # test events (both services will receive if in "broadcast" mode)
    event_data = "msg"
    dispatch = event_dispatcher(rabbit_config)
    dispatch('srcservice', "testevent", event_data)

    # poll until both containers have handled the event
    with eventlet.Timeout(1):
        while len(received) < 2:
            eventlet.sleep()
    assert received == [event_data, event_data]

    # verify there are two consumers on the rpc queue
    rpc_queue = rabbit_manager.get_queue(vhost, 'rpc-service')
    assert rpc_queue['consumers'] == 2

    # test rpc (only one service will respond)
    del received[:]  # reset the shared tracker before the rpc phase
    arg = "msg"
    with ServiceRpcProxy('service', rabbit_config) as proxy:
        proxy.handle(arg)

    with eventlet.Timeout(1):
        while len(received) == 0:
            eventlet.sleep()
    assert received == [arg]
def test_crawling_url(container_factory, rabbit_config, web_container_config):
    """ Crawling a url should store the url and its metadata (no group)
    and dispatch a follow-up 'url_crawled' event.
    """
    crawler_container = container_factory(CrawlerService, web_container_config)
    storage, dispatch_dep = replace_dependencies(
        crawler_container, 'storage', 'dispatch'
    )
    crawler_container.start()

    send_event = event_dispatcher(rabbit_config)
    payload = ['http://example.org/test_crawling_url', None, None]

    with entrypoint_waiter(crawler_container, 'check_url'):
        send_event('http_server', 'url_to_check', payload)

    assert storage.store_url.call_count == 1
    assert storage.store_group.call_count == 0
    assert storage.store_metadata.call_count == 1
    # fired 'url_crawled'
    assert dispatch_dep.call_count == 1
def dispatch_event(
    self, event_type, service_name="scheduler_service", event_data=""
):
    """ Dispatch an event of `event_type` on behalf of `service_name`,
    logging the dispatch before sending.
    """
    logging.info(
        "Event %s start - Service %s - Data %s ",
        event_type,
        service_name,
        event_data
    )
    send = event_dispatcher(self.CONFIG)
    send(service_name, event_type, event_data)
def test_multiple_runners_coexist(runner_factory, rabbit_config,
                                  rabbit_manager, service_cls, tracker):
    """ Two runners hosting the same service should both consume events
    (broadcast) while sharing the rpc queue (exactly one responder).
    """
    runner1 = runner_factory(rabbit_config, service_cls)
    runner1.start()

    runner2 = runner_factory(rabbit_config, service_cls)
    runner2.start()

    vhost = rabbit_config['vhost']

    # verify there are two event queues with a single consumer each
    def check_consumers():
        evt_queues = [
            queue for queue in rabbit_manager.get_queues(vhost)
            if queue['name'].startswith('evt-srcservice-testevent')
        ]
        assert len(evt_queues) == 2
        for queue in evt_queues:
            assert queue['consumers'] == 1

    # rabbit's management API seems to lag
    assert_stops_raising(check_consumers)

    # test events (both services will receive if in "broadcast" mode)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)

    container1 = list(runner1.containers)[0]
    container2 = list(runner2.containers)[0]

    with entrypoint_waiter(container1, "handle"):
        with entrypoint_waiter(container2, "handle"):
            dispatch('srcservice', "testevent", event_data)

    assert tracker.call_args_list == [call(event_data), call(event_data)]

    # verify there are two consumers on the rpc queue
    rpc_queue = rabbit_manager.get_queue(vhost, 'rpc-service')
    assert rpc_queue['consumers'] == 2

    # test rpc (only one service will respond)
    arg = "arg"
    with ServiceRpcProxy('service', rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [
        call(event_data), call(event_data), call(arg)
    ]
def test_multiple_runners_coexist(
    runner_factory, rabbit_config, rabbit_manager
):
    """ Two runners hosting the same service should both consume events
    (broadcast) while sharing the rpc queue (exactly one responder).
    """
    runner1 = runner_factory(rabbit_config, Service)
    runner1.start()

    runner2 = runner_factory(rabbit_config, Service)
    runner2.start()

    vhost = rabbit_config['vhost']

    # verify there are two event queues with a single consumer each
    def check_consumers():
        evt_queues = [queue for queue in rabbit_manager.get_queues(vhost)
                      if queue['name'].startswith('evt-srcservice-testevent')]
        assert len(evt_queues) == 2
        for queue in evt_queues:
            assert queue['consumers'] == 1

    # rabbit's management API seems to lag
    assert_stops_raising(check_consumers)

    # test events (both services will receive if in "broadcast" mode)
    event_data = "msg"
    dispatch = event_dispatcher(rabbit_config)
    dispatch('srcservice', "testevent", event_data)

    # poll until both containers have handled the event
    with eventlet.Timeout(1):
        while len(received) < 2:
            eventlet.sleep()
    assert received == [event_data, event_data]

    # verify there are two consumers on the rpc queue
    rpc_queue = rabbit_manager.get_queue(vhost, 'rpc-service')
    assert rpc_queue['consumers'] == 2

    # test rpc (only one service will respond)
    del received[:]  # reset the shared tracker before the rpc phase
    arg = "msg"
    with ServiceRpcProxy('service', rabbit_config) as proxy:
        proxy.handle(arg)

    with eventlet.Timeout(1):
        while len(received) == 0:
            eventlet.sleep()
    assert received == [arg]
def get_event_dispatcher():
    """Return the process-wide nameko event dispatcher, creating it
    lazily on first use.

    Uses double-checked locking: the unlocked check avoids lock
    contention on the hot path, and the re-check inside the lock
    prevents duplicate initialisation.

    Raises ImproperlyConfigured if NAMEKO_CONFIG is missing/empty.
    """
    global nameko_event_dispatcher
    if not nameko_event_dispatcher:
        NAMEKO_CONFIG = getattr(settings, 'NAMEKO_CONFIG', {})
        if not NAMEKO_CONFIG:
            raise ImproperlyConfigured('NAMEKO_CONFIG must be specified')
        # Lazy instantiation: acquire the lock first to prevent duplicate
        # initialisation from concurrent callers
        with create_event_dispatcher_lock:
            # double-check inside the lock is important: another thread
            # may have initialised the dispatcher while we waited
            if not nameko_event_dispatcher:
                # prefer the 'default' alias when present, else treat the
                # whole mapping as the nameko config
                nameko_event_dispatcher = event_dispatcher(
                    NAMEKO_CONFIG.get('default', NAMEKO_CONFIG)
                )
        # lock is released automatically when the `with` block exits
    return nameko_event_dispatcher
def test_entrypoint_waiter_result_teardown_race(
    container_factory, rabbit_config, counter
):
    """ The entrypoint_waiter must not exit until worker teardown has
    completed for the final matched worker, even when results and
    teardowns race across multiple dispatches.
    """
    tracker = Mock()

    class TrackingDependency(DependencyProvider):
        # records lifecycle callbacks on the shared tracker mock
        def worker_result(self, worker_ctx, res, exc_info):
            tracker.worker_result()

        def worker_teardown(self, worker_ctx):
            tracker.worker_teardown()

    class Service(object):
        name = "service"

        tracker = TrackingDependency()

        @event_handler('srcservice', 'eventtype')
        def handle(self, msg):
            tracker.handle(msg)

    container = container_factory(Service, rabbit_config)
    container.start()

    def wait_for_two_calls(worker_ctx, res, exc_info):
        # release the waiter only after the second worker completes
        return counter.count() > 1

    dispatch = event_dispatcher(rabbit_config)

    with entrypoint_waiter(container, 'handle', callback=wait_for_two_calls):

        # dispatch the first message
        # wait until teardown has happened
        with wait_for_call(TrackingDependency, 'worker_teardown'):
            dispatch('srcservice', 'eventtype', "msg")

        assert tracker.worker_teardown.call_count == 1
        assert tracker.worker_result.call_count == 1
        assert tracker.handle.call_count == 1

        # dispatch the second event
        dispatch('srcservice', 'eventtype', "msg")

    # we should wait for the second teardown to complete before exiting
    # the entrypoint waiter
    assert tracker.worker_teardown.call_count == 2
    assert tracker.worker_result.call_count == 2
    assert tracker.handle.call_count == 2
def test_multiple_runners_coexist(
    runner_factory, rabbit_config, rabbit_manager, service_cls, tracker
):
    """ Two runners hosting the same service should both consume events
    (broadcast) while sharing the rpc queue (exactly one responder).
    """
    runner1 = runner_factory(rabbit_config, service_cls)
    runner1.start()

    runner2 = runner_factory(rabbit_config, service_cls)
    runner2.start()

    vhost = rabbit_config['vhost']

    # verify there are two event queues with a single consumer each
    def check_consumers():
        evt_queues = [queue for queue in rabbit_manager.get_queues(vhost)
                      if queue['name'].startswith('evt-srcservice-testevent')]
        assert len(evt_queues) == 2
        for queue in evt_queues:
            assert queue['consumers'] == 1

    # rabbit's management API seems to lag
    assert_stops_raising(check_consumers)

    # test events (both services will receive if in "broadcast" mode)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)

    container1 = list(runner1.containers)[0]
    container2 = list(runner2.containers)[0]

    with entrypoint_waiter(container1, "handle"):
        with entrypoint_waiter(container2, "handle"):
            dispatch('srcservice', "testevent", event_data)

    assert tracker.call_args_list == [call(event_data), call(event_data)]

    # verify there are two consumers on the rpc queue
    rpc_queue = rabbit_manager.get_queue(vhost, 'rpc-service')
    assert rpc_queue['consumers'] == 2

    # test rpc (only one service will respond)
    arg = "arg"
    with ServiceRpcProxy('service', rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [
        call(event_data), call(event_data), call(arg)
    ]
def test_entrypoint_waiter_duplicate(container_factory, rabbit_config):
    """ Nested waiters on the same entrypoint should both be released by
    a single firing, and the handler should run exactly once.
    """
    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            # record the call on the module-level mock
            handle_event(msg)

    container = container_factory(Service, rabbit_config)
    container.start()

    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, 'handle_event'):
        with entrypoint_waiter(container, 'handle_event'):
            dispatch('srcservice', 'eventtype', "msg")

    assert handle_event.call_args_list == [call("msg")]
def test_event_dispatcher_over_ssl(
    self, container_factory, rabbit_ssl_config, rabbit_config
):
    """ A standalone dispatcher connected over SSL should deliver events
    to a service consuming over a non-SSL connection.
    """
    class Service(object):
        name = "service"

        @event_handler("service", "event")
        def echo(self, event_data):
            return event_data

    container = container_factory(Service, rabbit_config)
    container.start()

    # dispatcher uses the SSL broker config
    send_event = event_dispatcher(rabbit_ssl_config)

    with entrypoint_waiter(container, 'echo') as result:
        send_event("service", "event", "payload")

    assert result.get() == "payload"
def test_entrypoint_waiter_result(container_factory, rabbit_config):
    """ The waiter's result object should expose the entrypoint's return
    value once the event has been handled.
    """
    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            return msg.upper()

    container = container_factory(Service, rabbit_config)
    container.start()

    send_event = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, 'handle_event') as result:
        send_event('srcservice', 'eventtype', "msg")

    assert result.get() == "MSG"
def test_crawling_head_offender_url(
        container_factory, web_container_config, rmock=None):
    """ Domains on the HEAD blacklist should be fetched with GET only
    (no preliminary HEAD request), while still being stored normally.

    NOTE(review): `rmock=None` suggests this test is wrapped by a
    requests-mock decorator that injects the mock positionally — confirm,
    since calling `rmock.head` on the default None would fail.
    """
    url_to_check = 'http://example-head.com/test_crawling_url'
    rmock.head(url_to_check)
    rmock.get(url_to_check, text='xxx')
    # blacklist the domain so the crawler must skip the HEAD probe
    web_container_config['HEAD_DOMAINS_BLACKLIST'] = ['example-head.com']

    crawler_container = container_factory(CrawlerService, web_container_config)
    storage = replace_dependencies(crawler_container, 'storage')
    crawler_container.start()

    dispatch = event_dispatcher(web_container_config)
    with entrypoint_waiter(crawler_container, 'check_url'):
        dispatch('http_server', 'url_to_check', [url_to_check, None, None])

    assert storage.store_url.call_count == 1
    assert storage.store_group.call_count == 0
    assert storage.store_metadata.call_count == 1

    # check that no HEAD method was called
    requests_l = filter_mock_requests(url_to_check, rmock.request_history)
    assert len(requests_l) == 1
    assert requests_l[0].method == 'GET'
def make_nameko_helper(config):
    """Create a fake module that provides some convenient access to nameko
    standalone functionality for interactive shell usage.
    """
    module = ModuleType('nameko')
    # runtime help text shown by help(n) in the shell
    module.__doc__ = """Nameko shell helper for making rpc calls and dispatching events.

Usage:
    >>> n.rpc.service.method()
    "reply"

    >>> n.dispatch_event('service', 'event_type', 'event_data')
"""
    proxy = ClusterRpcProxy(config)
    module.rpc = proxy.start()
    module.dispatch_event = event_dispatcher(config)
    module.config = config
    # expose a hook to close the rpc proxy's connection
    module.disconnect = proxy.stop
    return module
def test_restrict_entrypoints(container_factory, rabbit_config):
    """ restrict_entrypoints should disable all entrypoints except the
    named ones: the rpc/@once entrypoints on handler_one are removed
    while the event handler on handler_two keeps working.
    """
    method_called = Mock()

    class Service(object):
        name = "service"

        @rpc
        @once("assert not seen")
        def handler_one(self, arg):
            method_called(arg)

        @event_handler("srcservice", "eventtype")
        def handler_two(self, msg):
            method_called(msg)

    container = container_factory(Service, rabbit_config)

    # disable the entrypoints on handler_one
    restrict_entrypoints(container, "handler_two")
    container.start()

    # verify the rpc entrypoint on handler_one is disabled
    with ServiceRpcProxy("service", rabbit_config) as service_proxy:
        with pytest.raises(MethodNotFound) as exc_info:
            service_proxy.handler_one("msg")
        assert str(exc_info.value) == "handler_one"

    # dispatch an event to handler_two
    msg = "msg"
    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, "handler_two"):
        dispatch("srcservice", "eventtype", msg)

    # method_called should have exactly one call, derived from the event
    # handler and not from the disabled @once entrypoint
    method_called.assert_called_once_with(msg)
def test_event_broadcast(self, container_factory, rabbit_config):
    """ A broadcast event should be received by every listening
    container, not just one.
    """
    from examples.event_broadcast import ListenerService

    container_1 = container_factory(ListenerService, rabbit_config)
    container_2 = container_factory(ListenerService, rabbit_config)
    container_1.start()
    container_2.start()

    dispatch = event_dispatcher(rabbit_config)

    # patch the handler so we can count calls across both containers
    with patch.object(ListenerService, 'ping') as ping:
        waiter_1 = entrypoint_waiter(container_1, 'ping')
        waiter_2 = entrypoint_waiter(container_2, 'ping')
        with waiter_1, waiter_2:
            dispatch("monitor", "ping", "payload")
        assert ping.call_count == 2

    # test without the patch to catch any errors in the handler method
    with entrypoint_waiter(container_1, 'ping'):
        dispatch("monitor", "ping", "payload")
def test_entrypoint_waiter_nested(container_factory, rabbit_config):
    """ Nested waiters on different entrypoints should each be released
    by their own entrypoint firing.
    """
    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype1')
        def handle_event1(self, msg):
            handle_event(1)

        @event_handler('srcservice', 'eventtype2')
        def handle_event2(self, msg):
            handle_event(2)

    container = container_factory(Service, rabbit_config)
    container.start()

    send_event = event_dispatcher(rabbit_config)

    waiter1 = entrypoint_waiter(container, 'handle_event1')
    waiter2 = entrypoint_waiter(container, 'handle_event2')
    with waiter1, waiter2:
        send_event('srcservice', 'eventtype1', "")
        send_event('srcservice', 'eventtype2', "")

    # both handlers fired, in either order
    assert call(1) in handle_event.call_args_list
    assert call(2) in handle_event.call_args_list
def test_entrypoint_waiter_with_callback(container_factory, rabbit_config):
    """ A waiter with a callback should only exit once the callback
    returns True — here, after two results have been collected.
    """
    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            return msg

    container = container_factory(Service, rabbit_config)
    container.start()

    results = []

    def collect_two(worker_ctx, res, exc_info):
        # keep waiting until two entrypoint results have been seen
        results.append((res, exc_info))
        return len(results) == 2

    send_event = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, 'handle_event', callback=collect_two):
        send_event('srcservice', 'eventtype', "msg1")
        send_event('srcservice', 'eventtype', "msg2")

    assert results == [("msg1", None), ("msg2", None)]
def increment_forever():
    """Dispatch an ever-increasing counter as 'eventtype' events, forever.

    Yields control to other green threads after each dispatch.
    """
    dispatch = event_dispatcher(rabbit_config)
    for count in itertools.count():
        dispatch('srcservice', 'eventtype', count)
        # Fix: time.sleep() with no argument is a TypeError on the stock
        # stdlib; it only worked under eventlet monkey-patching, where
        # sleep defaults to 0. sleep(0) forces a yield in both cases.
        time.sleep(0)  # force yield
def dispatcher():
    """Yield an event dispatch function bound to the 'cinch' service,
    closing the dispatcher's connection on generator cleanup.
    """
    nameko_config = get_nameko_config()
    with event_dispatcher('cinch', nameko_config) as dispatch:
        yield dispatch
# Minimal standalone-dispatcher example: build a dispatcher from an AMQP
# config and fire a single event.
from nameko.standalone.events import event_dispatcher

config = {
    'AMQP_URI': AMQP_URI  # e.g. "pyamqp://*****:*****@localhost"
}

dispatch = event_dispatcher(config)
# dispatch(source_service, event_type, payload); payload may be any
# serialisable value, including non-ASCII text
dispatch("service_a", "event_type", "payløad")