def test_reconnect_on_socket_error(rabbit_config):
    """QueueConsumer should revive its connection after a socket error.

    `on_connection_revived` being called more than once proves the
    reconnect loop keeps running while `drain_events` raises.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    connection_revived = Mock()

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()
    queue_consumer.on_connection_revived = connection_revived

    handler = MessageHandler()

    queue_consumer.register_provider(handler)
    queue_consumer.start()

    with patch.object(
            Connection, 'drain_events', autospec=True) as drain_events:
        # every drain attempt fails, forcing a reconnect each time
        drain_events.side_effect = socket.error('test-error')

        def check_reconnected():
            assert connection_revived.call_count > 1
        assert_stops_raising(check_reconnected)

    queue_consumer.unregister_provider(handler)
    queue_consumer.stop()
def sagemaker_session():
    """Build a mock SageMaker session pinned to REGION with a default bucket."""
    boto_session = Mock(name='boto_session', region_name=REGION)
    session = Mock(name='sagemaker_session', boto_session=boto_session)
    session.boto_region_name = REGION
    session.default_bucket = Mock(
        name='default_bucket', return_value=BUCKET_NAME)
    session.config = None
    return session
def test_consume_from_rabbit(rabbit_manager, rabbit_config):
    """End-to-end check of ConsumeProvider against a live RabbitMQ broker.

    Verifies exchange/queue/binding declaration, worker spawning with the
    expected context data, and that acking lets the consumer stop cleanly.
    """
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.worker_ctx_cls = CustomWorkerContext
    container.service_name = "service"
    container.config = rabbit_config
    container.max_workers = 10

    def spawn_thread(method, protected):
        # the container normally manages threads; here spawn directly
        return eventlet.spawn(method)
    container.spawn_managed_thread = spawn_thread

    worker_ctx = CustomWorkerContext(container, None, None)

    factory = DependencyFactory(
        ConsumeProvider, queue=foobar_queue, requeue_on_error=False)
    consumer = factory.create_and_bind_instance("injection_name", container)

    # prepare and start dependencies
    consumer.prepare()
    consumer.queue_consumer.prepare()
    consumer.start()
    consumer.queue_consumer.start()

    # test queue, exchange and binding created in rabbit
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    bindings = rabbit_manager.get_queue_bindings(vhost, foobar_queue.name)

    assert "foobar_ex" in [exchange['name'] for exchange in exchanges]
    assert "foobar_queue" in [queue['name'] for queue in queues]
    assert "foobar_ex" in [binding['source'] for binding in bindings]

    # test message consumed from queue
    container.spawn_worker.return_value = worker_ctx

    headers = {'nameko.language': 'en', 'nameko.customheader': 'customvalue'}
    rabbit_manager.publish(
        vhost, foobar_ex.name, '', 'msg',
        properties=dict(headers=headers))

    ctx_data = {
        'language': 'en',
        'customheader': 'customvalue',
    }
    with wait_for_call(CONSUME_TIMEOUT, container.spawn_worker) as method:
        method.assert_called_once_with(
            consumer, ('msg', ), {}, context_data=ctx_data,
            handle_result=ANY_PARTIAL)
        handle_result = method.call_args[1]['handle_result']

    # ack message
    handle_result(worker_ctx, 'result')

    # stop will hang if the consumer hasn't acked or requeued messages
    with eventlet.timeout.Timeout(CONSUME_TIMEOUT):
        consumer.stop()
def test_publish_to_queue(empty_config, maybe_declare, patch_publisher):
    """Publisher declares its queue on setup and forwards publish kwargs,
    adding the nameko context headers derived from the worker context."""
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = empty_config

    ctx_data = {"language": "en"}
    service = Mock()
    worker_ctx = WorkerContext(
        container, service, DummyProvider("publish"), data=ctx_data)

    publisher = Publisher(queue=foobar_queue).bind(container, "publish")

    producer = Mock()
    connection = Mock()

    get_connection, get_producer = patch_publisher(publisher)

    get_connection.return_value = as_context_manager(connection)
    get_producer.return_value = as_context_manager(producer)

    # test declarations
    publisher.setup()
    maybe_declare.assert_called_once_with(foobar_queue, connection)

    # test publish
    msg = "msg"
    headers = {
        "nameko.language": "en",
        "nameko.call_id_stack": ["srcservice.publish.0"],
    }
    service.publish = publisher.get_dependency(worker_ctx)
    service.publish(msg, publish_kwarg="value")
    producer.publish.assert_called_once_with(
        msg, headers=headers, exchange=foobar_ex, retry=True,
        retry_policy=DEFAULT_RETRY_POLICY, publish_kwarg="value")
def test_responder_worker_exc(mock_publish):
    """A serialisable worker exception is published as a structured error."""
    message = Mock()
    message.properties = {"reply_to": ""}

    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ""}

    responder = Responder(message)

    # serialisable exception
    error = Exception("error")
    result, exc_info = responder.send_response(
        container, None, (Exception, error, "tb"))

    # the original exc_info tuple is passed straight back
    assert result is None
    assert exc_info == (Exception, error, "tb")

    expected = {
        "result": None,
        "error": {
            "exc_path": "exceptions.Exception",
            "value": "error",
            "exc_type": "Exception",
            "exc_args": ("error",),
        },
    }
    (payload,), _ = mock_publish.call_args
    assert payload == expected
def test_dispatch_to_rabbit(rabbit_manager, rabbit_config):
    """EventDispatcher declares its events exchange and publishes payloads
    routed by event type (verified via a manually bound capture queue)."""
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = rabbit_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, DummyProvider())

    dispatcher = EventDispatcher().bind(container, 'dispatch')
    dispatcher.setup()
    dispatcher.start()

    # we should have an exchange but no queues
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    assert "srcservice.events" in [exchange['name'] for exchange in exchanges]
    assert queues == []

    # manually add a queue to capture the events
    rabbit_manager.create_queue(vhost, "event-sink", auto_delete=True)
    rabbit_manager.create_queue_binding(
        vhost, "srcservice.events", "event-sink", routing_key="eventtype")

    service.dispatch = dispatcher.get_dependency(worker_ctx)
    service.dispatch("eventtype", "msg")

    # test event received on manually added queue
    messages = rabbit_manager.get_messages(vhost, "event-sink")
    assert ['msg'] == [msg['payload'] for msg in messages]
def test_dispatch_to_rabbit(rabbit_manager, rabbit_config):
    """Events dispatched via the provider land on a manually bound queue.

    Older provider API variant (bind/prepare/inject); duplicates the name of
    the previous test — these appear to come from different source files.
    """
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = rabbit_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, None)

    dispatcher = EventDispatcher()
    dispatcher.bind("dispatch", container)

    dispatcher.prepare()
    dispatcher.start()

    # we should have an exchange but no queues
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    assert "srcservice.events" in [exchange['name'] for exchange in exchanges]
    assert queues == []

    # manually add a queue to capture the events
    rabbit_manager.create_queue(vhost, "event-sink", auto_delete=True)
    rabbit_manager.create_binding(
        vhost, "srcservice.events", "event-sink",
        rt_key=ExampleEvent.type)

    dispatcher.inject(worker_ctx)
    service.dispatch(ExampleEvent("msg"))

    # test event received on manually added queue
    messages = rabbit_manager.get_messages(vhost, "event-sink")
    assert ['msg'] == [msg['payload'] for msg in messages]
def test_publish_to_exchange(empty_config, maybe_declare, patch_publisher):
    """PublishProvider declares its exchange on prepare and publishes to it
    with the call-id-stack header and the default retry policy."""
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, DummyProvider("publish"))

    publisher = PublishProvider(exchange=foobar_ex)
    publisher.bind("publish", container)

    producer = Mock()
    connection = Mock()

    get_connection, get_producer = patch_publisher(publisher)

    get_connection.return_value = as_context_manager(connection)
    get_producer.return_value = as_context_manager(producer)

    # test declarations
    publisher.prepare()
    maybe_declare.assert_called_once_with(foobar_ex, connection)

    # test publish
    msg = "msg"
    publisher.inject(worker_ctx)
    service.publish(msg, publish_kwarg="value")
    headers = {
        'nameko.call_id_stack': ['srcservice.publish.0']
    }
    producer.publish.assert_called_once_with(
        msg, headers=headers, exchange=foobar_ex, retry=True,
        retry_policy=DEFAULT_RETRY_POLICY, publish_kwarg="value")
def test_reconnect_on_socket_error():
    """QueueConsumer keeps reconnecting while drain_events raises socket.error.

    Variant of the rabbit-backed test above without broker fixtures or
    teardown; duplicates its name — likely from a different source file.
    """
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: None}
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    connection_revived = Mock()

    queue_consumer = QueueConsumer()
    queue_consumer.on_connection_revived = connection_revived
    queue_consumer.bind("queue_consumer", container)

    handler = MessageHandler()

    queue_consumer.register_provider(handler)
    queue_consumer.start()

    with patch.object(
            Connection, 'drain_events', autospec=True) as drain_events:
        drain_events.side_effect = socket.error('test-error')

        def check_reconnected():
            # more than one revival proves the reconnect loop kept going
            assert connection_revived.call_count > 1
        assert_stops_raising(check_reconnected)
def test_event_dispatcher(empty_config):
    """EventDispatcher derives its exchange name from the service name and
    publishes event data with the event type as routing key."""
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, "dispatch")

    event_dispatcher = EventDispatcher()
    event_dispatcher.bind("dispatch", container)

    path = 'nameko.messaging.PublishProvider.prepare'
    with patch(path, autospec=True) as prepare:
        # test start method
        event_dispatcher.prepare()
        assert event_dispatcher.exchange.name == "srcservice.events"
        assert prepare.called

    evt = Mock(type="eventtype", data="msg")
    event_dispatcher.inject(worker_ctx)

    producer = Mock()

    with patch.object(
            event_dispatcher, 'get_producer', autospec=True) as get_producer:
        get_producer.return_value = as_context_manager(producer)

        # test dispatch
        service.dispatch(evt)

        headers = event_dispatcher.get_message_headers(worker_ctx)
        producer.publish.assert_called_once_with(
            evt.data, exchange=event_dispatcher.exchange, headers=headers,
            routing_key=evt.type)
def test_send_email_no_auth(self, smtplib):
    """Email is sent as expected when no SMTP credentials are configured."""
    app = Mock()
    app.config = {
        'SMTP_FROM': '*****@*****.**',
        'SMTP_HOST': 'remote-host',
        'SMTP_PORT': 9160,
        'SMTP_USER': None,
        'SMTP_PASS': None,
        'SMTP_CERT': None,
    }
    send_email(app, '*****@*****.**', 'da subject', 'body content')
    smtplib.SMTP.assert_called_with('remote-host', 9160)
    args = smtplib.SMTP().sendmail.call_args
    # positional args are (from_addr, to_addrs, raw_message)
    assert args[0][:2] == (app.config['SMTP_FROM'], ['*****@*****.**'])
    assert args[0][2].endswith('body content')
    assert 'To: [email protected]' in args[0][2]
    assert 'Subject: da subject' in args[0][2]
def test_kill_closes_connections(rabbit_manager, rabbit_config):
    """kill() must close every AMQP connection the consumer holds."""
    container = Mock()
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)

    class Handler(object):
        # minimal provider: just declares a queue, ignores messages
        queue = ham_queue

        def handle_message(self, body, message):
            pass

    queue_consumer.register_provider(Handler())
    queue_consumer.start()

    # kill should close all connections
    queue_consumer.kill()

    # no connections should remain for our vhost
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    if connections:
        for connection in connections:
            assert connection['vhost'] != vhost
def test_get_vm_uuid_not_vm(self):
    """load_vm_uuid_by_name raises ValueError for an object that is not a VM."""
    vm = Mock()
    vm.config = Mock()
    vm.config.uuid = 'this is the uuid'
    self.pv_service.find_vm_by_name = Mock(return_value=vm)

    self.assertRaises(
        ValueError,
        self.vm_loader.load_vm_uuid_by_name,
        self.si,
        self.vc_model,
        'path',
    )
def test_publish_to_queue(empty_config, maybe_declare, patch_publisher):
    """PublishProvider declares its queue on prepare and adds context headers.

    Older provider API variant (bind/prepare/inject); duplicates the name of
    the Publisher-based test above — likely from a different source file.
    """
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    ctx_data = {'language': 'en'}
    service = Mock()
    worker_ctx = WorkerContext(container, service, "publish", data=ctx_data)

    publisher = PublishProvider(queue=foobar_queue)
    publisher.bind("publish", container)

    producer = Mock()
    connection = Mock()

    get_connection, get_producer = patch_publisher(publisher)

    get_connection.return_value = as_context_manager(connection)
    get_producer.return_value = as_context_manager(producer)

    # test declarations
    publisher.prepare()
    maybe_declare.assert_called_once_with(foobar_queue, connection)

    # test publish
    msg = "msg"
    headers = {
        'nameko.language': 'en',
        'nameko.call_id_stack': ['srcservice.publish.0'],
    }
    publisher.inject(worker_ctx)
    service.publish(msg)
    producer.publish.assert_called_once_with(
        msg, headers=headers, exchange=foobar_ex)
def test_unserialisable_headers(rabbit_manager, rabbit_config):
    """Headers with unserialisable (None) values are dropped on publish."""
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.service_name = "service"
    container.config = rabbit_config
    container.spawn_managed_thread = eventlet.spawn

    ctx_data = {'language': 'en', 'customheader': None}
    service = Mock()
    worker_ctx = CustomWorkerContext(
        container, service, 'method', data=ctx_data)

    publisher = PublishProvider(exchange=foobar_ex, queue=foobar_queue)
    publisher.bind("publish", container)

    publisher.prepare()
    publisher.start()
    publisher.inject(worker_ctx)

    service.publish("msg")
    messages = rabbit_manager.get_messages(vhost, foobar_queue.name)

    assert messages[0]['properties']['headers'] == {
        'nameko.language': 'en',
        'nameko.call_id_stack': ['service.method.0'],
        # no `customheader`
    }
def test_unserialisable_headers(rabbit_manager, rabbit_config):
    """Unserialisable context values are dropped from published headers.

    Newer extension API variant (bind/setup/get_dependency); duplicates the
    name of the previous test — likely from a different source file.
    """
    vhost = rabbit_config["vhost"]

    container = Mock(spec=ServiceContainer)
    container.service_name = "service"
    container.config = rabbit_config
    container.spawn_managed_thread = eventlet.spawn

    ctx_data = {"language": "en", "customheader": None}
    service = Mock()
    worker_ctx = CustomWorkerContext(
        container, service, DummyProvider("method"), data=ctx_data)

    publisher = Publisher(
        exchange=foobar_ex, queue=foobar_queue).bind(container, "publish")

    publisher.setup()
    publisher.start()

    service.publish = publisher.get_dependency(worker_ctx)
    service.publish("msg")
    messages = rabbit_manager.get_messages(vhost, foobar_queue.name)

    assert messages[0]["properties"]["headers"] == {
        "nameko.language": "en",
        "nameko.call_id_stack": ["service.method.0"],
        # no `customheader`
    }
def test_responder_unserializable_exc(mock_publish):
    """An unserialisable worker exception is reported as an
    UnserializableValueError payload, and the json TypeError is returned."""
    message = Mock()
    message.properties = {"reply_to": ""}

    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ""}

    responder = Responder(message)

    # unserialisable exception
    worker_exc = Exception(object())
    result, exc_info = responder.send_response(
        container, True, (Exception, worker_exc, "tb"))

    # responder will return the TypeError from json.dumps
    assert result is None
    assert exc_info == (TypeError, ANY, ANY)
    # NOTE(review): `.message` is a Python 2-only exception attribute;
    # this assertion would need updating for Python 3.
    assert exc_info[1].message == ("{} is not JSON "
                                   "serializable".format(worker_exc.args[0]))

    # and publish a dictionary-serialized UnserializableValueError
    # (where the unserialisable value is a dictionary-serialized worker_exc)
    serialized_exc = serialize(worker_exc)
    expected_msg = {
        "result": None,
        "error": {
            "exc_path": "nameko.exceptions.UnserializableValueError",
            "value": "Unserializable value: `{}`".format(serialized_exc),
            "exc_type": "UnserializableValueError",
            "exc_args": (),
        },
    }
    (msg,), _ = mock_publish.call_args
    assert msg == expected_msg
def test_vm_get_network_by_name_1(self):
    """No network is returned when no NIC matches the requested MAC."""
    # Arrange: one VM with a single ethernet card at MAC 'AA-BB'
    pv_service = pyVmomiService(None, None, Mock())
    pv_service.wait_for_task = Mock()

    network = Mock()
    network.name = 'main_network'

    backing = Mock()
    backing.network = network

    nic = create_autospec(vim.vm.device.VirtualEthernetCard)
    nic.macAddress = 'AA-BB'
    nic.backing = backing

    hardware = Mock()
    hardware.device = [nic]

    config = Mock()
    config.hardware = hardware

    vm = Mock()
    vm.config = config

    # Act: look up a MAC that does not exist on the VM
    found = pv_service.get_network_by_mac_address(vm, 'BB-CC')

    # Assert
    self.assertIsNone(found)
def test_event_dispatcher(empty_config):
    """Standalone dispatch publishes with the configured retry policy and
    the service-derived events exchange."""
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, DummyProvider("dispatch"))

    event_dispatcher = EventDispatcher(retry_policy={'max_retries': 5}).bind(
        container, attr_name="dispatch")
    event_dispatcher.setup()

    service.dispatch = event_dispatcher.get_dependency(worker_ctx)

    # NOTE(review): function-local import kept as-is; `mock.ANY` is used
    # both as a dict key on the patched producers and in the assertion
    from mock import ANY
    with patch('nameko.standalone.events.producers') as mock_producers:
        with mock_producers[ANY].acquire() as mock_producer:
            service.dispatch('eventtype', 'msg')

    headers = event_dispatcher.get_message_headers(worker_ctx)
    mock_producer.publish.assert_called_once_with(
        'msg', exchange=ANY, headers=headers, routing_key='eventtype',
        retry=True, retry_policy={'max_retries': 5})

    _, call_kwargs = mock_producer.publish.call_args
    exchange = call_kwargs['exchange']
    assert exchange.name == 'srcservice.events'
def test_nova_responder_cannot_str_exc(mock_publish):
    """An exception whose __str__ raises is reported as '[__str__ failed]'."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}
    responder = NovaResponder("msgid")

    class BadException(Exception):
        def __str__(self):
            raise Exception('boom')

    # un-str-able exception
    exc = BadException()
    result, exc_info = responder.send_response(
        container, True, (BadException, exc, "tb"))

    # the original result and exc_info are passed straight back
    assert result is True
    assert exc_info == (BadException, exc, "tb")

    assert mock_publish.call_count == 2
    data_call, _ = mock_publish.call_args_list
    (data_msg,), _ = data_call
    assert data_msg == {
        'failure': ('BadException', "[__str__ failed]"),
        'result': True,
        'ending': False,
    }
def test_update_cluster_with_same_discovery(self):
    """Re-applying the current discovery flags nodes_updated but leaves
    the discovery watches untouched."""
    riak_discovery = Mock()
    riak_discovery.name = "riak"
    dns_discovery = Mock()
    dns_discovery.name = "dns"

    cluster = Mock()
    cluster.name = "app"
    cluster.discovery = "riak"
    cluster.config = {"discovery": "riak"}

    writer = Writer("/etc/configs")
    writer.add_configurable(Cluster, "app", cluster)
    writer.add_configurable(Discovery, "riak", riak_discovery)
    writer.add_configurable(Discovery, "dns", dns_discovery)

    writer.update_configurable(Cluster, cluster.name, {"discovery": "riak"})

    self.assertEqual(writer.nodes_updated.is_set(), True)
    # same discovery as before: no watch should be stopped or started
    self.assertEqual(riak_discovery.stop_watching.called, False)
    self.assertEqual(dns_discovery.start_watching.called, False)
def test_nova_responder(mock_publish):
    """A plain result publishes a data message followed by an ending marker."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}
    responder = NovaResponder("msgid")

    # serialisable result
    result, exc_info = responder.send_response(container, True, None)
    assert result is True
    assert exc_info is None

    assert mock_publish.call_count == 2
    data_call, marker_call = mock_publish.call_args_list
    (data_msg,), _ = data_call
    (marker_msg,), _ = marker_call
    assert data_msg == {'failure': None, 'result': True, 'ending': False}
    assert marker_msg == {'failure': None, 'result': None, 'ending': True}
def test_create_pad(self):
    """HackpadWrapper.create_pad delegates to the session and returns the
    new pad id from the session's response."""
    # Define desired values
    my_subdomain = 'sub'
    my_key = 'key'
    my_secret = 'secret'
    my_title = 'My Title'
    my_content = 'My exciting content!'
    new_pad_id = 'qo0m5xqu9'

    # Create a bunch of Mock objects
    mock_settings = Mock()
    mock_settings.config = {
        'hackpad_subdomain': my_subdomain,
        'hackpad_key': my_key,
        'hackpad_secret': my_secret,
    }
    mock_session = Mock()
    mock_session.create_hackpad.return_value = \
        {'padId': new_pad_id}  # just a random id

    # Actually test HackpadWrapper
    hackpad = HackpadWrapper(mock_settings, mock_session)
    result = hackpad.create_pad(my_title, my_content)
    assert mock_session.create_hackpad.called
    # doesn't work for some reason...
    # NOTE(review): `assert_called_with` returns None on success, so
    # wrapping it in `assert` always fails; it should be called bare.
    # assert mock_session.create_hackpad.assert_called_with(my_title, my_content)
    assert result == new_pad_id
def test_update_report_to_send(self, fromConfig, fromOptions, getLogger):
    """A report is queued once per config and not re-queued after it has
    already been sent with the same hash."""
    options = Mock()
    options.interval = 0
    options.oneshot = True
    options.print_ = False
    options.log_dir = ''
    options.log_file = ''
    virtwho = VirtWho(self.logger, options, config_dir="/nonexistant")
    report = Mock()
    report.hash.return_value = "hash"
    config = Mock()
    report.config = config
    config.hash.return_value = "config_hash"
    config.name.return_value = "config_name"

    self.assertTrue(virtwho.update_report_to_send(report))
    self.assertTrue(
        len(virtwho.configs_ready) == 1 and config in virtwho.configs_ready)
    self.assertTrue(
        virtwho.reports_to_send[config.hash].hash == report.hash)

    # Pretend we sent the report for that config
    virtwho.configs_ready = []
    virtwho.reports[config.hash] = report.hash
    del virtwho.reports_to_send[config.hash]

    # if we receive the same report twice we should not send it
    self.assertFalse(virtwho.update_report_to_send(report))
    self.assertFalse(virtwho.configs_ready)
    self.assertFalse(virtwho.reports_to_send)
def test_nova_rpc_provider(empty_config):
    """NovaRpc entrypoint requeues the message when the container refuses
    to spawn a worker (ContainerBeingKilled)."""
    rpc_consumer = Mock()
    message = Mock(headers={})
    message_body = {
        'method': 'method',
        'args': {"arg": "arg_value"},
        'msg_id': 'msg_id',
        '_context_user_id': 'user_id'
    }

    class Service(object):
        # target method matching the message body's 'method'/'args'
        def method(self, arg):
            pass

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_cls = Service
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = empty_config

    entrypoint = NovaRpc().bind(container, "method")
    entrypoint.setup()
    entrypoint.rpc_consumer = rpc_consumer

    container.spawn_worker.side_effect = ContainerBeingKilled()
    entrypoint.handle_message(message_body, message)
    # spawning failed, so the message must go back on the queue
    assert rpc_consumer.requeue_message.called
def test_command(self):
    """get_formatter_for_type returns format_unit for the ostree type id."""
    context = Mock()
    context.config = {'output': {'poll_frequency_in_seconds': 1}}
    command = RemoveCommand(context)
    formatter = command.get_formatter_for_type(OSTREE_TYPE_ID)
    self.assertEqual(formatter, format_unit)
def fake_app_patch(test_case):
    """Return a patcher that installs a minimal fake `c.app` bound to a
    freshly created project config."""
    project = create_project('myproject')
    app_config = create_app_config(project, 'my_app')

    fake_app = Mock()
    fake_app.__version__ = '0'
    fake_app.config = app_config
    fake_app.url = '/-app-/'
    return patch.object(c, 'app', fake_app, create=True)
def mk_test_csv_writer():
    """Build a TestCSVWriter preloaded with a single sample row."""
    request = Mock()
    request.config = Config
    writer = TestCSVWriter(request=request, context=None)
    writer.set_datas([
        {'libelle': '123456789'},
    ])
    return writer
def fake_app_patch(test_case):
    """Return a patcher substituting a stub `c.app` for the duration of a test.

    Double-quote variant duplicating the earlier `fake_app_patch` — these
    appear to come from different source files.
    """
    project = create_project("myproject")
    config = create_app_config(project, "my_app")

    stub = Mock()
    stub.__version__ = "0"
    stub.config = config
    stub.url = "/-app-/"
    return patch.object(c, "app", stub, create=True)
def test_consume_provider(empty_config):
    """Exercise the Consumer lifecycle plus ack/requeue behaviour:
    success acks, failure acks unless requeue_on_error, failure requeues
    with requeue_on_error, and ContainerBeingKilled always requeues."""
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = empty_config

    worker_ctx = WorkerContext(container, None, DummyProvider())

    spawn_worker = container.spawn_worker
    spawn_worker.return_value = worker_ctx

    queue_consumer = Mock()

    consume_provider = Consumer(
        queue=foobar_queue, requeue_on_error=False).bind(container, "consume")
    consume_provider.queue_consumer = queue_consumer

    message = Mock(headers={})

    # test lifecycle
    consume_provider.setup()
    queue_consumer.register_provider.assert_called_once_with(
        consume_provider)

    consume_provider.stop()
    queue_consumer.unregister_provider.assert_called_once_with(
        consume_provider)

    # test handling successful call
    queue_consumer.reset_mock()
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, "result")
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call without requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, None, (Exception, Exception("Error"), "tb"))
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call with requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = True
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, None, (Exception, Exception("Error"), "tb"))
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)

    # test requeueing on ContainerBeingKilled (even without requeue_on_error)
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    spawn_worker.side_effect = ContainerBeingKilled()
    consume_provider.handle_message("body", message)
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)
def test_lifecycle(rabbit_manager, rabbit_config):
    """Walk the QueueConsumer through start, graceful unregister and kill.

    Unregistering must block until in-flight messages are acked, and a
    stopped consumer must not pick up newly published messages.
    """
    container = Mock()
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread.side_effect = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)

    handler = MessageHandler()

    queue_consumer.register_provider(handler)

    queue_consumer.start()

    # making sure the QueueConsumer uses the container to spawn threads
    container.spawn_managed_thread.assert_called_once_with(
        ANY, protected=True)

    vhost = rabbit_config['vhost']
    rabbit_manager.publish(vhost, 'spam', '', 'shrub')

    message = handler.wait()

    gt = eventlet.spawn(queue_consumer.unregister_provider, handler)

    # wait for the handler to be removed
    with eventlet.Timeout(TIMEOUT):
        while len(queue_consumer._consumers):
            eventlet.sleep()

    # remove_consumer has to wait for all messages to be acked
    assert not gt.dead

    # the consumer should have stopped and not accept any new messages
    rabbit_manager.publish(vhost, 'spam', '', 'ni')

    # this should cause the consumer to finish shutting down
    queue_consumer.ack_message(message)
    with eventlet.Timeout(TIMEOUT):
        gt.wait()

    # there should be a message left on the queue
    messages = rabbit_manager.get_messages(vhost, 'ham')
    assert ['ni'] == [msg['payload'] for msg in messages]

    queue_consumer.kill()
def test_reply_listener(get_rpc_exchange, queue_consumer):
    """ReplyListener declares a uuid-named reply queue and resolves reply
    events by correlation id; unknown correlation ids are logged."""
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {}
    container.service_name = "exampleservice"

    exchange = Mock()
    get_rpc_exchange.return_value = exchange

    reply_listener = ReplyListener().bind(container)

    forced_uuid = uuid.uuid4().hex

    with patch('nameko.rpc.uuid', autospec=True) as patched_uuid:
        # pin the generated routing key so we can assert on the queue name
        patched_uuid.uuid4.return_value = forced_uuid

        reply_listener.setup()
        queue_consumer.setup()

        queue = reply_listener.queue
        assert queue.name == "rpc.reply-exampleservice-{}".format(
            forced_uuid)
        assert queue.exchange == exchange
        assert queue.routing_key == forced_uuid

    queue_consumer.register_provider.assert_called_once_with(reply_listener)

    correlation_id = 1

    reply_event = reply_listener.get_reply_event(correlation_id)
    assert reply_listener._reply_events == {1: reply_event}

    message = Mock()
    message.properties.get.return_value = correlation_id
    reply_listener.handle_message("msg", message)

    queue_consumer.ack_message.assert_called_once_with(message)
    assert reply_event.ready()
    assert reply_event.wait() == "msg"

    # the pending event is consumed once resolved
    assert reply_listener._reply_events == {}

    with patch('nameko.rpc._log', autospec=True) as log:
        reply_listener.handle_message("msg", message)
        assert log.debug.call_args == call(
            'Unknown correlation id: %s', correlation_id)
def test_get_portgroup_by_vswitch(vmware_fixture):
    """Only portgroups attached to the named vswitch are returned."""
    vmware_handler, client, si_mock = vmware_fixture

    match = Mock(vswitch="ab-cd-switch1")
    other_a = Mock(vswitch="ab-cd-switch0")
    other_b = Mock(vswitch="ab-cd-switch2")

    host_view = Mock(vim.View)
    host_view.name = "host"
    host_view.config = Mock(
        network=Mock(portgroup=[match, other_a, other_b]))

    container = Mock(view=[host_view])
    view_manager = Mock()
    view_manager.CreateContainerView = Mock(return_value=container)
    si_mock.RetrieveContent.return_value.viewManager = view_manager

    assert client.get_portgroup_by_vswitch("host", "switch1") == [match]
def test_stop_while_starting(rabbit_config):
    """Stopping while start() is still retrying a broken connection must
    terminate both the start thread and the consumer thread."""
    started = Event()

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    class BrokenConnConsumer(QueueConsumer):
        def consume(self, *args, **kwargs):
            started.send(None)
            # kombu will retry again and again on broken connections
            # so we have to make sure the event is reset to allow consume
            # to be called again
            started.reset()
            return super(BrokenConnConsumer, self).consume(*args, **kwargs)

    queue_consumer = BrokenConnConsumer().bind(container)
    queue_consumer.setup()

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with eventlet.Timeout(TIMEOUT):
        with patch.object(Connection, 'connect', autospec=True) as connect:
            # patch connection to raise an error
            connect.side_effect = TimeoutError('test')
            # try to start the queue consumer
            gt = eventlet.spawn(queue_consumer.start)
            # wait for the queue consumer to begin starting and
            # then immediately stop it
            started.wait()

    with eventlet.Timeout(TIMEOUT):
        queue_consumer.unregister_provider(handler)
        queue_consumer.stop()

    with eventlet.Timeout(TIMEOUT):
        # we expect the queue_consumer.start thread to finish
        # almost immediately and when it does the queue_consumer thread
        # should be dead too
        while not gt.dead:
            eventlet.sleep()

    assert queue_consumer._gt.dead
def test_reentrant_start_stops(rabbit_config):
    """Calling start() twice must not spawn a second consumer thread."""
    container = Mock()
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)

    queue_consumer.start()
    first_thread = queue_consumer._gt

    # a second start() is a no-op: the same greenthread remains
    queue_consumer.start()
    assert first_thread is queue_consumer._gt

    queue_consumer.kill()
def test_send_email_auth(self, smtplib):
    """smtp.login() is used when SMTP_USER and SMTP_PASS are configured."""
    app = Mock()
    app.config = {
        "SMTP_FROM": "*****@*****.**",
        "SMTP_HOST": "remote-host",
        "SMTP_PORT": 9160,
        "SMTP_USER": "******",
        "SMTP_PASS": "******",
        "SMTP_CERT": None,
    }
    send_email(app, "*****@*****.**", "da subject", "body content")

    smtplib.SMTP.assert_called_with("remote-host", 9160)
    smtplib.SMTP().login.assert_called_with("user", "pass")
def test_reentrant_start_stops():
    """A repeated start() reuses the existing consumer greenthread.

    In-memory broker variant duplicating the rabbit-backed test of the
    same name — these appear to come from different source files.
    """
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)

    queue_consumer.start()
    original_thread = queue_consumer._gt

    # nothing should happen as the consumer has already been started
    queue_consumer.start()
    assert original_thread is queue_consumer._gt

    queue_consumer.kill()
def test_dry_run_does_not_send_message(self, get_topic):
    """With task.options.test set, nothing is published to the SNS topic."""
    topic = get_topic.return_value

    manager = Mock()
    manager.config = {'tasks': {}}
    task = Mock(wraps=Task(manager, 'fake'))
    task.options.test = True

    event = Mock()
    task.accepted = [event]

    emitter = notify_sns.SNSNotificationEmitter({
        'aws_region': 'test',
        'sns_topic_arn': 'arn'
    })
    emitter.send_notifications(task)

    # the entry is still rendered, but never published
    event.render.assert_called_once_with(notify_sns.DEFAULT_TEMPLATE_VALUE)
    assert not topic.publish.called
def test_that_fwdundoall_call_confd(self):
    """fwdundoall disables all three forward types via the confd client."""
    self._client = Mock().return_value
    user_id = 2
    agi = Mock()
    agi.get_variable.return_value = user_id
    agi.config = {'confd': {'client': self._client}}

    fwdundoall(agi, None, None)

    disabled = {'enabled': False}
    expected = {
        'busy': disabled,
        'noanswer': disabled,
        'unconditional': disabled,
    }
    self._client.users(user_id).update_forwards.assert_called_once_with(
        expected)
def test_responder(mock_publish):
    """A serialisable result is published with a null error field."""
    message = Mock()
    message.properties = {'reply_to': ''}
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}

    responder = Responder(message)

    # serialisable result
    result, exc_info = responder.send_response(container, True, None)
    assert result is True
    assert exc_info is None

    (published,), _ = mock_publish.call_args
    assert published == {'result': True, 'error': None}
def test_has_portgroup(vmware_fixture):
    """has_portgroup matches on the portgroup spec name exactly."""
    vmware_handler, client, si_mock = vmware_fixture

    portgroup = Mock()
    spec = Mock()
    spec.name = "p1"
    portgroup.spec = spec

    host_view = Mock(vim.View)
    host_view.name = "host"
    host_view.config = Mock(network=Mock(portgroup=[portgroup]))

    container = Mock(view=[host_view])
    view_manager = Mock()
    view_manager.CreateContainerView = Mock(return_value=container)
    si_mock.RetrieveContent.return_value.viewManager = view_manager

    assert client.has_portgroup("host", "p1") is True
    assert client.has_portgroup("host", "p187") is False
def test_basics(self):
    """The capture controller grabs stdout/stderr between start and stop."""
    capture_controller = create_capture_controller()
    context = Mock()
    context.aborted = False
    context.config = capture_controller.config

    capture_controller.setup_capture(context)
    # XXX AVOID: Due to pytest capture mode
    # Theory4ActiveCaptureController.check_invariants(capture_controller)
    capture_controller.start_capture()
    sys.stdout.write("HELLO\n")
    sys.stderr.write("world\n")
    capture_controller.stop_capture()
    # stdout and stderr are merged into a single captured output stream
    assert capture_controller.captured.output == "HELLO\nworld\n"
    # -- FINALLY:
    capture_controller.teardown_capture()
def test_remove_interfaces_from_vm_no_filter(self):
    """With no filter, removing interfaces from a VM succeeds for a
    mixed device list (an ethernet card plus an unrelated device)."""
    # arrange
    ethernet_card = Mock(spec=vim.vm.device.VirtualEthernetCard)
    sound_card = Mock(spec=vim.vm.device.VirtualSoundCard)
    vm = Mock()
    vm.config = Mock()
    # NOTE(review): the original invoked ``vm.config.hardware()`` here —
    # a no-op on a Mock (the attribute is auto-created and the call is
    # never asserted), so it has been removed.
    vm.config.hardware.device = [sound_card, ethernet_card]
    virtual_switch_to_machine_connector = VirtualSwitchToMachineDisconnectCommand(
        Mock(), Mock(), 'anetwork')

    # act
    res = virtual_switch_to_machine_connector.remove_interfaces_from_vm_task(
        vm)

    # assert
    self.assertTrue(res)
def configure_participant(self):
    """Prepare the participant: lifecycle config, patched subprocess,
    canned OBS spec file and a fresh workitem."""
    control = Mock()
    control.message = "start"
    control.config = ConfigParser()
    control.config.add_section("obs")
    control.config.set("obs", "oscrc", "oscrc_file")
    self.participant.handle_lifecycle_control(control)

    # Replace subprocess so Popen runs our canned "specify" handler.
    self.mut.subprocess = Mock()
    self.mut.subprocess.Popen.side_effect = self.mock_specify
    self.participant.obs.getFile.return_value = TEST_SPEC
    self.specify_out = TEST_SPEC

    self.wid = Workitem(WI_TEMPLATE)
    self.wid.fields.msg = None
    self.wid.fields.ev.actions = self.fake_actions
    self.wid.fields.ev.namespace = "test"
def test_nova_responder(mock_publish):
    """A serialisable result publishes a data message then an end marker."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}

    responder = NovaResponder("msgid")

    # serialisable result
    result, exc_info = responder.send_response(container, True, None)
    assert result is True
    assert exc_info is None

    # First call carries the data, second call closes the stream.
    assert mock_publish.call_count == 2
    first_call, second_call = mock_publish.call_args_list
    assert first_call[0][0] == {
        'failure': None, 'result': True, 'ending': False}
    assert second_call[0][0] == {
        'failure': None, 'result': None, 'ending': True}
def test_publish_custom_headers(empty_config, maybe_declare, patch_publisher):
    """Custom worker context data is forwarded as ``nameko.``-prefixed
    AMQP headers on publish."""
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = CustomWorkerContext(
        container, service, DummyProvider('method'),
        data={'language': 'en', 'customheader': 'customvalue'})

    publisher = PublishProvider(queue=foobar_queue)
    publisher.bind("publish", container)

    mock_producer = Mock()
    mock_connection = Mock()
    get_connection, get_producer = patch_publisher(publisher)
    get_connection.return_value = as_context_manager(mock_connection)
    get_producer.return_value = as_context_manager(mock_producer)

    # test declarations: the queue is declared on prepare
    publisher.prepare()
    maybe_declare.assert_called_once_with(foobar_queue, mock_connection)

    # test publish: context data travels as nameko.* headers
    publisher.inject(worker_ctx)
    service.publish("msg", publish_kwarg="value")
    mock_producer.publish.assert_called_once_with(
        "msg",
        headers={
            'nameko.language': 'en',
            'nameko.customheader': 'customvalue',
            'nameko.call_id_stack': ['srcservice.method.0'],
        },
        exchange=foobar_ex,
        retry=True,
        retry_policy=DEFAULT_RETRY_POLICY,
        publish_kwarg="value")
def setUp(self):
    """Wire a StateHandler against a tightly mocked HomeServer."""
    self.store = StateGroupStore()

    # spec_set keeps the mock honest: only these accessors exist.
    homeserver = Mock(spec_set=[
        "config",
        "get_datastore",
        "get_auth",
        "get_state_handler",
        "get_clock",
        "get_state_resolution_handler",
    ])
    homeserver.config = default_config("tesths", True)
    homeserver.get_datastore.return_value = self.store
    homeserver.get_state_handler.return_value = None
    homeserver.get_clock.return_value = MockClock()
    homeserver.get_auth.return_value = Auth(homeserver)
    homeserver.get_state_resolution_handler = (
        lambda: StateResolutionHandler(homeserver))

    self.state = StateHandler(homeserver)
    self.event_id = 0
def test_publish_to_rabbit(rabbit_manager, rabbit_config):
    """End-to-end publish against a real broker: declarations are created
    and a message with context headers reaches the queue."""
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.service_name = "service"
    container.config = rabbit_config
    container.spawn_managed_thread = eventlet.spawn

    service = Mock()
    worker_ctx = CustomWorkerContext(
        container, service, DummyProvider('method'),
        data={'language': 'en', 'customheader': 'customvalue'})

    publisher = PublishProvider(exchange=foobar_ex, queue=foobar_queue)
    publisher.bind("publish", container)

    # test queue, exchange and binding created in rabbit
    publisher.prepare()
    publisher.start()

    exchange_names = [
        ex['name'] for ex in rabbit_manager.get_exchanges(vhost)]
    queue_names = [q['name'] for q in rabbit_manager.get_queues(vhost)]
    binding_sources = [
        b['source']
        for b in rabbit_manager.get_queue_bindings(vhost, foobar_queue.name)]
    assert "foobar_ex" in exchange_names
    assert "foobar_queue" in queue_names
    assert "foobar_ex" in binding_sources

    # test message published to queue
    publisher.inject(worker_ctx)
    service.publish("msg")
    messages = rabbit_manager.get_messages(vhost, foobar_queue.name)
    assert ['msg'] == [message['payload'] for message in messages]

    # test message headers carry the worker context
    assert messages[0]['properties']['headers'] == {
        'nameko.language': 'en',
        'nameko.customheader': 'customvalue',
        'nameko.call_id_stack': ['service.method.0'],
    }
def test_on_consume_error_kills_consumer():
    """An exception raised from on_consume_ready propagates out of the
    consumer's managed greenthread."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)
    queue_consumer.register_provider(MessageHandler())

    with patch.object(queue_consumer, 'on_consume_ready') as on_consume_ready:
        on_consume_ready.side_effect = Exception('err')
        queue_consumer.start()

        with pytest.raises(Exception):
            queue_consumer._gt.wait()
def test_responder_cannot_repr_exc(mock_publish):
    """send_response must not blow up when the worker exception's
    argument cannot even be repr()'d."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}
    message = Mock()
    message.properties = {'reply_to': ''}

    responder = Responder(message)

    class CannotRepr(object):
        def __repr__(self):
            raise Exception('error')

    # un-repr-able exception
    bad_exception = Exception(CannotRepr())

    # send_response should not throw
    responder.send_response(container, True, (Exception, bad_exception, "tb"))
def test_ServiceEncryptedStorage(empty_data_folder):
    """Encrypted per-user service storage round-trips data for an
    authenticated user and rejects a user without a plaintext password."""
    specter_mock = Mock()
    specter_mock.config = {"uid": ""}
    specter_mock.user_manager = Mock()
    specter_mock.user_manager.users = [""]

    def make_user(user_id):
        # Both users share the same (redacted) username/password fixture.
        return User.from_json(
            user_dict={
                "id": user_id,
                "username": "******",
                "password": hash_password("somepassword"),
                "config": {},
                "is_admin": False,
                "services": None,
            },
            specter=specter_mock,
        )

    user1 = make_user("user1")
    user2 = make_user("user2")
    user1._generate_user_secret("muh")

    # Can set and get service storage fields
    service_storage = ServiceEncryptedStorage(empty_data_folder, user1)
    service_storage.set_service_data("a_service_id", {"somekey": "green"})
    assert service_storage.get_service_data("a_service_id") == {
        "somekey": "green"
    }
    assert service_storage.get_service_data("another_service_id") == {}

    # We expect a call for a user that isn't logged in to fail
    with pytest.raises(ServiceEncryptedStorageError) as execinfo:
        ServiceEncryptedStorage(empty_data_folder, user2)
    assert "must be authenticated with password" in str(execinfo.value)
def test_publish_to_queue(empty_config, maybe_declare, patch_publisher):
    """Publisher declares its queue on setup and publishes with worker
    context headers attached."""
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = WorkerContext(
        container, service, DummyProvider("publish"),
        data={'language': 'en'})

    publisher = Publisher(queue=foobar_queue).bind(container, "publish")

    mock_producer = Mock()
    mock_connection = Mock()
    get_connection, get_producer = patch_publisher(publisher)
    get_connection.return_value = as_context_manager(mock_connection)
    get_producer.return_value = as_context_manager(mock_producer)

    # test declarations
    publisher.setup()
    maybe_declare.assert_called_once_with(foobar_queue, mock_connection)

    # test publish
    service.publish = publisher.get_dependency(worker_ctx)
    service.publish("msg", publish_kwarg="value")
    mock_producer.publish.assert_called_once_with(
        "msg",
        headers={
            'nameko.language': 'en',
            'nameko.call_id_stack': ['srcservice.publish.0'],
        },
        exchange=foobar_ex,
        retry=True,
        retry_policy=DEFAULT_RETRY_POLICY,
        publish_kwarg="value")
def test_as_edge_source_target_mock(self):
    """source_in_edges/target_in_edges select the edge populations where
    the node population appears as source/target respectively."""
    def fake_edge(name, source, target):
        population = Mock()
        population.name = name
        population.source.name = source
        population.target.name = target
        return population

    circuit = Mock()
    circuit.config = {}
    circuit.edges = {
        "edge1": fake_edge('edge1', "default", "nodeother"),
        "edge2": fake_edge('edge2', "nodeother", "default"),
        "edge3": fake_edge('edge3', "default", "nodeother"),
    }
    create_node_population(
        str(TEST_DATA_DIR / 'nodes.h5'), "default", circuit=circuit)

    assert circuit.nodes['default'].source_in_edges() == {"edge1", "edge3"}
    assert circuit.nodes['default'].target_in_edges() == {"edge2"}
def test_rpc_consumer(get_rpc_exchange, queue_consumer):
    """RpcConsumer builds the per-service queue and routes routing keys
    to registered entrypoints."""
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {}
    container.service_name = "exampleservice"
    container.service_cls = Mock(rpcmethod=lambda: None)

    exchange = Mock()
    get_rpc_exchange.return_value = exchange

    consumer = RpcConsumer().bind(container)
    entrypoint = Rpc().bind(container, "rpcmethod")
    entrypoint.rpc_consumer = consumer

    entrypoint.setup()
    consumer.setup()
    queue_consumer.setup()

    # queue is named after the service and bound to the rpc exchange
    queue = consumer.queue
    assert queue.name == "rpc-exampleservice"
    assert queue.routing_key == "exampleservice.*"
    assert queue.exchange == exchange
    assert queue.durable
    queue_consumer.register_provider.assert_called_once_with(consumer)

    # provider registration and routing-key lookup
    consumer.register_provider(entrypoint)
    assert consumer._providers == set([entrypoint])

    assert consumer.get_provider_for_method(
        "exampleservice.rpcmethod") == entrypoint
    with pytest.raises(MethodNotFound):
        consumer.get_provider_for_method("exampleservice.invalidmethod")

    consumer.unregister_provider(entrypoint)
    assert consumer._providers == set()
def test_event_dispatcher(empty_config):
    """EventDispatcher declares a service events exchange and publishes
    event data keyed by event type."""
    container = Mock(spec=ServiceContainer)
    container.service_name = "srcservice"
    container.config = empty_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, DummyProvider("dispatch"))

    event_dispatcher = EventDispatcher()
    event_dispatcher.bind("dispatch", container)

    with patch('nameko.messaging.PublishProvider.prepare',
               autospec=True) as prepare:
        # test start method
        event_dispatcher.prepare()
        assert event_dispatcher.exchange.name == "srcservice.events"
        assert prepare.called

    evt = Mock(type="eventtype", data="msg")
    event_dispatcher.inject(worker_ctx)

    producer = Mock()
    with patch.object(event_dispatcher, 'get_producer',
                      autospec=True) as get_producer:
        get_producer.return_value = as_context_manager(producer)

        # test dispatch: event data is routed by event type
        service.dispatch(evt, retry_policy={'max_retries': 5})
        producer.publish.assert_called_once_with(
            evt.data,
            exchange=event_dispatcher.exchange,
            headers=event_dispatcher.get_message_headers(worker_ctx),
            routing_key=evt.type,
            retry=True,
            retry_policy={'max_retries': 5})
def test_error_stops_consumer_thread():
    """An error raised while draining events kills the consumer
    greenthread and surfaces through gt.wait()."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer()
    queue_consumer.bind("queue_consumer", container)
    queue_consumer.register_provider(MessageHandler())

    with eventlet.Timeout(TIMEOUT):
        with patch.object(
                Connection, 'drain_events', autospec=True) as drain_events:
            drain_events.side_effect = Exception('test')
            queue_consumer.start()

            with pytest.raises(Exception) as exc_info:
                queue_consumer._gt.wait()

    # the original exception is preserved verbatim
    assert exc_info.value.args == ('test', )
def test_https(self, _process_ssl_settings):
    """
    Test with https:// as the scheme.
    """
    url = 'https://martin.com/test'
    importer = Mock()
    importer.config = {
        importer_constants.KEY_SSL_CA_CERT: 'CA Cert',
        importer_constants.KEY_SSL_CLIENT_CERT: 'Client Cert',
        importer_constants.KEY_SSL_CLIENT_KEY: 'Client Key'}
    importer.tls_ca_cert_path = '/path/to/ca.crt'
    importer.tls_client_cert_path = '/path/to/client.crt'
    importer.tls_client_key_path = '/path/to/client.key'

    downloader = Importer.get_downloader_for_db_importer(
        importer, url, '/working/dir')

    # An https url yields a threaded HTTP downloader configured with the
    # importer's TLS material and working directory.
    self.assertTrue(isinstance(downloader, threaded.HTTPThreadedDownloader))
    expected_config = {
        'ssl_ca_cert_path': '/path/to/ca.crt',
        'ssl_client_cert_path': '/path/to/client.crt',
        'ssl_client_key_path': '/path/to/client.key',
        'working_dir': '/working/dir',
    }
    for attribute, value in expected_config.items():
        self.assertEqual(getattr(downloader.config, attribute), value)
    _process_ssl_settings.assert_called_once_with()
def test_run(self, transform):
    """run() searches the repository's units and renders the transformed
    documents, consuming the repo-id keyword."""
    repo_id = 'test-repo'
    units = [1, 2, 3]
    documents = [str(unit) for unit in units]

    context = Mock()
    transform.side_effect = documents
    context.server.repo_unit.search.return_value = Mock(response_body=units)
    context.config = {'output': {'poll_frequency_in_seconds': 1}}
    keywords = {'repo-id': repo_id, 'kw-1': 'v-1'}

    # test
    command = SearchCommand(context)
    command.run(**keywords)

    # validation: repo-id is consumed, remaining keywords are forwarded
    keywords.pop('repo-id')
    context.server.repo_unit.search.assert_called_once_with(
        repo_id, **keywords)
    context.prompt.render_title.assert_called_once_with(SearchCommand.TITLE)
    context.prompt.render_document_list.assert_called_once_with(
        documents, order=SearchCommand.ORDER)
def test_nova_responder_unserializale_result(mock_publish):
    """An unserialisable result is converted into a TypeError failure."""
    container = Mock()
    container.config = {AMQP_URI_CONFIG_KEY: ''}
    responder = NovaResponder("msgid")

    # unserialisable result
    unserialisable = object()
    result, exc_info = responder.send_response(
        container, unserialisable, None)
    assert result is None
    assert exc_info == (TypeError, ANY, ANY)

    # the data message carries the failure instead of a result
    assert mock_publish.call_count == 2
    first_call = mock_publish.call_args_list[0]
    assert first_call[0][0] == {
        'failure': (
            'TypeError',
            "{} is not JSON serializable".format(unserialisable)),
        'result': None,
        'ending': False
    }
def test_concurrency():
    """Injections acquired concurrently must each be a distinct session."""
    container = Mock()
    container.config = config
    container.service_name = "fooservice"

    entrypoint = DummyProvider()
    service_instance = Mock()

    def acquire(worker_ctx):
        # a fresh OrmSession bound to the container per acquisition
        orm_session = OrmSession(DeclBase)
        orm_session.container = container
        return orm_session.acquire_injection(worker_ctx)

    # get injections concurrently
    pile = GreenPile()
    for _ in xrange(CONCURRENT_REQUESTS):
        pile.spawn(
            acquire, WorkerContext(container, service_instance, entrypoint))

    # injections should all be unique
    assert len(set(pile)) == CONCURRENT_REQUESTS