def __init__(self, vumi_helper=None, **kw):
    """
    Wrap a ``MessageHelper``, optionally attaching a message store.

    :param vumi_helper: optional helper exposing a vumi API; when given,
        its message store (``mdb``) is made available on this helper.
    :param kw: passed through to the underlying ``MessageHelper``.
    """
    self._msg_helper = MessageHelper(**kw)
    # Expose the wrapped helper's transport name for convenience.
    self.transport_name = self._msg_helper.transport_name
    self._vumi_helper = vumi_helper
    # Without a vumi helper there is no message store to use.
    self.mdb = None
    if self._vumi_helper is not None:
        self.mdb = self._vumi_helper.get_vumi_api().mdb
def setUp(self):
    # Helpers are set up by hand (rather than via add_helper) so each
    # cleanup is registered explicitly with addCleanup.
    self.workerhelper = WorkerHelper()
    self.addCleanup(self.workerhelper.cleanup)
    self.persistencehelper = PersistenceHelper()
    # PersistenceHelper needs an async setup step before use.
    yield self.persistencehelper.setup()
    self.addCleanup(self.persistencehelper.cleanup)
    self.messagehelper = MessageHelper()
    self.addCleanup(self.messagehelper.cleanup)
def setUp(self):
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True))
    # Skip the test entirely if the riak client library failed to import.
    if riak_import_error is not None:
        import_skip(riak_import_error, 'riak')
    self.manager = self.persistence_helper.get_riak_manager()
    self.msg_helper = self.add_helper(MessageHelper())
class TestMessageStoreAPI(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        """
        Start a MessageStoreAPIWorker on an ephemeral port and record the
        URL, store and a started batch for the tests to use.
        """
        self.persistence_helper = self.add_helper(
            PersistenceHelper(use_riak=True))
        try:
            from vumi.components.message_store_api import (
                MatchResource, MessageStoreAPIWorker)
        except ImportError as e:
            # `except X as e` works on Python 2.6+ and Python 3, unlike
            # the legacy `except X, e` form this previously used.
            import_skip(e, 'riakasaurus', 'riakasaurus.riak')
        self.msg_helper = self.add_helper(MessageHelper())
        self.worker_helper = self.add_helper(WorkerHelper())
        self.match_resource = MatchResource
        self.base_path = '/api/v1/'
        # web_port 0 asks the OS for any free port.
        self.worker = yield self.worker_helper.get_worker(
            MessageStoreAPIWorker, self.persistence_helper.mk_config({
                'web_path': self.base_path,
                'web_port': 0,
                'health_path': '/health/',
            }))
        self.store = self.worker.store
        self.addr = self.worker.webserver.getHost()
        self.url = 'http://%s:%s%s' % (
            self.addr.host, self.addr.port, self.base_path)
        self.tag = ("pool", "tag")
        self.batch_id = yield self.store.batch_start([self.tag])
class TestMessageStoreResource(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        """
        Start a MessageStoreResourceWorker on an ephemeral port and record
        the URL and store for the tests to use.
        """
        self.persistence_helper = self.add_helper(
            PersistenceHelper(use_riak=True))
        try:
            from vumi.components.message_store_resource import (
                MessageStoreResourceWorker)
        except ImportError as e:
            # `except X as e` works on Python 2.6+ and Python 3, unlike
            # the legacy `except X, e` form this previously used.
            import_skip(e, 'riakasaurus', 'riakasaurus.riak')
        self.worker_helper = self.add_helper(WorkerHelper())
        # 'tcp:0' asks the OS for any free port.
        config = self.persistence_helper.mk_config({
            'twisted_endpoint': 'tcp:0',
            'web_path': '/resource_path/',
        })
        worker = yield self.worker_helper.get_worker(
            MessageStoreResourceWorker, config)
        yield worker.startService()
        # Wait for the listening port so we can discover its address.
        port = yield worker.services[0]._waitingForPort
        addr = port.getHost()
        self.msg_helper = self.add_helper(MessageHelper())
        self.url = 'http://%s:%s' % (addr.host, addr.port)
        self.store = worker.store
        self.addCleanup(self.stop_server, port)
def test_rebuild_cache(self):
    """
    Rebuilding the info cache for a batch will clear all cached data and
    rebuild it from the given QueryMessageStore.
    """
    msg_helper = self.add_helper(MessageHelper())
    batch_info_cache = self.batch_manager.batch_info_cache
    # Fill the message store backend with the data we want in the rebuilt
    # cache.
    yield self.backend.add_inbound_message(
        msg_helper.make_inbound("in 1"), batch_ids=["mybatch"])
    yield self.backend.add_outbound_message(
        msg_helper.make_outbound("out 1"), batch_ids=["mybatch"])
    yield self.backend.add_outbound_message(
        msg_helper.make_outbound("out 2"), batch_ids=["mybatch"])
    # Fill the cache with some nonsense that we want to throw out when
    # rebuilding.
    yield batch_info_cache.add_inbound_message_key("mybatch", "in1", 12345)
    yield batch_info_cache.add_inbound_message_key("mybatch", "in2", 12345)
    old_in = yield batch_info_cache.get_inbound_message_count("mybatch")
    old_out = yield batch_info_cache.get_outbound_message_count("mybatch")
    # Sanity check: the cache currently reflects the nonsense data.
    self.assertEqual((old_in, old_out), (2, 0))
    # Rebuild the cache.
    qms = QueryMessageStore(self.manager, self.redis)
    yield self.batch_manager.rebuild_cache("mybatch", qms)
    new_in = yield batch_info_cache.get_inbound_message_count("mybatch")
    new_out = yield batch_info_cache.get_outbound_message_count("mybatch")
    # The rebuilt counts now match the backend, not the old cache.
    self.assertEqual((new_in, new_out), (1, 2))
def setUp(self):
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True))
    self.worker_helper = self.add_helper(WorkerHelper())
    self.msg_helper = self.add_helper(MessageHelper())
    # Both stores share the same riak and redis managers.
    riak, redis = yield self.create_managers()
    self.operational_store = OperationalMessageStore(riak, redis)
    self.batch_manager = MessageStoreBatchManager(riak, redis)
def set_up_tests(self, manager):
    """
    Build the backend and message helpers around the given riak manager.

    This should be called from .setUp().

    :param manager: riak manager the message store backend should use.
    """
    self.manager = manager
    self.backend = MessageStoreRiakBackend(self.manager)
    self.msg_helper = self.add_helper(MessageHelper())
    self.msg_seq_helper = self.add_helper(
        MessageSequenceHelper(self.backend, self.msg_helper))
def setUp(self):
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True))
    self.manager = self.persistence_helper.get_riak_manager()
    # Close the riak manager on cleanup so connections aren't leaked.
    self.add_cleanup(self.manager.close_manager)
    self.redis = yield self.persistence_helper.get_redis_manager()
    self.store = OperationalMessageStore(self.manager, self.redis)
    # Convenience references to the store's internals used by the tests.
    self.backend = self.store.riak_backend
    self.bi_cache = self.store.batch_info_cache
    self.msg_helper = self.add_helper(MessageHelper())
def __init__(self, dispatcher_class, use_riak=False, **msg_helper_args):
    """
    Composite helper for testing dispatcher workers.

    :param dispatcher_class: the dispatcher worker class under test.
    :param use_riak: passed through to the PersistenceHelper.
    :param msg_helper_args: keyword arguments for the MessageHelper.
    """
    self.dispatcher_class = dispatcher_class
    # Build the individual helpers this helper composes.
    self.worker_helper = WorkerHelper()
    self.persistence_helper = PersistenceHelper(use_riak=use_riak)
    self.msg_helper = MessageHelper(**msg_helper_args)
    self.dispatch_helper = MessageDispatchHelper(
        self.msg_helper, self.worker_helper)
    # Expose the proxyable methods of each helper directly on this one.
    for helper in (self.msg_helper, self.worker_helper,
                   self.dispatch_helper):
        generate_proxies(self, helper)
class TestMessageStoreBase(VumiTestCase):

    @inlineCallbacks
    def setUp(self):
        """
        Build a MessageStore backed by real riak and redis managers,
        skipping the test if the riak client library is unavailable.
        """
        self.persistence_helper = self.add_helper(
            PersistenceHelper(use_riak=True))
        try:
            from vumi.components.message_store import MessageStore
        except ImportError as e:
            # `except X as e` works on Python 2.6+ and Python 3, unlike
            # the legacy `except X, e` form this previously used.
            import_skip(e, 'riak')
        self.redis = yield self.persistence_helper.get_redis_manager()
        self.manager = self.persistence_helper.get_riak_manager()
        self.store = MessageStore(self.manager, self.redis)
        self.msg_helper = self.add_helper(MessageHelper())
def setUp(self):
    # Route on the to_addr: app1 matches a regex, app2 an exact address.
    self.config = {
        'transport_names': ['transport1'],
        'exposed_names': ['app1', 'app2'],
        'toaddr_mappings': {
            'app1': 'to:.*:1',
            'app2': 'to:app2',
        },
    }
    self.dispatcher = DummyDispatcher(self.config)
    self.router = ToAddrRouter(self.dispatcher, self.config)
    yield self.router.setup_routing()
    self.msg_helper = self.add_helper(MessageHelper())
def __init__(self, application_class, use_riak=False, **msg_helper_args):
    """
    Composite helper for testing application workers.

    :param application_class: the application worker class under test.
    :param use_riak: passed through to the PersistenceHelper.
    :param msg_helper_args: keyword arguments for the MessageHelper.
    """
    self.application_class = application_class
    self.persistence_helper = PersistenceHelper(use_riak=use_riak)
    self.msg_helper = MessageHelper(**msg_helper_args)
    self.transport_name = self.msg_helper.transport_name
    # The worker helper listens on the same transport as the messages.
    self.worker_helper = WorkerHelper(self.msg_helper.transport_name)
    self.dispatch_helper = MessageDispatchHelper(self.msg_helper,
                                                 self.worker_helper)
    # Proxy methods from our helpers.
    generate_proxies(self, self.msg_helper)
    generate_proxies(self, self.worker_helper)
    generate_proxies(self, self.dispatch_helper)
    generate_proxies(self, self.persistence_helper)
def setUp(self):
    # Use a fake clock so timers can be advanced deterministically.
    self.clock = Clock()
    MessengerTransport.clock = self.clock

    # A mock remote HTTP endpoint that always answers 'OK'.
    self.remote_server = MockHttpServer(lambda _: 'OK')
    yield self.remote_server.start()
    self.addCleanup(self.remote_server.stop)

    self.tx_helper = self.add_helper(
        HttpRpcTransportHelper(PatchedMessengerTransport))
    self.msg_helper = self.add_helper(MessageHelper())
    # Disable connection reuse so tests don't leak persistent connections.
    connection_pool = HTTPConnectionPool(reactor, persistent=False)
    treq._utils.set_global_pool(connection_pool)
def test_vumimessage_field_excludes_cache(self):
    """
    The message cache attribute is stripped when a message is stored in a
    VumiMessage model field and stays absent after a round trip.
    """
    msg_helper = self.add_helper(MessageHelper())
    msg_model = self.manager.proxy(self.VumiMessageModel)
    cache_attr = TransportUserMessage._CACHE_ATTRIBUTE
    msg = msg_helper.make_inbound("foo", extra="bar")
    msg.cache["cache"] = "me"
    # The cache attribute is visible on the message itself...
    self.assertEqual(msg[cache_attr], {"cache": "me"})
    m1 = msg_model("foo", msg=msg)
    # ...but excluded from the model field both before and after saving.
    self.assertTrue(cache_attr not in m1.msg)
    yield m1.save()
    m2 = yield msg_model.load("foo")
    self.assertTrue(cache_attr not in m2.msg)
    self.assertEqual(m2.msg, m1.msg)
def setUp(self):
    """
    Build a MessageStore on a riak manager with a unique bucket prefix.
    """
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True, is_sync=False))
    self.msg_helper = self.add_helper(MessageHelper())
    # Since we're never loading the actual objects, we can't detect
    # tombstones. Therefore, each test needs its own bucket prefix.
    config = self.persistence_helper.mk_config({})["riak_manager"].copy()
    config["bucket_prefix"] = "%s-%s" % (
        uuid4().hex, config["bucket_prefix"])
    self.riak_manager = self.persistence_helper.get_riak_manager(config)
    # Close the manager on cleanup so we don't leak connections (matches
    # the other per-test-bucket setUp variant in this file).
    self.add_cleanup(self.riak_manager.close_manager)
    self.redis_manager = yield self.persistence_helper.get_redis_manager()
    self.mdb = MessageStore(self.riak_manager, self.redis_manager)
    self.expected_bucket_prefix = "bucket"
    self.default_args = [
        "-b", self.expected_bucket_prefix,
    ]
def setUp(self):
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True, is_sync=False))
    self.msg_helper = self.add_helper(MessageHelper())
    # Since we're never loading the actual objects, we can't detect
    # tombstones. Therefore, each test needs its own bucket prefix.
    self.expected_bucket_prefix = "bucket-%s" % (uuid4().hex,)
    self.riak_manager = self.persistence_helper.get_riak_manager({
        "bucket_prefix": self.expected_bucket_prefix,
    })
    # Close the manager on cleanup so connections aren't leaked.
    self.add_cleanup(self.riak_manager.close_manager)
    self.redis_manager = yield self.persistence_helper.get_redis_manager()
    self.mdb = MessageStore(self.riak_manager, self.redis_manager)
    self.default_args = [
        "-b", self.expected_bucket_prefix,
    ]
def test_vumimessage_field(self):
    """
    A message stored in a VumiMessage model field round-trips through
    save/load, rejects non-message values, and strips extra keys.
    """
    msg_helper = self.add_helper(MessageHelper())
    msg_model = self.manager.proxy(self.VumiMessageModel)
    msg = msg_helper.make_inbound("foo", extra="bar")
    m1 = msg_model("foo", msg=msg)
    yield m1.save()
    m2 = yield msg_model.load("foo")
    self.assertEqual(m1.msg, m2.msg)
    self.assertEqual(m2.msg, msg)

    # Assigning a non-message value is rejected.
    self.assertRaises(ValidationError, setattr, m1, "msg", "foo")

    # test extra keys are removed
    msg2 = msg_helper.make_inbound("foo")
    m1.msg = msg2
    self.assertTrue("extra" not in m1.msg)
def __init__(self, transport_class, use_riak=False, **msg_helper_args):
    """
    Composite helper for testing transport workers.

    :param transport_class: the transport worker class under test.
    :param use_riak: passed through to the PersistenceHelper.
    :param msg_helper_args: keyword arguments for the MessageHelper.
    """
    self.transport_class = transport_class
    self.persistence_helper = PersistenceHelper(use_riak=use_riak)
    self.msg_helper = MessageHelper(**msg_helper_args)
    self.transport_name = self.msg_helper.transport_name
    # Status messages go out on a separate "<transport>.status" connector.
    self.worker_helper = WorkerHelper(connector_name=self.transport_name,
                                      status_connector_name="%s.status" % (
                                          self.transport_name, ))
    self.dispatch_helper = MessageDispatchHelper(self.msg_helper,
                                                 self.worker_helper)
    # Proxy methods from our helpers.
    generate_proxies(self, self.msg_helper)
    generate_proxies(self, self.worker_helper)
    generate_proxies(self, self.dispatch_helper)
    generate_proxies(self, self.persistence_helper)
def setUp(self):
    config = {
        "transport_names": [
            "transport_1",
            "transport_2",
        ],
        "exposed_names": ["round_robin"],
        "router_class": ("vumi.dispatchers.load_balancer."
                         "LoadBalancingRouter"),
    }
    # Subclasses may set these attributes to exercise the optional
    # router config fields.
    if self.reply_affinity is not None:
        config['reply_affinity'] = self.reply_affinity
    if self.rewrite_transport_names is not None:
        config['rewrite_transport_names'] = self.rewrite_transport_names
    self.dispatcher = DummyDispatcher(config)
    self.router = LoadBalancingRouter(self.dispatcher, config)
    self.add_cleanup(self.router.teardown_routing)
    yield self.router.setup_routing()
    self.msg_helper = self.add_helper(MessageHelper())
def setUp(self):
    # Each from-address at the muxed endpoint maps to its own transport.
    config = {
        "transport_names": [
            "transport_1",
            "transport_2",
            "transport_3",
        ],
        "exposed_names": ["muxed"],
        "router_class": "vumi.dispatchers.base.FromAddrMultiplexRouter",
        "fromaddr_mappings": {
            "thing1@muxme": "transport_1",
            "thing2@muxme": "transport_2",
            "thing3@muxme": "transport_3",
        },
    }
    self.dispatcher = DummyDispatcher(config)
    self.router = FromAddrMultiplexRouter(self.dispatcher, config)
    self.add_cleanup(self.router.teardown_routing)
    yield self.router.setup_routing()
    self.msg_helper = self.add_helper(MessageHelper())
class MessageStoreCacheTestCase(VumiTestCase):
    # Subclasses set this to False to test against an unstarted batch.
    start_batch = True

    @inlineCallbacks
    def setUp(self):
        """
        Build a MessageStore and expose its cache, optionally starting a
        batch, skipping the test if the riak library is unavailable.
        """
        self.persistence_helper = self.add_helper(
            PersistenceHelper(use_riak=True))
        try:
            from vumi.components.message_store import MessageStore
        except ImportError as e:
            # `except X as e` works on Python 2.6+ and Python 3, unlike
            # the legacy `except X, e` form this previously used.
            import_skip(e, 'riak')
        self.redis = yield self.persistence_helper.get_redis_manager()
        # get_riak_manager() and the MessageStore constructor are
        # synchronous; the spurious `yield`s on them have been dropped to
        # match the other message store test cases in this file.
        self.manager = self.persistence_helper.get_riak_manager()
        self.store = MessageStore(self.manager, self.redis)
        self.cache = self.store.cache
        self.batch_id = 'a-batch-id'
        if self.start_batch:
            yield self.cache.batch_start(self.batch_id)
        self.msg_helper = self.add_helper(MessageHelper())
def setUp(self):
    """Create the formatter, dummy request and message helper fixtures."""
    self.formatter = CsvFormatter()
    self.request = DummyRequest([''])
    self.msg_helper = self.add_helper(MessageHelper())
class TestBaseRouterWorker(VumiTestCase, JunebugTestBase):
    # Baseline worker config; individual tests override fields via
    # get_router_worker().
    DEFAULT_ROUTER_WORKER_CONFIG = {
        'inbound_ttl': 60,
        'outbound_ttl': 60 * 60 * 24 * 2,
        'metric_window': 1.0,
        'destinations': [],
    }

    @inlineCallbacks
    def setUp(self):
        self.workerhelper = WorkerHelper()
        self.addCleanup(self.workerhelper.cleanup)

        self.persistencehelper = PersistenceHelper()
        yield self.persistencehelper.setup()
        self.addCleanup(self.persistencehelper.cleanup)

        self.messagehelper = MessageHelper()
        self.addCleanup(self.messagehelper.cleanup)

    @inlineCallbacks
    def get_router_worker(self, config=None):
        # Merge the supplied config over the defaults and start a
        # TestRouter worker with it.
        if config is None:
            config = {}
        config = conjoin(
            self.persistencehelper.mk_config(
                self.DEFAULT_ROUTER_WORKER_CONFIG),
            config)

        TestRouter._create_worker = self.workerhelper.get_worker
        worker = yield self.workerhelper.get_worker(TestRouter, config)
        returnValue(worker)

    @inlineCallbacks
    def test_start_router_worker_no_destinations(self):
        """
        If there are no destinations specified, no workers should be started.
        The setup_router function should be called on the implementation.
        """
        worker = yield self.get_router_worker()
        self.assertEqual(len(worker.namedServices), 0)
        self.assertTrue(worker.setup_called)

    @inlineCallbacks
    def test_start_router_with_destinations(self):
        """
        If there are destinations specified, then a worker should be started
        for every destination.
        """
        worker = yield self.get_router_worker({
            'destinations': [
                {
                    'id': 'test-destination1',
                },
                {
                    'id': 'test-destination2',
                },
            ],
        })
        self.assertTrue(worker.setup_called)
        self.assertEqual(sorted(worker.namedServices.keys()), [
            'test-destination1', 'test-destination2'])
        for connector in worker.connectors.values():
            self.assertFalse(connector.paused)

    @inlineCallbacks
    def test_teardown_router(self):
        """
        Tearing down a router should pause all connectors, and call the
        teardown method of the router implementation
        """
        worker = yield self.get_router_worker({
            'destinations': [{'id': 'test-destination1'}],
        })
        self.assertFalse(worker.teardown_called)
        for connector in worker.connectors.values():
            self.assertFalse(connector.paused)
        yield worker.teardown_worker()
        self.assertTrue(worker.teardown_called)
        for connector in worker.connectors.values():
            self.assertTrue(connector.paused)

    @inlineCallbacks
    def test_consume_channel(self):
        """
        consume_channel should set up the appropriate connector, as well as
        attach the specified callbacks for messages and events.
        """
        worker = yield self.get_router_worker({})
        messages = []
        events = []

        def message_callback(channelid, message):
            assert channelid == 'testchannel'
            messages.append(message)

        def event_callback(channelid, event):
            assert channelid == 'testchannel'
            events.append(event)

        yield worker.consume_channel(
            'testchannel', message_callback, event_callback)
        # Because this is only called in setup, and we're creating connectors
        # after setup, we need to unpause them
        worker.unpause_connectors()

        self.assertEqual(messages, [])
        inbound = self.messagehelper.make_inbound('test message')
        yield self.workerhelper.dispatch_inbound(inbound, 'testchannel')
        self.assertEqual(messages, [inbound])

        self.assertEqual(events, [])
        event = self.messagehelper.make_ack()
        yield self.workerhelper.dispatch_event(event, 'testchannel')
        self.assertEqual(events, [event])

    @inlineCallbacks
    def test_send_inbound_to_destination(self):
        """
        send_inbound_to_destination should send the provided inbound message
        to the specified destination worker
        """
        worker = yield self.get_router_worker({
            'destinations': [{
                'id': 'test-destination',
                'amqp_queue': 'testqueue',
            }],
        })

        inbound = self.messagehelper.make_inbound('test_message')
        yield worker.send_inbound_to_destination('test-destination', inbound)
        [message] = yield self.workerhelper.wait_for_dispatched_inbound(
            connector_name='testqueue')
        self.assertEqual(message, inbound)

    @inlineCallbacks
    def test_send_event_to_destination(self):
        """
        send_event_to_destination should send the provided event message to
        the specified destination worker
        """
        worker = yield self.get_router_worker({
            'destinations': [{
                'id': 'test-destination',
                'amqp_queue': 'testqueue',
            }],
        })

        ack = self.messagehelper.make_ack()
        yield worker.send_event_to_destination('test-destination', ack)
        [event] = yield self.workerhelper.wait_for_dispatched_events(
            connector_name='testqueue')
        self.assertEqual(event, ack)

    @inlineCallbacks
    def test_consume_destination(self):
        """
        If a callback is attached to a destination, then that callback should
        be called when an outbound is sent from a destination
        """
        worker = yield self.get_router_worker({
            'destinations': [{
                'id': 'test-destination',
                'amqp_queue': 'testqueue',
            }],
        })
        messages = []

        def message_callback(destinationid, message):
            assert destinationid == 'test-destination'
            messages.append(message)

        yield worker.consume_destination('test-destination', message_callback)
        # Because this is only called in setup, and we're creating connectors
        # after setup, we need to unpause them
        worker.unpause_connectors()

        self.assertEqual(messages, [])
        msg = self.messagehelper.make_outbound('testmessage')
        yield self.workerhelper.dispatch_outbound(msg, 'test-destination')
        self.assertEqual(messages, [msg])

    @inlineCallbacks
    def test_send_outbound_to_channel(self):
        """
        send_outbound_to_channel should send the provided outbound message to
        the specified channel
        """
        worker = yield self.get_router_worker({})
        yield worker.consume_channel('testchannel', lambda m: m, lambda e: e)

        outbound = self.messagehelper.make_outbound('test message')
        yield worker.send_outbound_to_channel('testchannel', outbound)
        [message] = yield self.workerhelper.wait_for_dispatched_outbound(
            connector_name='testchannel')
        self.assertEqual(message, outbound)
class GoMessageHelper(object):
    """
    Helper for building and (optionally) storing Go-annotated messages.

    Wraps a ``MessageHelper`` and adds Go conversation/router metadata and
    routing metadata to the messages it creates. If a ``vumi_helper`` is
    supplied, messages and events can also be stored in its message store.
    """
    implements(IHelper)

    def __init__(self, vumi_helper=None, **kw):
        self._msg_helper = MessageHelper(**kw)
        self.transport_name = self._msg_helper.transport_name
        self._vumi_helper = vumi_helper
        # Without a vumi helper there is no message store available.
        self.mdb = None
        if self._vumi_helper is not None:
            self.mdb = self._vumi_helper.get_vumi_api().mdb

    def setup(self):
        pass

    def cleanup(self):
        return self._msg_helper.cleanup()

    @proxyable
    def add_router_metadata(self, msg, router):
        # Annotate msg's helper_metadata with the router's identity.
        msg.payload.setdefault('helper_metadata', {})
        md = MessageMetadataHelper(None, msg)
        md.set_router_info(router.router_type, router.key)
        md.set_user_account(router.user_account.key)

    @proxyable
    def add_conversation_metadata(self, msg, conv):
        # Annotate msg's helper_metadata with the conversation's identity.
        msg.payload.setdefault('helper_metadata', {})
        md = MessageMetadataHelper(None, msg)
        md.set_conversation_info(conv.conversation_type, conv.key)
        md.set_user_account(conv.user_account.key)

    @proxyable
    def _add_go_metadata(self, msg, conv, router):
        # Apply whichever of conversation/router metadata was provided.
        if conv is not None:
            self.add_conversation_metadata(msg, conv)
        if router is not None:
            self.add_router_metadata(msg, router)

    @proxyable
    def _add_go_routing_metadata(self, msg, hops, outbound_hops):
        # Apply whichever of hops/outbound_hops was provided.
        rmeta = RoutingMetadata(msg)
        if hops is not None:
            rmeta.set_hops(hops)
        if outbound_hops is not None:
            rmeta.set_outbound_hops(outbound_hops)

    @proxyable
    def make_inbound(self, content, conv=None, router=None, hops=None,
                     outbound_hops=None, **kw):
        """Make an inbound message with Go metadata attached."""
        msg = self._msg_helper.make_inbound(content, **kw)
        self._add_go_metadata(msg, conv, router)
        self._add_go_routing_metadata(msg, hops, outbound_hops)
        return msg

    @proxyable
    def make_outbound(self, content, conv=None, router=None, hops=None,
                      outbound_hops=None, **kw):
        """Make an outbound message with Go metadata attached."""
        msg = self._msg_helper.make_outbound(content, **kw)
        self._add_go_metadata(msg, conv, router)
        self._add_go_routing_metadata(msg, hops, outbound_hops)
        return msg

    @proxyable
    def make_ack(self, msg=None, conv=None, router=None, hops=None,
                 outbound_hops=None, **kw):
        """Make an ack event with Go metadata attached."""
        ack = self._msg_helper.make_ack(msg, **kw)
        self._add_go_metadata(ack, conv, router)
        self._add_go_routing_metadata(ack, hops, outbound_hops)
        return ack

    @proxyable
    def make_nack(self, msg=None, conv=None, router=None, hops=None,
                  outbound_hops=None, **kw):
        """Make a nack event with Go metadata attached."""
        nack = self._msg_helper.make_nack(msg, **kw)
        self._add_go_metadata(nack, conv, router)
        self._add_go_routing_metadata(nack, hops, outbound_hops)
        return nack

    @proxyable
    def make_delivery_report(self, msg=None, conv=None, router=None,
                             hops=None, outbound_hops=None, **kw):
        """Make a delivery report event with Go metadata attached."""
        dr = self._msg_helper.make_delivery_report(msg, **kw)
        self._add_go_metadata(dr, conv, router)
        self._add_go_routing_metadata(dr, hops, outbound_hops)
        return dr

    @proxyable
    def make_reply(self, msg, content, **kw):
        return self._msg_helper.make_reply(msg, content, **kw)

    @proxyable
    def store_inbound(self, conv, msg):
        """Store an inbound message in the conversation's batch."""
        if self.mdb is None:
            raise ValueError("No message store provided.")
        return self.mdb.add_inbound_message(msg, batch_id=conv.batch.key)

    @proxyable
    def store_outbound(self, conv, msg):
        """Store an outbound message in the conversation's batch."""
        if self.mdb is None:
            raise ValueError("No message store provided.")
        return self.mdb.add_outbound_message(msg, batch_id=conv.batch.key)

    @proxyable
    def store_event(self, event):
        """Store an event in the message store."""
        if self.mdb is None:
            raise ValueError("No message store provided.")
        return self.mdb.add_event(event)

    @proxyable
    def make_stored_inbound(self, conv, content, **kw):
        msg = self.make_inbound(content, conv=conv, **kw)
        return maybe_async_return(msg, self.store_inbound(conv, msg))

    @proxyable
    def make_stored_outbound(self, conv, content, **kw):
        msg = self.make_outbound(content, conv=conv, **kw)
        return maybe_async_return(msg, self.store_outbound(conv, msg))

    @proxyable
    def make_stored_ack(self, conv, msg, **kw):
        event = self.make_ack(msg, conv=conv, **kw)
        return maybe_async_return(event, self.store_event(event))

    @proxyable
    def make_stored_nack(self, conv, msg, **kw):
        event = self.make_nack(msg, conv=conv, **kw)
        return maybe_async_return(event, self.store_event(event))

    @proxyable
    def make_stored_delivery_report(self, conv, msg, **kw):
        event = self.make_delivery_report(msg, conv=conv, **kw)
        return maybe_async_return(event, self.store_event(event))

    @proxyable
    def add_inbound_to_conv(self, conv, count, start_date=None,
                            time_multiplier=10):
        """
        Store `count` inbound messages, spaced backwards in time from
        `start_date` (default: today) by `time_multiplier` hours each.
        """
        now = start_date or datetime.now().date()

        messages = []
        for i in range(count):
            timestamp = now - timedelta(hours=i * time_multiplier)
            messages.append(self.make_stored_inbound(
                conv, "inbound %s" % (i,), from_addr='from-%s' % (i,),
                timestamp=timestamp))
        # We can't use `maybe_async_return` here because we need
        # gatherResults.
        if isinstance(messages[0], Deferred):
            return gatherResults(messages)
        else:
            return messages

    @proxyable
    def add_outbound_to_conv(self, conv, count, start_date=None,
                             time_multiplier=10):
        """
        Store `count` outbound messages, spaced backwards in time from
        `start_date` (default: today) by `time_multiplier` hours each.
        """
        now = start_date or datetime.now().date()

        messages = []
        for i in range(count):
            timestamp = now - timedelta(hours=i * time_multiplier)
            messages.append(self.make_stored_outbound(
                conv, "outbound %s" % (i,), to_addr='to-%s' % (i,),
                timestamp=timestamp))
        # We can't use `maybe_async_return` here because we need
        # gatherResults.
        if isinstance(messages[0], Deferred):
            return gatherResults(messages)
        else:
            return messages

    @proxyable
    def add_replies_to_conv(self, conv, msgs):
        """
        Store a reply (one second later) for each of the given messages.
        """
        messages = []
        ds = []
        for msg in msgs:
            timestamp = msg['timestamp'] + timedelta(seconds=1)
            reply = self.make_reply(msg, "reply", timestamp=timestamp)
            messages.append(reply)
            ds.append(self.store_outbound(conv, reply))
        # We can't use `maybe_async_return` here because we need
        # gatherResults.
        if isinstance(ds[0], Deferred):
            return gatherResults(ds).addCallback(lambda r: messages)
        else:
            return messages
class TestRouter(JunebugTestBase):
    # Baseline worker config; individual tests override fields via
    # get_router_worker().
    DEFAULT_ROUTER_WORKER_CONFIG = {
        'inbound_ttl': 60,
        'outbound_ttl': 60 * 60 * 24 * 2,
        'metric_window': 1.0,
        'destinations': [],
    }

    @inlineCallbacks
    def setUp(self):
        yield self.start_server()

        self.workerhelper = WorkerHelper()
        self.addCleanup(self.workerhelper.cleanup)

        self.persistencehelper = PersistenceHelper()
        yield self.persistencehelper.setup()
        self.addCleanup(self.persistencehelper.cleanup)

        self.messagehelper = MessageHelper()
        self.addCleanup(self.messagehelper.cleanup)

    @inlineCallbacks
    def get_router_worker(self, config=None):
        # Merge the supplied config over the defaults and start a
        # FromAddressRouter worker with it.
        if config is None:
            config = {}
        config = conjoin(
            self.persistencehelper.mk_config(
                self.DEFAULT_ROUTER_WORKER_CONFIG),
            config)

        FromAddressRouter._create_worker = self.workerhelper.get_worker
        worker = yield self.workerhelper.get_worker(FromAddressRouter, config)
        returnValue(worker)

    @inlineCallbacks
    def test_validate_router_config_invalid_channel_uuid(self):
        """
        If the provided channel UUID is not a valid UUID a config error
        should be raised
        """
        with self.assertRaises(InvalidRouterConfig) as e:
            yield FromAddressRouter.validate_router_config(
                self.api, {'channel': "bad-uuid"})
        self.assertEqual(e.exception.message,
                         "Field 'channel' is not a valid UUID")

    @inlineCallbacks
    def test_validate_router_config_missing_channel(self):
        """
        If the provided channel UUID is not for an existing channel, a config
        error should be raised
        """
        channel_id = str(uuid.uuid4())
        with self.assertRaises(InvalidRouterConfig) as e:
            yield FromAddressRouter.validate_router_config(
                self.api, {'channel': channel_id})
        self.assertEqual(e.exception.message,
                         "Channel {} does not exist".format(channel_id))

    @inlineCallbacks
    def test_validate_router_config_existing_destination(self):
        """
        If the specified channel already has a destination specified, then a
        config error should be raised
        """
        channel = yield self.create_channel(self.api.service, self.redis)
        with self.assertRaises(InvalidRouterConfig) as e:
            yield FromAddressRouter.validate_router_config(
                self.api, {'channel': channel.id})
        self.assertEqual(
            e.exception.message,
            "Channel {} already has a destination specified".format(
                channel.id))

    @inlineCallbacks
    def test_validate_router_config_existing_router(self):
        """
        If an existing router is already listening to the specified channel,
        then a config error should be raised
        """
        channel = yield self.create_channel(self.api.service, self.redis,
                                            properties={
                                                'type': 'telnet',
                                                'config': {
                                                    'twisted_endpoint':
                                                        'tcp:0',
                                                },
                                            })
        config = self.create_router_config(config={
            'test': 'pass', 'channel': channel.id})
        router = Router(self.api, config)
        yield router.save()
        router.start(self.api.service)

        with self.assertRaises(InvalidRouterConfig) as e:
            yield FromAddressRouter.validate_router_config(
                self.api, {'channel': channel.id})
        self.assertEqual(
            e.exception.message,
            "Router {} is already routing channel {}".format(
                router.id, channel.id))

    @inlineCallbacks
    def test_validate_router_destination_config_invalid_regex(self):
        """
        If invalid regex is passed into the regex field, a config error
        should be raised
        """
        with self.assertRaises(InvalidRouterDestinationConfig) as e:
            yield FromAddressRouter.validate_destination_config(
                self.api, {'regular_expression': "("})
        self.assertEqual(
            e.exception.message,
            "Field 'regular_expression' is not a valid regular expression: "
            "unbalanced parenthesis")

    @inlineCallbacks
    def test_validate_router_destination_config_missing_field(self):
        """
        regular_expression should be a required field
        """
        with self.assertRaises(InvalidRouterDestinationConfig) as e:
            yield FromAddressRouter.validate_destination_config(self.api, {})
        self.assertEqual(e.exception.message,
                         "Missing required config field 'regular_expression'")

    @inlineCallbacks
    def test_inbound_message_routing(self):
        """
        Inbound messages should be routed to the correct destination
        worker(s)
        """
        yield self.get_router_worker({
            'destinations': [{
                'id': "test-destination1",
                'amqp_queue': "testqueue1",
                'config': {
                    'regular_expression': '^1.*$'
                },
            }, {
                'id': "test-destination2",
                'amqp_queue': "testqueue2",
                'config': {
                    'regular_expression': '^2.*$'
                },
            }, {
                'id': "test-destination3",
                'amqp_queue': "testqueue3",
                'config': {
                    'regular_expression': '^2.*$'
                },
            }],
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        # to_addr starting with '1' matches only destination1.
        inbound = self.messagehelper.make_inbound('test message',
                                                  to_addr='1234')
        yield self.workerhelper.dispatch_inbound(
            inbound, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [message] = yield self.workerhelper.wait_for_dispatched_inbound(
            connector_name='testqueue1')
        self.assertEqual(inbound, message)

        # to_addr starting with '2' matches both destination2 and 3.
        inbound = self.messagehelper.make_inbound('test message',
                                                  to_addr='2234')
        yield self.workerhelper.dispatch_inbound(
            inbound, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [message] = yield self.workerhelper.wait_for_dispatched_inbound(
            connector_name='testqueue2')
        self.assertEqual(inbound, message)
        [message] = yield self.workerhelper.wait_for_dispatched_inbound(
            connector_name='testqueue3')
        self.assertEqual(inbound, message)

    @inlineCallbacks
    def test_inbound_message_routing_no_to_addr(self):
        """
        If an inbound message doesn't have a to address, then an error should
        be logged
        """
        yield self.get_router_worker({
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        logs = []
        log.addObserver(logs.append)
        inbound = self.messagehelper.make_inbound('test message',
                                                  to_addr=None)
        yield self.workerhelper.dispatch_inbound(
            inbound, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [error_log] = logs
        self.assertIn("Message has no to address, cannot route message: ",
                      error_log['log_text'])

    @inlineCallbacks
    def test_inbound_event_routing(self):
        """
        Inbound events should be routed to the correct destination worker(s)
        """
        yield self.get_router_worker({
            'destinations': [{
                'id': "test-destination1",
                'amqp_queue': "testqueue1",
                'config': {
                    'regular_expression': '^1.*$'
                },
            }, {
                'id': "test-destination2",
                'amqp_queue': "testqueue2",
                'config': {
                    'regular_expression': '^2.*$'
                },
            }, {
                'id': "test-destination3",
                'amqp_queue': "testqueue3",
                'config': {
                    'regular_expression': '^2.*$'
                },
            }],
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        # Events are routed using the from_addr of the stored outbound.
        outbound = self.messagehelper.make_outbound("test message",
                                                    from_addr="1234")
        yield self.workerhelper.dispatch_outbound(outbound, 'testqueue1')
        ack = self.messagehelper.make_ack(outbound)
        yield self.workerhelper.dispatch_event(
            ack, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [event] = yield self.workerhelper.wait_for_dispatched_events(
            connector_name='testqueue1')
        self.assertEqual(ack, event)

        outbound = self.messagehelper.make_outbound("test message",
                                                    from_addr="2234")
        yield self.workerhelper.dispatch_outbound(outbound, 'testqueue2')
        ack = self.messagehelper.make_ack(outbound)
        yield self.workerhelper.dispatch_event(
            ack, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [event] = yield self.workerhelper.wait_for_dispatched_events(
            connector_name='testqueue2')
        self.assertEqual(ack, event)
        [event] = yield self.workerhelper.wait_for_dispatched_events(
            connector_name='testqueue3')
        self.assertEqual(ack, event)

    @inlineCallbacks
    def test_inbound_event_routing_no_inbound_message(self):
        """
        If no message can be found in the message store for the event, then
        an error message should be logged
        """
        yield self.get_router_worker({
            'destinations': [{
                'id': "test-destination1",
                'amqp_queue': "testqueue1",
                'config': {
                    'regular_expression': '^1.*$'
                },
            }],
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        logs = []
        log.addObserver(logs.append)
        ack = self.messagehelper.make_ack()
        yield self.workerhelper.dispatch_event(
            ack, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [error_log] = logs
        self.assertIn("Cannot find message", error_log['log_text'])
        self.assertIn("for event, not routing event: ",
                      error_log['log_text'])

    @inlineCallbacks
    def test_inbound_event_routing_no_from_address(self):
        """
        If the message for an event doesn't have a from address, then an
        error message should be logged
        """
        yield self.get_router_worker({
            'destinations': [{
                'id': "test-destination1",
                'amqp_queue': "testqueue1",
                'config': {
                    'regular_expression': '^1.*$'
                },
            }],
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        logs = []
        log.addObserver(logs.append)
        outbound = self.messagehelper.make_outbound("test message",
                                                    from_addr=None)
        yield self.workerhelper.dispatch_outbound(outbound, 'testqueue1')
        ack = self.messagehelper.make_ack(outbound)
        yield self.workerhelper.dispatch_event(
            ack, '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        [error_log] = logs
        self.assertIn("Message has no from address, cannot route event: ",
                      error_log['log_text'])

    @inlineCallbacks
    def test_outbound_message_routing(self):
        """
        Outbound messages should be routed to the configured channel, no
        matter which destination they came from. They should also be stored
        so that events can be routed correctly.
        """
        worker = yield self.get_router_worker({
            'destinations': [{
                'id': "test-destination1",
                'amqp_queue': "testqueue1",
                'config': {
                    'regular_expression': '^1.*$'
                },
            }, {
                'id': "test-destination2",
                'amqp_queue': "testqueue2",
                'config': {
                    'regular_expression': '^2.*$'
                },
            }],
            'channel': '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14',
        })

        outbound = self.messagehelper.make_outbound('test message')
        yield self.workerhelper.dispatch_outbound(outbound, 'testqueue1')
        [message] = yield self.workerhelper.wait_for_dispatched_outbound(
            connector_name='41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        self.assertEqual(outbound, message)
        stored_message = yield worker.outbounds.load_message(
            '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14', outbound['message_id'])
        self.assertEqual(api_from_message(outbound), stored_message)

        yield self.workerhelper.clear_dispatched_outbound(
            connector_name='41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        outbound = self.messagehelper.make_outbound('test message')
        yield self.workerhelper.dispatch_outbound(outbound, 'testqueue2')
        [message] = yield self.workerhelper.wait_for_dispatched_outbound(
            connector_name='41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14')
        self.assertEqual(outbound, message)
        stored_message = yield worker.outbounds.load_message(
            '41e58f4a-2acc-442f-b3e5-3cf2b2f1cf14', outbound['message_id'])
        self.assertEqual(api_from_message(outbound), stored_message)
def setUp(self):
    """Create the MessageHelper used to build test messages."""
    helper = MessageHelper()
    self.msg_helper = self.add_helper(helper)
def setUp(self):
    """Create the message and worker helpers used by the tests."""
    msg_helper = MessageHelper()
    worker_helper = WorkerHelper()
    self.msg_helper = self.add_helper(msg_helper)
    self.worker_helper = self.add_helper(worker_helper)
def setUp(self):
    self.persistence_helper = self.add_helper(
        PersistenceHelper(use_riak=True))
    self.worker_helper = self.add_helper(WorkerHelper())
    self.msg_helper = self.add_helper(MessageHelper())
def setUp(self):
    self.vumi_helper = yield self.add_helper(VumiApiHelper())
    # Create a user account for the tests to operate on.
    self.user_helper = yield self.vumi_helper.make_user(u'user')
    self.msg_helper = self.add_helper(MessageHelper())
def setUp(self):
    self.msg_helper = self.add_helper(MessageHelper())
    self.worker_helper = self.add_helper(WorkerHelper())
    # Start the worker without (the final False arg) starting it fully.
    # NOTE(review): third positional arg presumed to be a start flag —
    # confirm against WorkerHelper.get_worker's signature.
    self.worker = yield self.worker_helper.get_worker(
        DummyWorker, {}, False)
def setUp(self):
    # Use an in-memory fake redis so no external service is needed.
    self.r_server = FakeRedis()
    self.scheduler = Scheduler(self.r_server, self._scheduler_callback)
    self.add_cleanup(self.stop_scheduler)
    # Records deliveries made via _scheduler_callback.
    self._delivery_history = []
    self.msg_helper = self.add_helper(MessageHelper())