def test_get_transport_sad(self):
    """Exercise get_transport() failure paths.

    Scenario attributes select the failure: either the configured driver
    cannot be loaded (``self.rpc_backend`` set, DriverManager raising
    RuntimeError) or the transport URL itself is invalid; ``self.ex``
    describes the expected exception (class, message and attributes).
    """
    self.config(rpc_backend=self.rpc_backend,
                transport_url=self.transport_url)

    if self.rpc_backend:
        # NOTE(review): DriverManager is replaced by bare assignment and
        # never restored, so the mock leaks into later tests in this
        # process; mock.patch.object would be safer — confirm intent.
        driver.DriverManager = mock.Mock()
        invoke_args = [self.conf,
                       oslo_messaging.TransportURL.parse(self.conf,
                                                         self.url)]
        invoke_kwds = dict(default_exchange='openstack',
                           allowed_remote_exmods=[])
        # Force driver loading to fail.
        driver.DriverManager.side_effect = RuntimeError()

    try:
        oslo_messaging.get_transport(self.conf, url=self.url)
        # get_transport() must not succeed in a sad-path scenario.
        self.assertFalse(True)
        # NOTE(review): unreachable — assertFalse(True) above always
        # raises, so this call verification never executes.
        driver.DriverManager.\
            assert_called_once_with('oslo.messaging.drivers',
                                    self.rpc_backend,
                                    invoke_on_load=True,
                                    invoke_args=invoke_args,
                                    invoke_kwds=invoke_kwds)
    except Exception as ex:
        ex_cls = self.ex.pop('cls')
        ex_msg_contains = self.ex.pop('msg_contains')
        self.assertIsInstance(ex, oslo_messaging.MessagingException)
        self.assertIsInstance(ex, ex_cls)
        self.assertIn(ex_msg_contains, six.text_type(ex))
        # Any remaining entries of self.ex are expected attributes of
        # the raised exception (compared via str()).
        for k, v in self.ex.items():
            self.assertTrue(hasattr(ex, k))
            self.assertEqual(v, str(getattr(ex, k)))
def setUp(self): super(EngineTestCase, self).setUp() # Get transport here to let oslo.messaging setup default config # before changing the rpc_backend to the fake driver; otherwise, # oslo.messaging will throw exception. messaging.get_transport(cfg.CONF) # Set the transport to 'fake' for Engine tests. cfg.CONF.set_default('rpc_backend', 'fake') # Drop all RPC objects (transport, clients). rpc.cleanup() transport = rpc.get_transport() self.engine_client = rpc.get_engine_client() self.executor_client = rpc.get_executor_client() self.engine = def_eng.DefaultEngine(self.engine_client) self.executor = def_exec.DefaultExecutor(self.engine_client) LOG.info("Starting engine and executor threads...") self.threads = [ eventlet.spawn(launch_engine_server, transport, self.engine), eventlet.spawn(launch_executor_server, transport, self.executor), ] self.addOnException(self.print_executions) # Start scheduler. scheduler_thread_group = scheduler.setup() self.addCleanup(self.kill_threads) self.addCleanup(scheduler_thread_group.stop)
def test_multiple_servers(self):
    """Verify ping distribution across two servers.

    Builds two servers (optionally sharing a transport, topic, server
    name or endpoint per the scenario attributes), sends one ping via
    each client, and checks which endpoint(s) received which pings
    against self.expect1/self.expect2/self.expect_either.
    """
    url1 = "fake:///" + (self.exchange1 or "")
    url2 = "fake:///" + (self.exchange2 or "")

    transport1 = oslo_messaging.get_transport(self.conf, url=url1)
    if url1 != url2:
        # BUGFIX: this previously passed url=url1, so the "two distinct
        # exchanges" scenarios silently shared a single exchange.
        transport2 = oslo_messaging.get_transport(self.conf, url=url2)
    else:
        transport2 = transport1

    class TestEndpoint(object):
        def __init__(self):
            self.pings = []

        def ping(self, ctxt, arg):
            self.pings.append(arg)

        def alive(self, ctxt):
            return "alive"

    if self.multi_endpoints:
        endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
    else:
        endpoint1 = endpoint2 = TestEndpoint()

    thread1 = self._setup_server(transport1, endpoint1,
                                 topic=self.topic1, server=self.server1)
    thread2 = self._setup_server(transport2, endpoint2,
                                 topic=self.topic2, server=self.server2)
    client1 = self._setup_client(transport1, topic=self.topic1)
    client2 = self._setup_client(transport2, topic=self.topic2)

    client1 = client1.prepare(server=self.server1)
    client2 = client2.prepare(server=self.server2)

    # A synchronous "alive" call first ensures the server is up before
    # switching the client to fanout mode (fanout has no reply).
    if self.fanout1:
        client1.call({}, "alive")
        client1 = client1.prepare(fanout=True)
    if self.fanout2:
        client2.call({}, "alive")
        client2 = client2.prepare(fanout=True)

    (client1.call if self.call1 else client1.cast)({}, "ping", arg="1")
    (client2.call if self.call2 else client2.cast)({}, "ping", arg="2")

    self.assertTrue(thread1.isAlive())
    self._stop_server(client1.prepare(fanout=None),
                      thread1, topic=self.topic1)
    self.assertTrue(thread2.isAlive())
    self._stop_server(client2.prepare(fanout=None),
                      thread2, topic=self.topic2)

    def check(pings, expect):
        self.assertEqual(len(expect), len(pings))
        for a in expect:
            self.assertIn(a, pings)

    if self.expect_either:
        check(endpoint1.pings + endpoint2.pings, self.expect_either)
    else:
        check(endpoint1.pings, self.expect1)
        check(endpoint2.pings, self.expect2)
def set_transport_options(check_backend=True):
    """Register transport options and optionally validate the backend.

    :param check_backend: when True, raise MistralException unless the
        configured transport backend is rabbit or kombu.
    """
    # Calling get_transport() at least once guarantees that every
    # transport-related option is registered on the config object —
    # that call is what performs the registration.
    messaging.get_transport(CONF)

    parsed_url = messaging.TransportURL.parse(CONF, CONF.transport_url)
    backend_name = parsed_url.transport
    if check_backend and backend_name not in ('rabbit', 'kombu'):
        raise exc.MistralException("Unsupported backend: %s" % backend_name)
def main(argv=None):
    """Fork worker processes and run an RPC echo loop against a server.

    Each child repeatedly calls echo() with an optionally-uniquified
    message, verifies the response round-trips unchanged, and (unless
    --reuse-transport) periodically rebuilds the transport/client to
    exercise connection setup. Runs until num_messages is reached or
    Ctrl-C.
    """
    opts = setup_options(argv)
    core.setup_logging(level=logging.WARN if opts.quiet else logging.DEBUG)

    if core.forkme_and_wait(opts.processes):
        # I'm the father and I'm done
        sys.exit(0)

    transport_url = core.rabbit_connection_url(driver='rabbit')
    transport = messaging.get_transport(cfg.CONF, transport_url)
    t = TestClient(transport)
    ctxt = {'a': 1}
    i = 0          # successful echoes so far
    errors = 0     # MessagingTimeout count
    try:
        # num_messages == 0 means "run forever".
        while opts.num_messages == 0 or i < opts.num_messages:
            if opts.no_uuid:
                arg = opts.message
            else:
                # Unique payload so each reply can be matched to its call.
                arg = opts.message + str(uuid.uuid4())
            LOG.debug("Requesting echo(%s)" % arg)
            try:
                response = t.echo(ctxt, arg)
                LOG.info("Got %r" % (response,))
                assert arg == response, "%s != %s" % (arg, response)
                i += 1
            except messaging.exceptions.MessagingTimeout as ex:
                # Timeouts are counted but do not abort the loop.
                LOG.warn('Received MessagingTimeout exception: %s' % str(ex))
                errors += 1
            if opts.publish_interval > 0:
                time.sleep(opts.publish_interval)
            if not opts.reuse_transport:
                # Renew the transport every opts.renew_transport successful
                # messages (or every iteration when the option is <= 0).
                renew = True
                if opts.renew_transport > 0 and i % opts.renew_transport != 0:
                    renew = False
                if renew:
                    if opts.transport_cleanup:
                        # removes the reply_* queue associated with this
                        # transport
                        t._client.transport.cleanup()
                    transport = messaging.get_transport(cfg.CONF,
                                                        transport_url)
                    t = TestClient(transport)
    except KeyboardInterrupt:
        # TODO: clean up connections before exiting
        sys.exit(0)
def init_audit():
    """Initialise the module-level audit notifier from configuration.

    Reads the audit section of the JSON config; when absent, auditing
    stays disabled. Otherwise builds an oslo.messaging Notifier whose
    driver is the configured target ('log' by default).
    """
    global target, notifier

    audit_conf = jsonloader.config_for_audit()
    if audit_conf is None:
        # No audit section configured — leave auditing disabled.
        return

    target = audit_conf.get('target', 'log')

    # A private ConfigOpts keeps oslo.messaging options off the global
    # configuration object.
    conf = oslo_config.cfg.ConfigOpts()
    if target == 'messaging':
        transport = oslo_messaging.get_transport(conf,
                                                 url=audit_conf['url'])
    else:
        transport = oslo_messaging.get_transport(conf)

    notifier = oslo_messaging.Notifier(transport, 'anchor', driver=target)
def test_set_default_control_exchange(self):
    """set_transport_defaults(control_exchange=...) must be forwarded to
    the driver as the ``default_exchange`` invoke keyword.

    Uses mox record/replay: the DriverManager expectation below is
    recorded first, then verified when get_transport() runs.
    """
    oslo_messaging.set_transport_defaults(control_exchange='foo')

    self.mox.StubOutWithMock(driver, 'DriverManager')

    # Only the default_exchange key is pinned; all other arguments are
    # ignored.
    invoke_kwds = mox.ContainsKeyValue('default_exchange', 'foo')
    driver.DriverManager(mox.IgnoreArg(),
                         mox.IgnoreArg(),
                         invoke_on_load=mox.IgnoreArg(),
                         invoke_args=mox.IgnoreArg(),
                         invoke_kwds=invoke_kwds).\
        AndReturn(_FakeManager(_FakeDriver(self.conf)))

    self.mox.ReplayAll()

    oslo_messaging.get_transport(self.conf)
def test_transport_url(self, fake_reset, fake_ensure):
    """The connection URL built from self.url must contain exactly the
    expected hosts (order-insensitive, ';'-separated)."""
    transport = oslo_messaging.get_transport(self.conf, self.url)
    self.addCleanup(transport.cleanup)

    connection = transport._driver._get_connection()
    actual_urls = connection._url.split(";")
    self.assertEqual(sorted(self.expected), sorted(actual_urls))
def main():
    """Forward a Zabbix alarm to Vitrage as an oslo.messaging notification.

    Expects three positional arguments: the message-bus URL, the alarm
    status (topic) and a ``key=value`` per-line body.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('sendto', help='Vitrage message bus path')
    parser.add_argument('topic', help='zabbix topic')
    parser.add_argument('body', help='zabbix body')
    args = parser.parse_args()

    logging.debug('[vitrage] sendto=%s, topic=%s, body=%s',
                  args.sendto, args.topic, args.body)

    transport = messaging.get_transport(cfg.CONF, args.sendto)
    publisher = 'zabbix_%s' % socket.gethostname()
    notifier = messaging.Notifier(transport,
                                  driver='messagingv2',
                                  publisher_id=publisher,
                                  topic='vitrage_notifications')

    # Event type encodes the (lower-cased) alarm status.
    event_type = '%s.%s' % (ZABBIX_EVENT_TYPE, args.topic.lower())

    # Body is "key=value" per line; keys are normalised to lower case.
    payload = {}
    for line in args.body.splitlines():
        key, prop = line.split('=')
        payload[key.lower().strip()] = prop.strip()

    logging.debug('[vitrage] publisher: %s, event: %s, payload %s',
                  publisher, event_type, payload)

    notifier.info(ctxt={'message_id': six.text_type(uuid.uuid4()),
                        'publisher_id': publisher,
                        'timestamp': datetime.utcnow()},
                  event_type=event_type,
                  payload=payload)
def get_transport():
    """Return the process-wide messaging transport, creating it lazily.

    The transport is cached in the module-level _TRANSPORT so repeated
    calls share one connection.
    """
    global _TRANSPORT
    if not _TRANSPORT:
        _TRANSPORT = messaging.get_transport(cfg.CONF)
    return _TRANSPORT
def test_two_pools(self):
    """Listeners in different pools must each receive every notification.

    Two listeners subscribe to the same topic but distinct pools; both
    endpoints are expected to see both messages.
    """
    transport = oslo_messaging.get_transport(self.conf, url='fake:')

    endpoint1 = mock.Mock()
    endpoint1.info.return_value = None
    endpoint2 = mock.Mock()
    endpoint2.info.return_value = None

    targets = [oslo_messaging.Target(topic="topic")]

    listener1_thread = self._setup_listener(transport, [endpoint1],
                                            targets=targets, pool="pool1")
    listener2_thread = self._setup_listener(transport, [endpoint2],
                                            targets=targets, pool="pool2")

    notifier = self._setup_notifier(transport, topic="topic")
    notifier.info({'ctxt': '0'}, 'an_event.start', 'test message0')
    notifier.info({'ctxt': '1'}, 'an_event.start', 'test message1')

    self.wait_for_messages(2, "pool1")
    self.wait_for_messages(2, "pool2")
    self.assertFalse(listener2_thread.stop())
    self.assertFalse(listener1_thread.stop())

    def mocked_endpoint_call(i):
        # Expected dispatch signature for message i.
        return mock.call({'ctxt': '%d' % i}, 'testpublisher',
                         'an_event.start', 'test message%d' % i,
                         {'timestamp': mock.ANY, 'message_id': mock.ANY})

    endpoint1.info.assert_has_calls([mocked_endpoint_call(0),
                                     mocked_endpoint_call(1)])
    endpoint2.info.assert_has_calls([mocked_endpoint_call(0),
                                     mocked_endpoint_call(1)])
def setUp(self):
    """Configure the zmq driver over loopback IPC and build the driver.

    Pub/sub and the router proxy are disabled and a dummy matchmaker is
    used so tests run fully in-process.
    """
    super(ZmqBaseTestCase, self).setUp()
    self.messaging_conf.transport_driver = 'zmq'
    zmq_options.register_opts(self.conf)

    # Set config values
    self.internal_ipc_dir = self.useFixture(fixtures.TempDir()).path
    kwargs = {'rpc_zmq_bind_address': '127.0.0.1',
              'rpc_zmq_host': '127.0.0.1',
              'rpc_zmq_ipc_dir': self.internal_ipc_dir,
              'use_pub_sub': False,
              'use_router_proxy': False,
              'rpc_zmq_matchmaker': 'dummy'}
    self.config(group='oslo_messaging_zmq', **kwargs)
    self.config(rpc_response_timeout=5)

    # Get driver
    transport = oslo_messaging.get_transport(self.conf)
    self.driver = transport._driver

    self.listener = TestServerListener(self.driver)

    # Tear the listener down before the driver, even on failure.
    self.addCleanup(
        StopRpc(self, [('listener', 'stop'), ('driver', 'cleanup')])
    )
def test_two_topics(self):
    """A listener subscribed to two topics receives events from both."""
    transport = oslo_messaging.get_transport(self.conf, url='fake:')
    endpoint = mock.Mock()
    endpoint.info.return_value = None
    targets = [oslo_messaging.Target(topic="topic1"),
               oslo_messaging.Target(topic="topic2")]
    listener_thread = self._setup_listener(transport, [endpoint],
                                           targets=targets)
    notifier = self._setup_notifier(transport, topic='topic1')
    notifier.info({'ctxt': '1'}, 'an_event.start1', 'test')
    notifier = self._setup_notifier(transport, topic='topic2')
    notifier.info({'ctxt': '2'}, 'an_event.start2', 'test')

    self.wait_for_messages(2)
    self.assertFalse(listener_thread.stop())

    # Cross-topic delivery order is not guaranteed, hence any_order.
    endpoint.info.assert_has_calls([
        mock.call({'ctxt': '1'}, 'testpublisher',
                  'an_event.start1', 'test',
                  {'timestamp': mock.ANY, 'message_id': mock.ANY}),
        mock.call({'ctxt': '2'}, 'testpublisher',
                  'an_event.start2', 'test',
                  {'timestamp': mock.ANY, 'message_id': mock.ANY})],
        any_order=True)
def start(self):
    """Start the service and bring up the entity-graph RPC server."""
    LOG.info("Start VitrageApiHandlerService")
    super(VitrageApiHandlerService, self).start()

    transport = oslo_messaging.get_transport(cfg.CONF)

    # TODO(Dany) add real server
    target = oslo_messaging.Target(topic='rpcapiv1', server='localhost')

    # TODO(Dany) add rabbit configuration
    # target = om.Target(topic='testme', server='192.168.56.102')
    # target = oslo_messaging.Target(
    #     topic='testme', server='135.248.18.223')
    # cfg.CONF.set_override('rabbit_host', '135.248.18.223')
    # cfg.CONF.set_override('rabbit_port', 5672)
    # cfg.CONF.set_override('rabbit_userid', 'guest')
    # cfg.CONF.set_override('rabbit_password', 'cloud')
    # cfg.CONF.set_override('rabbit_login_method', 'AMQPLAIN')
    # cfg.CONF.set_override('rabbit_virtual_host', '/')
    cfg.CONF.set_override('rpc_backend', 'rabbit')

    endpoints = [EntityGraphApis(self.entity_graph), ]

    # TODO(Dany) use eventlet instead of threading
    server = oslo_messaging.get_rpc_server(transport, target, endpoints,
                                           executor='threading')
    server.start()

    LOG.info("Finish start VitrageApiHandlerService")
def test_publish_with_none_rabbit_driver(self, cgt):
    """A notifier:// URL with ?driver=kafka must select the kafka
    transport, and delivery failures must raise without queuing locally.

    Both the sample and the event publisher are exercised; each _send is
    expected to be retried 100 times before DeliveryFailure propagates.
    """
    sample_publisher = msg_publisher.SampleNotifierPublisher(
        self.CONF,
        netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka'))
    # The driver=kafka query parameter must be rewritten into a
    # kafka:// transport URL.
    cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092')
    transport = oslo_messaging.get_transport(self.CONF,
                                             'kafka://127.0.0.1:9092')
    self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver)

    side_effect = msg_publisher.DeliveryFailure()
    with mock.patch.object(sample_publisher, '_send') as fake_send:
        fake_send.side_effect = side_effect
        self.assertRaises(
            msg_publisher.DeliveryFailure,
            sample_publisher.publish_samples,
            self.test_sample_data)
        # Failed samples must not accumulate in the local queue.
        self.assertEqual(0, len(sample_publisher.local_queue))
        self.assertEqual(100, len(fake_send.mock_calls))
        fake_send.assert_called_with('metering', mock.ANY)

    event_publisher = msg_publisher.EventNotifierPublisher(
        self.CONF,
        netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka'))
    cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092')
    with mock.patch.object(event_publisher, '_send') as fake_send:
        fake_send.side_effect = side_effect
        self.assertRaises(
            msg_publisher.DeliveryFailure,
            event_publisher.publish_events,
            self.test_event_data)
        self.assertEqual(0, len(event_publisher.local_queue))
        self.assertEqual(100, len(fake_send.mock_calls))
        fake_send.assert_called_with('event', mock.ANY)
def init(conf):
    """Initialise the global RPC transport and notification notifiers.

    Builds the RPC and notification transports, then wires
    LEGACY_NOTIFIER / NOTIFIER according to ``conf.notification_format``:
    'unversioned' (legacy only), 'both', or anything else meaning
    versioned-only.
    """
    global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods,
                                        aliases=TRANSPORT_ALIASES)
    NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
        conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES)
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    if conf.notification_format == 'unversioned':
        # Legacy notifications only; versioned notifier is a no-op.
        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                             serializer=serializer)
        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                      serializer=serializer, driver='noop')
    elif conf.notification_format == 'both':
        # Emit both legacy and versioned notifications.
        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                             serializer=serializer)
        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                      serializer=serializer,
                                      topics=['versioned_notifications'])
    else:
        # Versioned only; legacy notifier is a no-op.
        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                             serializer=serializer,
                                             driver='noop')
        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
                                      serializer=serializer,
                                      topics=['versioned_notifications'])
def test_client_call_timeout(self):
    """A call with timeout=0 against a blocked server must raise
    MessagingTimeout.

    The endpoint blocks on a condition variable until the test releases
    it, guaranteeing the client's deadline expires first.
    """
    transport = oslo_messaging.get_transport(self.conf, url='fake:')

    finished = False
    wait = threading.Condition()

    class TestEndpoint(object):
        def ping(self, ctxt, arg):
            # Block until the test signals completion, so the reply can
            # never arrive before the client times out.
            with wait:
                if not finished:
                    wait.wait()

    server_thread = self._setup_server(transport, TestEndpoint())
    client = self._setup_client(transport)

    # IMPROVED: assertRaises replaces the try/except/else pattern that
    # ended in assertTrue(False) — same check, clearer failure output.
    self.assertRaises(oslo_messaging.MessagingTimeout,
                      client.prepare(timeout=0).call,
                      {}, 'ping', arg='foo')

    # Unblock the endpoint so the server can shut down cleanly.
    with wait:
        finished = True
        wait.notify()

    self._stop_server(client, server_thread)
def __init__(self, messaging_config, node_id, node_rpc_endpoints,
             partition_id=None):
    """Create a DSE node, wire up its RPC plumbing and start it.

    :param messaging_config: oslo.config object used to build the transport
    :param node_id: unique name of this node on the bus
    :param node_rpc_endpoints: endpoint objects exposed via node RPC
        (a DseNodeEndpoints instance is appended to this list)
    :param partition_id: identifier shared by all nodes that can
        communicate with each other; None for the default partition
    """
    self.messaging_config = messaging_config
    self.node_id = node_id
    self.node_rpc_endpoints = node_rpc_endpoints
    # unique identifier shared by all nodes that can communicate
    self.partition_id = partition_id
    self.node_rpc_endpoints.append(DseNodeEndpoints(self))
    self._running = False
    self._services = []
    # Random per-process id distinguishes restarts of the same node_id.
    self.instance = uuid.uuid4()
    self.context = self._message_context()
    self.transport = messaging.get_transport(
        self.messaging_config,
        allowed_remote_exmods=[exception.__name__, ])
    self._rpctarget = self.node_rpc_target(self.node_id, self.node_id)
    self._rpcserver = messaging.get_rpc_server(
        self.transport, self._rpctarget, self.node_rpc_endpoints,
        executor='eventlet')
    self._service_rpc_servers = {}  # {service_id => (rpcserver, target)}
    self._control_bus = DseNodeControlBus(self)
    self.register_service(self._control_bus)
    # keep track of which local services subscribed to which other services
    self.subscribers = {}
    # load configured drivers
    self.loaded_drivers = self.load_drivers()
    # NOTE(review): the constructor starts the node immediately, so
    # callers receive a running instance.
    self.start()
def test_transport_url(self):
    """The parsed transport URL must yield the expected kafka hosts."""
    transport = oslo_messaging.get_transport(self.conf, self.url)
    self.addCleanup(transport.cleanup)

    connection = transport._driver._get_connection(
        kafka_driver.PURPOSE_SEND)
    self.assertEqual(self.expected['hostaddrs'], connection.hostaddrs)
def test_server_wait_method(self):
    """server.wait() must clean up the listener exactly once."""
    transport = oslo_messaging.get_transport(self.conf, url='fake:')
    target = oslo_messaging.Target(topic='foo', server='bar')
    endpoints = [object()]
    serializer = object()

    class MagicMockIgnoreArgs(mock.MagicMock):
        """MagicMock ignores arguments.

        A MagicMock which can never misinterpret the arguments passed to
        it during construction.
        """
        def __init__(self, *args, **kwargs):
            super(MagicMockIgnoreArgs, self).__init__()

    server = oslo_messaging.get_rpc_server(transport, target, endpoints,
                                           serializer=serializer)
    # Mocking executor
    server._executor_cls = MagicMockIgnoreArgs
    server._create_listener = MagicMockIgnoreArgs()
    server.dispatcher = MagicMockIgnoreArgs()
    # Here assigning executor's listener object to listener variable
    # before calling wait method, because in wait method we are
    # setting executor to None.
    server.start()
    listener = server.listener
    server.stop()
    # call server wait method
    server.wait()

    self.assertEqual(1, listener.cleanup.call_count)
def test_set_default_control_exchange(self):
    """set_transport_defaults(control_exchange=...) must reach the driver
    as the ``default_exchange`` invoke keyword.
    """
    oslo_messaging.set_transport_defaults(control_exchange='foo')

    invoke_kwds = dict(default_exchange='foo', allowed_remote_exmods=[])

    # BUGFIX: patch (rather than bare-assign) DriverManager so the real
    # class is restored after the test even on failure; the previous
    # assignment leaked the mock into every subsequent test.
    with mock.patch.object(driver, 'DriverManager') as driver_manager:
        driver_manager.return_value = _FakeManager(_FakeDriver(self.conf))

        oslo_messaging.get_transport(self.conf)

        driver_manager.assert_called_once_with(mock.ANY, mock.ANY,
                                               invoke_on_load=mock.ANY,
                                               invoke_args=mock.ANY,
                                               invoke_kwds=invoke_kwds)
def test_batch_timeout(self):
    """Twelve notifications with batch size 5 arrive as batches 5, 5, 2.

    The final short batch is flushed by the 1-second batch timeout.
    """
    transport = oslo_messaging.get_transport(self.conf, url='fake:')
    endpoint = mock.Mock()
    endpoint.info.return_value = None
    listener_thread = self._setup_listener(transport, [endpoint],
                                           batch=(5, 1))
    notifier = self._setup_notifier(transport)

    for _ in six.moves.range(12):
        notifier.info({}, 'an_event.start', 'test message')

    self.wait_for_messages(3)
    self.assertFalse(listener_thread.stop())

    message = dict(ctxt={},
                   publisher_id='testpublisher',
                   event_type='an_event.start',
                   payload='test message',
                   metadata={'message_id': mock.ANY,
                             'timestamp': mock.ANY})
    expected = [mock.call([message] * size) for size in (5, 5, 2)]
    endpoint.info.assert_has_calls(expected)
def setup(url=None, optional=False):
    """Initialise the oslo_messaging layer.

    :param url: optional transport URL; a fake:// URL triggers eventlet
        monkey-patching of time (the fake driver task-switches via sleep)
    :param optional: when True, an InvalidTransportURL without an
        explicit URL is tolerated and TRANSPORT is left as None
    """
    global TRANSPORT, NOTIFIER

    if url and url.startswith("fake://"):
        # NOTE(sileht): oslo_messaging fake driver uses time.sleep
        # for task switch, so we need to monkey_patch it
        eventlet.monkey_patch(time=True)

    if not TRANSPORT:
        oslo_messaging.set_transport_defaults('bilean')
        exmods = ['bilean.common.exception']
        try:
            TRANSPORT = oslo_messaging.get_transport(
                cfg.CONF, url, allowed_remote_exmods=exmods)
        except oslo_messaging.InvalidTransportURL as e:
            TRANSPORT = None
            if not optional or e.url:
                # NOTE(sileht): oslo_messaging is configured but unloadable
                # so reraise the exception
                raise

    if not NOTIFIER and TRANSPORT:
        serializer = RequestContextSerializer(JsonPayloadSerializer())
        NOTIFIER = oslo_messaging.Notifier(TRANSPORT,
                                           serializer=serializer)
def test_requeue(self):
    """A REQUEUE result causes redelivery; the endpoint sees the same
    notification twice."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")
    endpoint = mock.Mock()
    endpoint.info = mock.Mock()

    def requeue_first_delivery(*args, **kwargs):
        # Requeue only the very first delivery; handle the redelivery.
        if endpoint.info.call_count == 1:
            return oslo_messaging.NotificationResult.REQUEUE
        return oslo_messaging.NotificationResult.HANDLED

    endpoint.info.side_effect = requeue_first_delivery
    listener_thread = self._setup_listener(transport, [endpoint])
    notifier = self._setup_notifier(transport)
    notifier.info({}, "an_event.start", "test")

    self.wait_for_messages(2)
    self.assertFalse(listener_thread.stop())

    expected_call = mock.call(
        {}, "testpublisher", "an_event.start", "test",
        {"timestamp": mock.ANY, "message_id": mock.ANY})
    endpoint.info.assert_has_calls([expected_call, expected_call])
def test_two_pools(self):
    """Each listener pool must independently receive every notification."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")

    endpoint1 = mock.Mock()
    endpoint2 = mock.Mock()
    for ep in (endpoint1, endpoint2):
        ep.info.return_value = None

    targets = [oslo_messaging.Target(topic="topic")]
    listener1_thread = self._setup_listener(transport, [endpoint1],
                                            targets=targets, pool="pool1")
    listener2_thread = self._setup_listener(transport, [endpoint2],
                                            targets=targets, pool="pool2")

    notifier = self._setup_notifier(transport, topic="topic")
    for i in (0, 1):
        notifier.info({"ctxt": "%d" % i}, "an_event.start",
                      "test message%d" % i)

    self.wait_for_messages(2, "pool1")
    self.wait_for_messages(2, "pool2")
    self.assertFalse(listener2_thread.stop())
    self.assertFalse(listener1_thread.stop())

    def expected_call(i):
        # Dispatch signature for message i.
        return mock.call({"ctxt": "%d" % i}, "testpublisher",
                         "an_event.start", "test message%d" % i,
                         {"timestamp": mock.ANY, "message_id": mock.ANY})

    for ep in (endpoint1, endpoint2):
        ep.info.assert_has_calls([expected_call(0), expected_call(1)])
def main(argv=None):
    """Run an RPC test server until stopped, then print throughput stats.

    :param argv: optional argument list forwarded to setup_options()
    """
    opts = setup_options(argv)
    core.setup_logging(level=logging.WARN if opts.quiet else logging.DEBUG)

    transport_url = core.rabbit_connection_url(driver='rabbit')
    transport = oslo_messaging.get_transport(cfg.CONF, transport_url)
    target = oslo_messaging.Target(topic=opts.topic, server=opts.server_name)

    control = ServerControlEndpoint(None)
    control.response_delay = opts.response_delay
    # FIX: num_messages was assigned twice; a single assignment suffices.
    control.num_messages = opts.num_messages
    endpoints = [
        control,
    ]
    server = oslo_messaging.get_rpc_server(transport, target, endpoints,
                                           executor='eventlet')
    # The control endpoint needs the server handle so it can stop it.
    control.server = server

    t_start = time.time()
    try:
        server.start()
        server.wait()
        t_end = time.time()
    except KeyboardInterrupt:
        t_end = time.time()
        server.stop()

    print('*** Stats ***')
    msgs_per_sec = float(control._counter) / float(t_end - t_start)
    print('msgs/sec:\t%.2f' % (msgs_per_sec, ))
    print('msgs:\t%d' % control._counter)
    print('secs:\t%d' % (t_end - t_start, ))
    # BUGFIX: message previously read "Exciting...".
    LOG.info("Exiting...")
def test_two_topics(self):
    """A single listener on two topics must receive events from both."""
    transport = oslo_messaging.get_transport(self.conf, url="fake:")
    endpoint = mock.Mock()
    endpoint.info.return_value = None

    topics = ("topic1", "topic2")
    targets = [oslo_messaging.Target(topic=name) for name in topics]
    listener_thread = self._setup_listener(transport, [endpoint],
                                           targets=targets)

    for idx, topic in enumerate(topics, start=1):
        notifier = self._setup_notifier(transport, topic=topic)
        notifier.info({"ctxt": "%d" % idx}, "an_event.start%d" % idx,
                      "test")

    self.wait_for_messages(2)
    self.assertFalse(listener_thread.stop())

    # Cross-topic ordering is not guaranteed, hence any_order.
    expected = [
        mock.call({"ctxt": "%d" % idx}, "testpublisher",
                  "an_event.start%d" % idx, "test",
                  {"timestamp": mock.ANY, "message_id": mock.ANY})
        for idx in (1, 2)
    ]
    endpoint.info.assert_has_calls(expected, any_order=True)
def test_send_no_timeout(self, fake_publish):
    """Publishing without a timeout must pass expiration=None through."""
    transport = oslo_messaging.get_transport(self.conf,
                                             'kombu+memory:////')
    with transport._driver._get_connection(amqp.PURPOSE_SEND) as pooled:
        pooled.connection._publish(mock.Mock(), 'msg',
                                   routing_key='routing_key')
    fake_publish.assert_called_with('msg', expiration=None)
def init(conf):
    """Initialise the module-level TRANSPORT and NOTIFIER globals.

    :param conf: oslo.config object used to build the transport
    """
    global TRANSPORT, NOTIFIER
    TRANSPORT = oslo_messaging.get_transport(
        conf,
        allowed_remote_exmods=get_allowed_exmods(),
        aliases=TRANSPORT_ALIASES)
    NOTIFIER = oslo_messaging.Notifier(TRANSPORT)
def _do_test_heartbeat_sent(self, fake_ensure_connection,
                            fake_heartbeat_support, fake_heartbeat,
                            fake_logger, heartbeat_side_effect=None,
                            info=None):
    """Shared helper verifying the AMQP heartbeat thread behaviour.

    Waits until heartbeat_check() has run at least once, stops the
    heartbeat, then asserts how often the connection was (re-)ensured
    and what was logged. ``heartbeat_side_effect`` simulates a failing
    heartbeat; ``info`` is the expected info-level log format string in
    that case.
    """
    event = threading.Event()

    def heartbeat_check(rate=2):
        # Signal the test that a heartbeat ran, optionally failing it.
        event.set()
        if heartbeat_side_effect:
            raise heartbeat_side_effect

    fake_heartbeat.side_effect = heartbeat_check

    transport = oslo_messaging.get_transport(self.conf,
                                             'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    conn = transport._driver._get_connection()
    conn.ensure(method=lambda: True)
    event.wait()
    conn._heartbeat_stop()

    # check heartbeat have been called
    self.assertLess(0, fake_heartbeat.call_count)

    if not heartbeat_side_effect:
        # Healthy heartbeat: one connection, nothing logged at info.
        self.assertEqual(1, fake_ensure_connection.call_count)
        self.assertEqual(2, fake_logger.debug.call_count)
        self.assertEqual(0, fake_logger.info.call_count)
    else:
        # Failing heartbeat forces a reconnect and an info message.
        self.assertEqual(2, fake_ensure_connection.call_count)
        self.assertEqual(2, fake_logger.debug.call_count)
        self.assertEqual(1, fake_logger.info.call_count)
        self.assertIn(mock.call(info, mock.ANY),
                      fake_logger.info.mock_calls)
self.monitorPMAApi = MonitorPMAAPI(CONF.hades_monitorPMA_topic, CONF.hades_exchange) def loadPolicy(self, ctxt, host, policys): print "policy loaded is:" print policys for policy in policys: if policy['target'] == 'arbiterPMA': self.arbiterPMAApi.loadPolicy({}, 'pike', policy) elif policy['target'] == 'monitorPMA': self.monitorPMAApi.loadPolicy({}, 'pike', policy) else: return False return True if __name__ == "__main__": CONF.control_exchange = CONF.hades_exchange transport = messaging.get_transport(CONF) target = messaging.Target(topic=CONF.hades_policyService_topic, server='pike') endpoints = [ PolicyServiceManager(), ] server = messaging.get_rpc_server(transport, target, endpoints, executor='blocking') server.start() server.wait()
def get_transport():
    """Build a messaging transport, honouring the deprecated AMQP URL."""
    return oslo_messaging.get_transport(conf=cfg.CONF,
                                        url=_deprecated_amqp_url())
import oslo_messaging as om
from config import *

# Invoke get_transport() once so oslo.messaging registers the default
# configuration options required to create a messaging transport.
transport = om.get_transport(cfg.CONF)

# Load the real configuration from file (overrides the defaults above).
cfg.CONF(['--config-file', 'file.conf'])

# Create the messaging transport from the loaded configuration.
transport = om.get_transport(cfg.CONF)

# Create the target (topic) the RPC server listens on.
target = om.Target(topic='testme')

# Create the RPC client bound to that transport and target.
client = om.RPCClient(transport, target)

# Invoke remote method and wait for a reply.
arg = "Call Method 1"
ctxt = {}
client.call(ctxt, 'test_method1', arg=arg)

# Invoke remote method and return immediately (no reply expected).
arg = "Cast method 1"
ctxt = {}
client.cast(ctxt, 'test_method1', arg=arg)
def setup_url(self, url):
    """Build a transport for *url*, schedule its cleanup and return
    the (driver, url) pair."""
    transport = oslo_messaging.get_transport(self.conf, url)
    self.addCleanup(transport.cleanup)
    return transport._driver, url
def main():
    """CLI entry point for the oslo.messaging RPC/notification profiler.

    Parses the command line, builds the global TRANSPORT appropriate to
    the chosen mode, then dispatches to the matching server or client
    runner and prints the collected statistics.
    """
    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url', dest='url',
                        help="oslo.messaging transport url")
    parser.add_argument('-d', '--debug', dest='debug',
                        action='store_true',
                        help="Turn on DEBUG logging level instead of WARN")
    parser.add_argument('-tp', '--topic', dest='topic',
                        default="profiler_topic",
                        help="Topics to publish/receive messages to/from.")
    parser.add_argument('-s', '--server', dest='server',
                        default="profiler_server",
                        help="Servers to publish/receive messages to/from.")
    parser.add_argument('-tg', '--targets', dest='targets',
                        nargs="+",
                        default=["profiler_topic.profiler_server"],
                        help="Targets to publish/receive messages to/from.")
    parser.add_argument('-l', dest='duration', type=int,
                        help='send messages for certain time')
    parser.add_argument('-j', '--json', dest='json_filename',
                        help='File name to store results in JSON format')
    parser.add_argument('--config-file', dest='config_file', type=str,
                        help="Oslo messaging config file")

    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int,
                        default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    server = subparsers.add_parser('batch-notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int,
                        default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    client = subparsers.add_parser('notify-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=float,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout', dest='timeout', type=int, default=3,
                        help='client timeout')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int,
                        default=-1)
    server.add_argument('-e', '--executor', dest='executor',
                        type=str, default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=float,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout', dest='timeout', type=int, default=3,
                        help='client timeout')
    client.add_argument('--exit-wait', dest='exit_wait', type=int,
                        default=0,
                        help='Keep connections open N seconds after calls '
                        'have been done')
    client.add_argument('--is-cast', dest='is_cast', action='store_true',
                        help='Use `call` or `cast` RPC methods')
    client.add_argument('--is-fanout', dest='is_fanout',
                        action='store_true',
                        help='fanout=True for CAST messages')
    client.add_argument('--sync', dest='sync', choices=('call', 'fanout'),
                        help="stop server when all msg was sent by clients")

    args = parser.parse_args()
    _setup_logging(is_debug=args.debug)
    if args.config_file:
        cfg.CONF(["--config-file", args.config_file])

    global TRANSPORT
    # RPC modes use the RPC transport; notify modes use the notification
    # transport.
    if args.mode in ['rpc-server', 'rpc-client']:
        TRANSPORT = messaging.get_transport(cfg.CONF, url=args.url)
    else:
        TRANSPORT = messaging.get_notification_transport(cfg.CONF,
                                                         url=args.url)

    if args.mode in ['rpc-client', 'notify-client']:
        # always generate maximum number of messages for duration-limited
        # tests
        generate_messages(MESSAGES_LIMIT if args.duration
                          else args.messages)

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.prog = os.path.basename(__file__)
    cfg.CONF.project = 'oslo.messaging'

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    if args.mode == 'rpc-server':
        target = messaging.Target(topic=args.topic, server=args.server)
        endpoint = rpc_server(TRANSPORT, target, args.wait_before_answer,
                              args.executor, args.duration)
        show_server_stats(endpoint, args.json_filename)
    elif args.mode == 'notify-server':
        endpoint = notify_server(TRANSPORT, args.topic,
                                 args.wait_before_answer, args.duration,
                                 args.requeue)
        show_server_stats(endpoint, args.json_filename)
    elif args.mode == 'batch-notify-server':
        endpoint = batch_notify_server(TRANSPORT, args.topic,
                                       args.wait_before_answer,
                                       args.duration, args.requeue)
        show_server_stats(endpoint, args.json_filename)
    elif args.mode == 'notify-client':
        spawn_notify_clients(args.threads, args.topic, TRANSPORT,
                             args.messages, args.wait_after_msg,
                             args.timeout, args.duration)
        show_client_stats(CLIENTS, args.json_filename)
    elif args.mode == 'rpc-client':
        targets = []
        for target in args.targets:
            # "topic.server" -> (topic, server); server may be empty.
            tp, srv = target.partition('.')[::2]
            t = messaging.Target(topic=tp, server=srv,
                                 fanout=args.is_fanout)
            targets.append(t)
        spawn_rpc_clients(args.threads, TRANSPORT, targets,
                          args.wait_after_msg, args.timeout, args.is_cast,
                          args.messages, args.duration, args.sync)
        show_client_stats(CLIENTS, args.json_filename, not args.is_cast)

        if args.exit_wait:
            LOG.info("Finished. waiting for %d seconds", args.exit_wait)
            time.sleep(args.exit_wait)
def setup_service_messaging():
    """Create the global MESSAGING_TRANSPORT if it does not exist yet.

    Idempotent: calling this again while a transport exists is a no-op.
    """
    global MESSAGING_TRANSPORT
    if MESSAGING_TRANSPORT:
        # Already is up
        return
    MESSAGING_TRANSPORT = messaging.get_transport(cfg.CONF,
                                                  aliases=_ALIASES)
def setUp(self):
    """Create the transport under test from ``self.conf``/``self.url``."""
    super(TransportFixture, self).setUp()
    self.transport = oslo_messaging.get_transport(self.conf, url=self.url)
def test_send_receive(self):
    """Drive N sender threads through the kombu in-memory driver and check
    replies, timeouts, remote failures and logged errors.

    Scenario knobs come from test attributes (presumably set by a
    scenarios/testscenarios matrix — not visible here): ``n_senders``,
    ``timeout``, ``failure``, ``expected``, ``rx_id``, ``reply``, ``ctxt``.
    """
    self.config(kombu_missing_consumer_retry_timeout=0.5,
                group="oslo_messaging_rabbit")
    self.config(heartbeat_timeout_threshold=0,
                group="oslo_messaging_rabbit")
    transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    driver = transport._driver
    target = oslo_messaging.Target(topic='testtopic')
    listener = driver.listen(target, None, None)._poll_style_listener
    senders = []
    replies = []
    msgs = []
    errors = []

    def stub_error(msg, *a, **kw):
        # Mimic logging's %-interpolation so we capture the rendered text.
        if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
            a = a[0]
        errors.append(str(msg) % a)

    self.stubs.Set(driver_common.LOG, 'error', stub_error)

    def send_and_wait_for_reply(i):
        # Runs in a worker thread; records either the reply or the
        # exception raised by send().
        try:
            timeout = self.timeout
            replies.append(
                driver.send(target, self.ctxt, {'tx_id': i},
                            wait_for_reply=True,
                            timeout=timeout))
            self.assertFalse(self.failure)
            self.assertIsNone(self.timeout)
        except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
            replies.append(e)
            self.assertTrue(self.failure or self.timeout is not None)

    while len(senders) < self.n_senders:
        senders.append(
            threading.Thread(target=send_and_wait_for_reply,
                             args=(len(senders), )))

    # Start each sender and consume its message on the listener side.
    for i in range(len(senders)):
        senders[i].start()
        received = listener.poll()[0]
        self.assertIsNotNone(received)
        self.assertEqual(self.ctxt, received.ctxt)
        self.assertEqual({'tx_id': i}, received.message)
        msgs.append(received)

    # reply in reverse, except reply to the first guy second from last
    order = list(range(len(senders) - 1, -1, -1))
    if len(order) > 1:
        order[-1], order[-2] = order[-2], order[-1]

    for i in order:
        if self.timeout is None:
            if self.failure:
                # Raise-and-catch to get a populated sys.exc_info() tuple.
                try:
                    raise ZeroDivisionError
                except Exception:
                    failure = sys.exc_info()
                msgs[i].reply(failure=failure,
                              log_failure=not self.expected)
            elif self.rx_id:
                msgs[i].reply({'rx_id': i})
            else:
                msgs[i].reply(self.reply)
        senders[i].join()

    self.assertEqual(len(senders), len(replies))
    for i, reply in enumerate(replies):
        if self.timeout is not None:
            self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
        elif self.failure:
            self.assertIsInstance(reply, ZeroDivisionError)
        elif self.rx_id:
            self.assertEqual({'rx_id': order[i]}, reply)
        else:
            self.assertEqual(self.reply, reply)
    # Unexpected remote failures must have been logged; otherwise no errors.
    if not self.timeout and self.failure and not self.expected:
        self.assertTrue(len(errors) > 0, errors)
    else:
        self.assertEqual(0, len(errors), errors)
def init():
    """Initialize the module-global TRANSPORT and NOTIFIER from CONF."""
    global TRANSPORT, NOTIFIER
    transport = messaging.get_transport(CONF)
    TRANSPORT = transport
    # Publisher id comes from the [notifications] config section.
    NOTIFIER = messaging.Notifier(
        transport, publisher_id=CONF.notifications.publisher_id)
def test_send_receive(self):
    """Verify replies can be received out of order.

    Three sender threads are used: the first is paused (via a stubbed
    ReplyWaiter.wait) so the second thread has to queue the reply meant
    for the first; the third sends without wait_for_reply to cover the
    no-msg_id path.
    """
    transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    driver = transport._driver
    target = oslo_messaging.Target(topic='testtopic')
    listener = driver.listen(target, None, None)._poll_style_listener
    senders = []
    replies = []
    msgs = []

    wait_conditions = []
    orig_reply_waiter = amqpdriver.ReplyWaiter.wait

    def reply_waiter(self, msg_id, timeout):
        # If a condition is queued, rendezvous with the test thread
        # before delegating to the real wait().
        if wait_conditions:
            cond = wait_conditions.pop()
            with cond:
                cond.notify()
            with cond:
                cond.wait()
        return orig_reply_waiter(self, msg_id, timeout)

    self.stubs.Set(amqpdriver.ReplyWaiter, 'wait', reply_waiter)

    def send_and_wait_for_reply(i, wait_for_reply):
        replies.append(driver.send(target, {}, {'tx_id': i},
                                   wait_for_reply=wait_for_reply,
                                   timeout=None))

    while len(senders) < 2:
        t = threading.Thread(target=send_and_wait_for_reply,
                             args=(len(senders), True))
        t.daemon = True
        senders.append(t)

    # test the case then msg_id is not set
    t = threading.Thread(target=send_and_wait_for_reply,
                         args=(len(senders), False))
    t.daemon = True
    senders.append(t)

    # Start the first guy, receive his message, but delay his polling
    notify_condition = threading.Condition()
    wait_conditions.append(notify_condition)
    with notify_condition:
        senders[0].start()
        notify_condition.wait()

    msgs.extend(listener.poll())
    self.assertEqual({'tx_id': 0}, msgs[-1].message)

    # Start the second guy, receive his message
    senders[1].start()
    msgs.extend(listener.poll())
    self.assertEqual({'tx_id': 1}, msgs[-1].message)

    # Reply to both in order, making the second thread queue
    # the reply meant for the first thread
    msgs[0].reply({'rx_id': 0})
    msgs[1].reply({'rx_id': 1})

    # Wait for the second thread to finish
    senders[1].join()

    # Start the 3rd guy, receive his message
    senders[2].start()
    msgs.extend(listener.poll())
    self.assertEqual({'tx_id': 2}, msgs[-1].message)

    # Verify the _send_reply was not invoked by driver:
    with mock.patch.object(msgs[2], '_send_reply') as method:
        msgs[2].reply({'rx_id': 2})
        self.assertEqual(method.call_count, 0)

    # Wait for the 3rd thread to finish
    senders[2].join()

    # Let the first thread continue
    with notify_condition:
        notify_condition.notify()

    # Wait for the first thread to finish
    senders[0].join()

    # Verify replies were received out of order
    self.assertEqual(len(senders), len(replies))
    self.assertEqual({'rx_id': 1}, replies[0])
    self.assertIsNone(replies[1])
    self.assertEqual({'rx_id': 0}, replies[2])
def init_action_rpc(conf):
    """Create the module-global messaging transport from *conf*."""
    global TRANSPORT
    TRANSPORT = oslo_messaging.get_transport(conf)
def connection_with(self, prefetch, purpose):
    """Build an in-memory kombu transport with the given QoS prefetch and
    request a connection for *purpose* (side effects only; no return).
    """
    self.config(rabbit_qos_prefetch_count=prefetch,
                group="oslo_messaging_rabbit")
    transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////')
    # Touch the driver's connection pool so the prefetch setting is applied.
    transport._driver._get_connection(purpose)
def test_driver_load(self):
    """The configured transport must load the ZeroMQ driver."""
    transport = oslo_messaging.get_transport(self.conf)
    self.assertIsInstance(transport._driver, impl_zmq.ZmqDriver)
def get_transport():
    """Return a messaging transport built from the global CONF,
    honoring the legacy transport-name aliases.
    """
    transport = oslo_messaging.get_transport(CONF, aliases=_ALIASES)
    return transport
# CONF.set_override('rabbit_port', 5672, group='rabbit') # CONF.set_override('rabbit_virtual_host', '/', group='rabbit') # CONF.set_override('rabbita_login_method', 'AMQPLAIN', group='rabbit') # CONF.set_override('rabbit_userid', 'rabbit', group='rabbit') # CONF.set_override('rabbit_password', 'welcome@123', group='rabbit') # print(CONF.rabbit.rabbit_host) print(CONF.oslo_messaging_rabbit.rabbit_host) res = [{k: v} for k, v in cfg.CONF.items()] pprint(res) log.info('configuring connection') #########RPC Server # create transport and target transport_url = 'rabbit://*****:*****@192.168.111.139:5672/' transport = om.get_transport(CONF, transport_url) #transport = om.get_rpc_transport(CONF) ''' >>> dir(transport.conf) ['GroupAttrProxy', '__abstractmethods__', '__class__', '__contains__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_abc_cache', '_abc_negative_cache', '_abc_negative_cache_version', '_abc_registry', '_conf', '_group', '_url', '_validate_query', 'get', 'items', 'keys', 'values'] >>> transport.conf.rabbit.get('rpc_backend', ) 'rabbit' >>> transport.conf.rabbit.get('rabbit_host', ) 'uvm3' ''' target = om.Target(topic='testme', server='192.168.111.140') # # create endpoints class TestEndpoint(object): def test_method1(self, ctx, arg):
def main(argv=None):
    """Run a test RPC server until interrupted.

    Parses command-line options (topic/exchange/namespace/version/url/
    executor), starts an oslo.messaging RPC server with a single
    TestEndpoint, and blocks until KeyboardInterrupt.

    :param argv: argument list for optparse (defaults to sys.argv)
    :returns: False if no server name was supplied, True otherwise
    """
    _usage = """Usage: %prog [options] <server name>"""
    parser = optparse.OptionParser(usage=_usage)
    parser.add_option("--topic", action="store", default="my-topic",
                      help="target topic, default 'my-topic'")
    parser.add_option("--exchange", action="store", default="my-exchange",
                      help="target exchange, default 'my-exchange'")
    parser.add_option("--namespace", action="store", default="my-namespace",
                      help="target namespace, default 'my-namespace'")
    parser.add_option("--version", action="store", default="1.1",
                      help="target version, default '1.1'")
    parser.add_option("--url", action="store", default="rabbit://localhost",
                      help="transport address, default 'rabbit://localhost'")
    parser.add_option("--executor", action="store", default="blocking",
                      help="defaults to 'blocking'")
    parser.add_option("--oslo-config", type="string",
                      help="the oslo.messaging configuration file.")
    opts, extra = parser.parse_args(args=argv)
    if not extra:
        print("<server-name> not supplied!")
        return False
    server_name = extra[0]

    rpc_log_init()
    LOG.info("Running server, name=%s exchange=%s topic=%s namespace=%s"
             % (server_name, opts.exchange, opts.topic, opts.namespace))

    if opts.oslo_config:
        LOG.info("Loading config file %s" % opts.oslo_config)
        cfg.CONF(["--config-file", opts.oslo_config])

    transport = messaging.get_transport(cfg.CONF, url=opts.url)
    target = messaging.Target(exchange=opts.exchange,
                              topic=opts.topic,
                              namespace=opts.namespace,
                              server=server_name,
                              version=opts.version)
    server = messaging.get_rpc_server(transport, target,
                                      [TestEndpoint(server_name, target)],
                                      executor=opts.executor)
    try:
        server.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        LOG.info("Stopping..")
        server.stop()
        # Fix: oslo.messaging servers must be wait()ed after stop() so
        # in-flight requests complete and listener resources are released.
        server.wait()
    return True
def test_multiple_servers(self):
    """Run two RPC servers on (possibly) different fake exchanges, ping
    them via call/cast (optionally fanout), and verify which endpoint(s)
    received which pings according to the scenario attributes.
    """
    url1 = 'fake:///' + (self.exchange1 or '')
    url2 = 'fake:///' + (self.exchange2 or '')

    transport1 = oslo_messaging.get_transport(self.conf, url=url1)
    if url1 != url2:
        # Bug fix: the second transport must be built from url2; the
        # original passed url1 here, so distinct-exchange scenarios
        # silently shared a single exchange.
        transport2 = oslo_messaging.get_transport(self.conf, url=url2)
    else:
        transport2 = transport1

    class TestEndpoint(object):
        def __init__(self):
            self.pings = []

        def ping(self, ctxt, arg):
            self.pings.append(arg)

        def alive(self, ctxt):
            return 'alive'

    if self.multi_endpoints:
        endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
    else:
        endpoint1 = endpoint2 = TestEndpoint()

    thread1 = self._setup_server(transport1, endpoint1,
                                 topic=self.topic1, server=self.server1)
    thread2 = self._setup_server(transport2, endpoint2,
                                 topic=self.topic2, server=self.server2)
    client1 = self._setup_client(transport1, topic=self.topic1)
    client2 = self._setup_client(transport2, topic=self.topic2)

    client1 = client1.prepare(server=self.server1)
    client2 = client2.prepare(server=self.server2)

    if self.fanout1:
        # A synchronous call first makes sure the server is up before
        # we switch the client to fire-and-forget fanout mode.
        client1.call({}, 'alive')
        client1 = client1.prepare(fanout=True)
    if self.fanout2:
        client2.call({}, 'alive')
        client2 = client2.prepare(fanout=True)

    (client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
    (client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')

    # is_alive() replaces camelCase isAlive(), which was removed in
    # Python 3.9.
    self.assertTrue(thread1.is_alive())
    self._stop_server(client1.prepare(fanout=None),
                      thread1, topic=self.topic1)
    self.assertTrue(thread2.is_alive())
    self._stop_server(client2.prepare(fanout=None),
                      thread2, topic=self.topic2)

    def check(pings, expect):
        self.assertEqual(len(expect), len(pings))
        for a in expect:
            self.assertIn(a, pings)

    if self.expect_either:
        check(endpoint1.pings + endpoint2.pings, self.expect_either)
    else:
        check(endpoint1.pings, self.expect1)
        check(endpoint2.pings, self.expect2)
def create_transport(url):
    """Build a messaging transport for *url*.

    Remote exception modules allowed to cross the wire come from
    get_allowed_exmods(); legacy names are mapped via TRANSPORT_ALIASES.
    """
    remote_exmods = get_allowed_exmods()
    return messaging.get_transport(CONF,
                                   url=url,
                                   allowed_remote_exmods=remote_exmods,
                                   aliases=TRANSPORT_ALIASES)
def __init__(self):
    """Set up notifier state: topics, publisher id, driver and transport."""
    # Topics notifications are emitted to (eon configuration option).
    self._topics = CONF.eon_notifier_topics
    self._publisher_id = "eon"
    self._driver = 'messaging'
    # NOTE(review): both CONF and cfg.CONF are used here — presumably the
    # same global config object; confirm and unify.
    self._transport = oslo_messaging.get_transport(cfg.CONF)
    self._notifiers = self._initialize_notifiers()
def main():
    """Parse CLI arguments and run the selected profiler role
    (notify-server, notify-client, rpc-server or rpc-client).
    """

    def _bool_opt(value):
        # Fix: argparse's type=bool treats ANY non-empty string — even
        # "False" — as True. Parse common spellings explicitly so
        # '--is-cast False' and '--show-stats False' behave as expected.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url', dest='url',
                        default='rabbit://*****:*****@localhost/',
                        help="oslo.messaging transport url")
    parser.add_argument('-d', '--debug', dest='debug', type=_bool_opt,
                        default=False,
                        help="Turn on DEBUG logging level instead of WARN")
    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')

    client = subparsers.add_parser('notify-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t', dest='timeout', type=int, default=3,
                        help='client timeout')
    client.add_argument('-s', dest='service', default='nova',
                        help='nova, cinder or glance')
    client.add_argument('-a', dest='action', default='create',
                        help='create or delete')
    client.add_argument('-x', dest='project_id', default='tenant_abc',
                        help='project_id')
    client.add_argument('-r', dest='resource_id', default='resource_abc',
                        help='resource_id')
    client.add_argument('-d', dest='load_date', default='2015-10-01',
                        help='date in format yyyy-mm-dd')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--show-stats', dest='show_stats',
                        type=_bool_opt, default=True)
    server.add_argument('-e', '--executor', dest='executor',
                        type=str, default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t', dest='timeout', type=int, default=3,
                        help='client timeout')
    client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0,
                        help='Keep connections open N seconds after calls '
                        'have been done')
    client.add_argument('--is-cast', dest='is_cast',
                        type=_bool_opt, default=False,
                        help='Use `call` or `cast` RPC methods')

    args = parser.parse_args()
    _setup_logging(is_debug=args.debug)

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.notification_topics = "notif"
    cfg.CONF.notification_driver = "messaging"

    transport = messaging.get_transport(cfg.CONF, url=args.url)
    target = messaging.Target(topic='profiler_topic',
                              server='profiler_server')

    if args.mode == 'rpc-server':
        if args.url.startswith('zmq'):
            # zmq needs a redis matchmaker; start from a clean db.
            cfg.CONF.rpc_zmq_matchmaker = "redis"
            transport._driver.matchmaker._redis.flushdb()
        rpc_server(transport, target, args.wait_before_answer,
                   args.executor, args.show_stats)
    elif args.mode == 'notify-server':
        notify_server(transport)
    elif args.mode == 'notify-client':
        threads_spawner(args.threads, notifier, transport, args.messages,
                        args.wait_after_msg, args.timeout, args.service,
                        args.action, args.project_id, args.resource_id,
                        args.load_date)
    elif args.mode == 'rpc-client':
        start = datetime.datetime.now()
        threads_spawner(args.threads, send_msg, transport, target,
                        args.messages, args.wait_after_msg, args.timeout,
                        args.is_cast)
        time_elapsed = (datetime.datetime.now() - start).total_seconds()
        msg_count = args.messages * args.threads
        # Fix: "Bandwight" typo corrected in the report line.
        print('%d messages was sent for %s seconds. Bandwidth is %s msg/sec'
              % (msg_count, time_elapsed, (msg_count / time_elapsed)))
        LOG.info("calls finished, wait %d seconds" % args.exit_wait)
        time.sleep(args.exit_wait)
def setUp(self):
    """Configure the kafka transport driver and keep a driver handle."""
    super(TestKafkaDriver, self).setUp()
    self.messaging_conf.transport_driver = 'kafka'
    transport = oslo_messaging.get_transport(self.conf)
    self.driver = transport._driver
def __init__(self):
    """Initialize the connection: no servers yet, transport from aim config."""
    super(Connection, self).__init__()
    # RPC servers started on this connection (populated elsewhere).
    self.servers = []
    self.transport = oslo_messaging.get_transport(aim_cfg.CONF)
def main():
    """Parse CLI arguments and run the selected profiler role
    (notify-server, batch-notify-server, notify-client, rpc-server or
    rpc-client), then report throughput for the rpc-client role.
    """

    def _bool_opt(value):
        # Fix: argparse's type=bool treats ANY non-empty string — even
        # "False" — as True. Parse common spellings explicitly so
        # '--is-cast False' and '--show-stats False' behave as expected.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url', dest='url',
                        default='rabbit://*****:*****@localhost/',
                        help="oslo.messaging transport url")
    parser.add_argument('-d', '--debug', dest='debug', type=_bool_opt,
                        default=False,
                        help="Turn on DEBUG logging level instead of WARN")
    parser.add_argument('-tp', '--topic', dest='topic',
                        default="profiler_topic",
                        help="Topic to publish/receive messages to/from.")
    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')
    server.add_argument('--show-stats', dest='show_stats',
                        type=_bool_opt, default=True)
    server = subparsers.add_parser('batch-notify-server')
    server.add_argument('--show-stats', dest='show_stats',
                        type=_bool_opt, default=True)

    client = subparsers.add_parser('notify-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t', dest='timeout', type=int, default=3,
                        help='client timeout')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--show-stats', dest='show_stats',
                        type=_bool_opt, default=True)
    server.add_argument('-e', '--executor', dest='executor',
                        type=str, default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p', dest='threads', type=int, default=1,
                        help='number of client threads')
    client.add_argument('-m', dest='messages', type=int, default=1,
                        help='number of call per threads')
    client.add_argument('-w', dest='wait_after_msg', type=int, default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t', dest='timeout', type=int, default=3,
                        help='client timeout')
    client.add_argument('--exit-wait', dest='exit_wait', type=int, default=0,
                        help='Keep connections open N seconds after calls '
                        'have been done')
    client.add_argument('--is-cast', dest='is_cast',
                        type=_bool_opt, default=False,
                        help='Use `call` or `cast` RPC methods')

    args = parser.parse_args()
    _setup_logging(is_debug=args.debug)

    # RPC roles use a plain transport; notification roles use the
    # notification transport.
    if args.mode in ['rpc-server', 'rpc-client']:
        transport = messaging.get_transport(cfg.CONF, url=args.url)
    else:
        transport = messaging.get_notification_transport(cfg.CONF,
                                                         url=args.url)
    # NOTE(review): these notification options are set after the transport
    # is created; presumably oslo.messaging reads them lazily — confirm.
    cfg.CONF.oslo_messaging_notifications.topics = "notif"
    cfg.CONF.oslo_messaging_notifications.driver = "messaging"
    target = messaging.Target(topic=args.topic, server='profiler_server')

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.prog = os.path.basename(__file__)
    cfg.CONF.project = 'oslo.messaging'

    if args.mode == 'rpc-server':
        if args.url.startswith('zmq'):
            # zmq needs a redis matchmaker; start from a clean db.
            cfg.CONF.rpc_zmq_matchmaker = "redis"
            transport._driver.matchmaker._redis.flushdb()
        rpc_server(transport, target, args.wait_before_answer,
                   args.executor, args.show_stats)
    elif args.mode == 'notify-server':
        notify_server(transport, args.show_stats)
    elif args.mode == 'batch-notify-server':
        batch_notify_server(transport, args.show_stats)
    elif args.mode == 'notify-client':
        threads_spawner(args.threads, notifier, transport, args.messages,
                        args.wait_after_msg, args.timeout)
    elif args.mode == 'rpc-client':
        init_msg(args.messages)
        start = datetime.datetime.now()
        threads_spawner(args.threads, send_msg, transport, target,
                        args.wait_after_msg, args.timeout, args.is_cast,
                        args.messages)
        time_elapsed = (datetime.datetime.now() - start).total_seconds()

        # Aggregate per-client counters for the throughput report.
        msg_count = 0
        total_bytes = 0
        for client in RPC_CLIENTS:
            msg_count += client.msg_sent
            total_bytes += client.bytes

        LOG.info(
            '%d messages were sent for %d seconds. '
            'Bandwidth was %d msg/sec', msg_count, time_elapsed,
            (msg_count / time_elapsed))
        log_msg = '%s bytes were sent for %d seconds. Bandwidth is %d b/s' % (
            total_bytes, time_elapsed, (total_bytes / time_elapsed))
        LOG.info(log_msg)
        # Append the byte-throughput line to a per-topic results file.
        with open('./oslo_res_%s.txt' % args.topic, 'a+') as f:
            f.write(log_msg + '\n')

        LOG.info("calls finished, wait %d seconds" % args.exit_wait)
        time.sleep(args.exit_wait)
def test_send_receive(self):
    """Drive N sender threads through the kombu in-memory driver with a
    call-monitor timeout and check replies, timeouts and remote failures.

    Scenario knobs come from test attributes (presumably a scenarios
    matrix — not visible here): ``n_senders``, ``timeout``,
    ``call_monitor_timeout``, ``failure``, ``rx_id``, ``reply``, ``ctxt``.
    """
    self.config(kombu_missing_consumer_retry_timeout=0.5,
                group="oslo_messaging_rabbit")
    self.config(heartbeat_timeout_threshold=0,
                group="oslo_messaging_rabbit")
    transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    driver = transport._driver
    target = oslo_messaging.Target(topic='testtopic')
    listener = driver.listen(target, None, None)._poll_style_listener
    senders = []
    replies = []
    msgs = []

    # FIXME(danms): Surely this is not the right way to do this...
    self.ctxt['client_timeout'] = self.call_monitor_timeout

    def send_and_wait_for_reply(i):
        # Runs in a worker thread; records either the reply or the
        # exception raised by send().
        try:
            timeout = self.timeout
            cm_timeout = self.call_monitor_timeout
            replies.append(driver.send(target, self.ctxt, {'tx_id': i},
                                       wait_for_reply=True,
                                       timeout=timeout,
                                       call_monitor_timeout=cm_timeout))
            self.assertFalse(self.failure)
            self.assertIsNone(self.timeout)
        except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
            replies.append(e)
            self.assertTrue(self.failure or self.timeout is not None)

    while len(senders) < self.n_senders:
        senders.append(threading.Thread(target=send_and_wait_for_reply,
                                        args=(len(senders), )))

    # Start each sender and consume its message on the listener side.
    for i in range(len(senders)):
        senders[i].start()
        received = listener.poll()[0]
        self.assertIsNotNone(received)
        self.assertEqual(self.ctxt, received.ctxt)
        self.assertEqual({'tx_id': i}, received.message)
        msgs.append(received)

    # reply in reverse, except reply to the first guy second from last
    order = list(range(len(senders) - 1, -1, -1))
    if len(order) > 1:
        order[-1], order[-2] = order[-2], order[-1]

    for i in order:
        if self.timeout is None:
            if self.failure:
                # Raise-and-catch to get a populated sys.exc_info() tuple.
                try:
                    raise ZeroDivisionError
                except Exception:
                    failure = sys.exc_info()
                msgs[i].reply(failure=failure)
            elif self.rx_id:
                msgs[i].reply({'rx_id': i})
            else:
                msgs[i].reply(self.reply)
        senders[i].join()

    self.assertEqual(len(senders), len(replies))
    for i, reply in enumerate(replies):
        if self.timeout is not None:
            self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
        elif self.failure:
            self.assertIsInstance(reply, ZeroDivisionError)
        elif self.rx_id:
            self.assertEqual({'rx_id': order[i]}, reply)
        else:
            self.assertEqual(self.reply, reply)
def init(conf):
    """Initialize the module-global TRANSPORT and NOTIFIER for *conf*.

    Remote exception modules allowed to cross the wire come from
    get_allowed_exmods(); notifications are serialized with the
    request-context/JSON-payload serializer pair.
    """
    global TRANSPORT, NOTIFIER
    allowed = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf, allowed_remote_exmods=allowed)
    NOTIFIER = messaging.Notifier(
        TRANSPORT,
        serializer=RequestContextSerializer(JsonPayloadSerializer()))
def test_send_receive(self):
    """Drive N sender threads through the kombu in-memory driver, covering
    replies, timeouts, remote failures, logged errors and the
    "reply exchange gone" (404) path.

    Scenario knobs come from test attributes (presumably a scenarios
    matrix — not visible here): ``n_senders``, ``timeout``, ``failure``,
    ``expected``, ``rx_id``, ``reply``, ``reply_failure_404``, ``ctxt``.
    """
    self.config(kombu_missing_consumer_retry_timeout=0.5,
                group="oslo_messaging_rabbit")
    self.config(heartbeat_timeout_threshold=0,
                group="oslo_messaging_rabbit")
    transport = oslo_messaging.get_transport(self.conf, 'kombu+memory:////')
    self.addCleanup(transport.cleanup)
    driver = transport._driver
    target = oslo_messaging.Target(topic='testtopic')
    listener = driver.listen(target)
    senders = []
    replies = []
    msgs = []
    errors = []

    def stub_error(msg, *a, **kw):
        # Mimic logging's %-interpolation so we capture the rendered text.
        if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
            a = a[0]
        errors.append(str(msg) % a)

    self.stubs.Set(driver_common.LOG, 'error', stub_error)

    def send_and_wait_for_reply(i):
        # Runs in a worker thread; records either the reply or the
        # exception raised by send().
        try:
            if self.reply_failure_404:
                # Replies will never arrive in this scenario; use a
                # short timeout so the test stays fast.
                timeout = 0.01
            else:
                timeout = self.timeout
            replies.append(
                driver.send(target, self.ctxt, {'tx_id': i},
                            wait_for_reply=True,
                            timeout=timeout))
            self.assertFalse(self.failure)
            self.assertIsNone(self.timeout)
        except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
            replies.append(e)
            self.assertTrue(self.failure or self.timeout is not None
                            or self.reply_failure_404)

    while len(senders) < self.n_senders:
        senders.append(
            threading.Thread(target=send_and_wait_for_reply,
                             args=(len(senders), )))

    # Start each sender and consume its message on the listener side.
    for i in range(len(senders)):
        senders[i].start()
        received = listener.poll()[0]
        self.assertIsNotNone(received)
        self.assertEqual(self.ctxt, received.ctxt)
        self.assertEqual({'tx_id': i}, received.message)
        msgs.append(received)

    # reply in reverse, except reply to the first guy second from last
    order = list(range(len(senders) - 1, -1, -1))
    if len(order) > 1:
        order[-1], order[-2] = order[-2], order[-1]

    if self.reply_failure_404:
        start = time.time()
        # NOTE(sileht): Simulate a rpc client restart
        # By returning a ExchangeNotFound when we try to
        # send reply
        exc = (
            driver._reply_q_conn.connection.connection.channel_errors[0]())
        exc.code = 404
        self.useFixture(
            mockpatch.Patch('kombu.messaging.Producer.publish',
                            side_effect=exc))

    for i in order:
        if self.timeout is None:
            if self.failure:
                # Raise-and-catch to get a populated sys.exc_info() tuple.
                try:
                    raise ZeroDivisionError
                except Exception:
                    failure = sys.exc_info()
                msgs[i].reply(failure=failure,
                              log_failure=not self.expected)
            elif self.rx_id:
                msgs[i].reply({'rx_id': i})
            else:
                msgs[i].reply(self.reply)
        elif self.reply_failure_404:
            msgs[i].reply({})
        senders[i].join()

    if self.reply_failure_404:
        # NOTE(sileht) all reply fail, first take
        # kombu_missing_consumer_retry_timeout seconds to fail
        # next immediately fail
        dt = time.time() - start
        rabbit_conf = self.conf.oslo_messaging_rabbit
        timeout = rabbit_conf.kombu_missing_consumer_retry_timeout
        self.assertTrue(timeout <= dt < (timeout + 0.100), dt)

    self.assertEqual(len(senders), len(replies))
    for i, reply in enumerate(replies):
        if self.timeout is not None or self.reply_failure_404:
            self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
        elif self.failure:
            self.assertIsInstance(reply, ZeroDivisionError)
        elif self.rx_id:
            self.assertEqual({'rx_id': order[i]}, reply)
        else:
            self.assertEqual(self.reply, reply)
    # Unexpected remote failures must have been logged; otherwise no errors.
    if not self.timeout and self.failure and not self.expected:
        self.assertTrue(len(errors) > 0, errors)
    else:
        self.assertEqual(0, len(errors), errors)
class TestClient(object):
    """Thin wrapper around an oslo.messaging RPCClient for demo calls."""

    def __init__(self, transport, target):
        self.transport = transport
        self.target = target
        self._client = msg.RPCClient(self.transport, self.target)

    def test(self):
        """Send one call and one cast to the 'test' method on the server."""
        cctxt = self._client.prepare(namespace='control', version='2.0')
        # Call (synchronous, waits for the server's reply)
        cctxt.call(ctxt={}, method='test', arg='Hello, my name is Kien')
        # Cast (fire-and-forget)
        cctxt.cast(ctxt={}, method='test', arg='I am a beginer Python')

# Script body: runs at import time.
# Load configuration (including auth) from msg.conf
cfg.CONF(['--config-file', 'msg.conf'])
# Create the messaging transport
transport = msg.get_transport(cfg.CONF)
# Create the target topic
target = msg.Target(topic='kiennn')
# Create the RPC client wrapper
client = TestClient(transport, target)
# Issue the demo call/cast
client.test()
#!/usr/bin/env python # coding: utf-8 import socket import logging as symlog from oslo_log import log as logging from oslo_log import helpers as log_helpers from oslo_config import cfg import oslo_messaging LOG = logging.getLogger(__name__) symlog.basicConfig(level=symlog.INFO) transport_url = 'rabbit://*****:*****@sidewinder.rmq.cloudamqp.com:5672/gjxdknsw' transport = oslo_messaging.get_transport(cfg.CONF, transport_url) target = oslo_messaging.Target(exchange="basic", topic="basic_agent", server=socket.gethostname()) client = oslo_messaging.RPCClient(transport, target) LOG.info(client.prepare(fanout=False).call({}, "func1", mtype="call")) LOG.info(client.prepare(fanout=False).cast({}, "func1", mtype="cast")) #c = client.prepare() #c.call(context, "func1", arg=1)
from mistral.api.controllers.v2 import execution from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import api as sql_db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.rpc import base as rpc_base from mistral.rpc import clients as rpc_clients from mistral.tests.unit.api import base from mistral.tests.unit import base as unit_base from mistral import utils from mistral.utils import rest_utils from mistral.workflow import states # This line is needed for correct initialization of messaging config. oslo_messaging.get_transport(cfg.CONF) WF_EX = models.WorkflowExecution( id='123e4567-e89b-12d3-a456-426655440000', workflow_name='some', workflow_id='123e4567-e89b-12d3-a456-426655441111', description='execution description.', spec={'name': 'some'}, state=states.RUNNING, state_info=None, input={'foo': 'bar'}, output={}, params={'env': { 'k1': 'abc' }}, created_at=datetime.datetime(1970, 1, 1),
def test_driver_load(self):
    """The configured transport must load the Kafka driver."""
    transport = oslo_messaging.get_transport(self.conf)
    self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver)