def test_sending_rpc_call_to_nova(connection):
    """End-to-end check: an RPC call sent via `sending.send_rpc` reaches a
    nova-side consumer and the reply comes back intact.
    """

    class Proxy(object):
        # Minimal nova-side handler: echo the 'foo' argument back.
        def testmethod(self, context, foo):
            return {'foo': foo}

    nova_conn = rpc.create_connection()
    nova_conn.create_consumer('test', Proxy())
    consumer_thread = eventlet.spawn(nova_conn.consume)
    try:
        # Yield once so the consumer greenthread starts listening first.
        eventlet.sleep(0)
        with connection as conn:
            reply = sending.send_rpc(conn,
                                     context.get_admin_context(),
                                     exchange=flags.FLAGS.control_exchange,
                                     topic='test',
                                     method='testmethod',
                                     args={'foo': 'bar'},
                                     timeout=2)
            assert reply == {'foo': 'bar'}
    finally:
        consumer_thread.kill()
        nova_conn.close()
def test_sending_rpc_call_to_nova(connection):
    """Round-trip an RPC call into a nova consumer and verify the echo."""

    class Proxy(object):
        def testmethod(self, context, foo):
            # Echo handler used as the nova-side RPC endpoint.
            return {'foo': foo}

    proxy_conn = rpc.create_connection()
    proxy_conn.create_consumer('test', Proxy())
    worker = eventlet.spawn(proxy_conn.consume)
    try:
        eventlet.sleep(0)  # let the consumer greenthread spin up
        with connection as conn:
            result = sending.send_rpc(
                conn,
                context.get_admin_context(),
                exchange=flags.FLAGS.control_exchange,
                topic='test',
                method='testmethod',
                args={'foo': 'bar'},
                timeout=2,
            )
            assert result == {'foo': 'bar'}
    finally:
        worker.kill()
        proxy_conn.close()
def setUp(self):
    """Create the shared AMQP connection, a test consumer, and context."""
    super(RpcAMQPTestCase, self).setUp()
    connection = rpc.create_connection(True)
    receiver = TestReceiver()
    consumer = rpc.create_consumer(connection, "test", receiver, False)
    consumer.attach_to_eventlet()
    self.conn = connection
    self.receiver = receiver
    self.consumer = consumer
    self.context = context.get_admin_context()
def register_nova_listeners(self):
    """Listen on the 'vncproxy' topic for console-token authorizations.

    Tokens received from nova are cached in ``self.tokens`` along with a
    last-activity timestamp; a one-second looping call reaps entries idle
    longer than ``FLAGS.vnc_proxy_idle_timeout`` seconds.
    """
    self.tokens = {}

    # New-style class for consistency with the other proxy classes in
    # this codebase (e.g. ``class Proxy(object)``).
    class TopicProxy(object):
        @staticmethod
        def authorize_vnc_console(context, **kwargs):
            # Parenthesized print is behavior-identical on Python 2 for a
            # single argument, and forward-portable.
            print("Received a token: %s" % kwargs)
            self.tokens[kwargs['token']] = \
                {'args': kwargs, 'last_activity': time.time()}

    self.conn = rpc.create_connection(new=True)
    self.conn.create_consumer('vncproxy', TopicProxy)

    def delete_expired_tokens():
        # Collect first, then delete, so the dict is not mutated while
        # being iterated.
        now = time.time()
        expired = [token for token, info in self.tokens.items()
                   if now - info['last_activity'] >
                   FLAGS.vnc_proxy_idle_timeout]
        for token in expired:
            del self.tokens[token]

    self.conn.consume_in_thread()
    utils.LoopingCall(delete_expired_tokens).start(1)
def test_nested_calls(self):
    """Test that we can do an rpc.call inside another call."""

    class Nested(object):
        @staticmethod
        def echo(context, queue, value):
            """Calls echo in the passed queue"""
            LOG.debug(_("Nested received %(queue)s, %(value)s") % locals())
            # TODO: so, it will replay the context and use the same REQID?
            # that's bizarre.
            inner_msg = {"method": "echo", "args": {"value": value}}
            ret = rpc.call(context, queue, inner_msg)
            LOG.debug(_("Nested return %s"), ret)
            return value

    conn = rpc.create_connection(True)
    consumer = rpc.create_consumer(conn, 'nested', Nested(), False)
    consumer.attach_to_eventlet()
    expected = 42
    outer_msg = {"method": "echo",
                 "args": {"queue": "test", "value": expected}}
    result = rpc.call(self.context, 'nested', outer_msg)
    self.assertEqual(expected, result)
def test_raising_from_error_in_nova(connection):
    """A server-side exception must surface locally as RemoteError."""

    class Proxy(object):
        def testmethod(self, context, **kwargs):
            # Always blow up so the caller gets a remote error back.
            raise Exception('foo')

    nova_conn = rpc.create_connection()
    nova_conn.create_consumer('test', Proxy())
    consumer_thread = eventlet.spawn(nova_conn.consume)
    try:
        eventlet.sleep(0)  # give the consumer greenthread a chance to start
        with connection as conn:
            with pytest.raises(exceptions.RemoteError):
                sending.send_rpc(conn,
                                 context.get_admin_context(),
                                 exchange=flags.FLAGS.control_exchange,
                                 topic='test',
                                 method='testmethod',
                                 args={},
                                 timeout=2)
    finally:
        consumer_thread.kill()
        nova_conn.close()
def test_nested_calls(self):
    """Test that we can do an rpc.call inside another call."""

    class Nested(object):
        @staticmethod
        def echo(context, queue, value):
            """Calls echo in the passed queue"""
            LOG.debug(_("Nested received %(queue)s, %(value)s") % locals())
            # TODO: so, it will replay the context and use the same REQID?
            # that's bizarre.
            ret = rpc.call(context, queue,
                           {"method": "echo",
                            "args": {"value": value}})
            LOG.debug(_("Nested return %s"), ret)
            return value

    nested = Nested()
    conn = rpc.create_connection(True)
    consumer = rpc.create_consumer(conn, 'nested', nested, False)
    consumer.attach_to_eventlet()
    value = 42
    result = rpc.call(self.context, 'nested',
                      {"method": "echo",
                       "args": {"queue": "test", "value": value}})
    self.assertEqual(value, result)
def register_nova_listeners(self):
    """Subscribe to nova's 'vncproxy' topic and track console tokens.

    Each authorized token is stored in ``self.tokens`` with its arguments
    and a timestamp; a looping call (every second) expires tokens idle
    longer than ``FLAGS.vnc_proxy_idle_timeout``.
    """
    self.tokens = {}

    # Use a new-style class, matching the ``(object)`` convention used by
    # the other proxy classes in this codebase.
    class TopicProxy(object):
        @staticmethod
        def authorize_vnc_console(context, **kwargs):
            # Single-argument parenthesized form prints identically on
            # Python 2 and stays portable.
            print("Received a token: %s" % kwargs)
            self.tokens[kwargs['token']] = {
                'args': kwargs,
                'last_activity': time.time(),
            }

    self.conn = rpc.create_connection(new=True)
    self.conn.create_consumer('vncproxy', TopicProxy)

    def delete_expired_tokens():
        """Drop tokens idle longer than the configured timeout."""
        now = time.time()
        stale = [token for token, data in self.tokens.items()
                 if now - data['last_activity'] >
                 FLAGS.vnc_proxy_idle_timeout]
        for token in stale:
            del self.tokens[token]

    self.conn.consume_in_thread()
    utils.LoopingCall(delete_expired_tokens).start(1)
def setUp(self):
    """Prepare an AMQP connection, an eventlet-backed consumer, and an
    admin request context for the tests.
    """
    super(RpcAMQPTestCase, self).setUp()
    self.conn = rpc.create_connection(True)
    self.receiver = TestReceiver()
    self.consumer = rpc.create_consumer(self.conn,
                                        'test',
                                        self.receiver,
                                        False)
    self.consumer.attach_to_eventlet()
    self.context = context.get_admin_context()
def main():
    """Record, replay, or monitor notification messages on an exchange.

    Returns 0 on success (suitable as a shell exit status).
    """
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt('datafile',
                   default=None,
                   help='Data file to read or write',
                   ),
        cfg.BoolOpt('record',
                    help='Record events',
                    ),
        cfg.BoolOpt('replay',
                    help='Replay events',
                    ),
        ])
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()

    parser = argparse.ArgumentParser(
        description='record or play back notification events',
        )
    parser.add_argument('mode',
                        choices=('record', 'replay', 'monitor'),
                        help='operating mode',
                        )
    parser.add_argument('data_file',
                        default='msgs.dat',
                        nargs='?',
                        help='the data file to read or write',
                        )
    parser.add_argument('--topic',
                        default='notifications.info',
                        help='the exchange topic to listen for',
                        )
    args = parser.parse_args(remaining_args[1:])

    # Route all log output to stderr at DEBUG.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter('%(message)s'))
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    connection = rpc.create_connection()
    try:
        if args.mode == 'replay':
            # 'infile'/'outfile' instead of 'input'/'output' to avoid
            # shadowing the builtins.
            with open(args.data_file, 'rb') as infile:
                send_messages(connection, args.topic, infile)
        elif args.mode == 'record':
            with open(args.data_file, 'wb') as outfile:
                record_messages(connection, args.topic, outfile)
        elif args.mode == 'monitor':
            monitor_messages(connection, args.topic)
    finally:
        connection.close()

    return 0
def start(self):
    """Bring the service up: register in the DB, wire RPC consumers on a
    shared connection, and kick off the report/periodic timers.
    """
    vcs_string = version.version_string_with_vcs()
    logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        # First start on this host: create the service row ourselves.
        self._create_service_ref(ctxt)

    if self.binary == 'nova-compute':
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    logging.debug("Creating Consumer connection for Service %s" % self.topic)

    # Share this same connection for these Consumers
    node_topic = '%s.%s' % (self.topic, self.host)
    consumers = [
        rpc.create_consumer(self.conn, self.topic, self, fanout=False),
        rpc.create_consumer(self.conn, node_topic, self, fanout=False),
        rpc.create_consumer(self.conn, self.topic, self, fanout=True),
        ]
    consumer_set = rpc.create_consumer_set(self.conn, consumers)

    # Wait forever, processing these consumers
    def _wait():
        try:
            consumer_set.wait()
        finally:
            consumer_set.close()

    self.consumer_set_thread = greenthread.spawn(_wait)

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)

    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)
def test_rpc_consumer_isolation(self):
    """A consumer on one topic must never see other topics' messages.

    The proxy asserts on any attribute access, so any delivery to it
    fails the test.
    """

    class NeverCalled(object):
        def __getattribute__(*args):
            assert False, "I should never get called."

    connection = rpc.create_connection(new=True)
    connection.create_consumer('compute', NeverCalled(), fanout=False)
    connection.consume_in_thread()
def start(self):
    """Start the service: DB registration, RPC consumer set, and timers."""
    vcs_string = version.version_string_with_vcs()
    logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if self.binary == 'nova-compute':
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    logging.debug("Creating Consumer connection for Service %s" % self.topic)

    # Share this same connection for these Consumers
    consumer_all = rpc.create_consumer(self.conn, self.topic, self,
                                       fanout=False)
    node_topic = '%s.%s' % (self.topic, self.host)
    consumer_node = rpc.create_consumer(self.conn, node_topic, self,
                                        fanout=False)
    fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)
    consumer_set = rpc.create_consumer_set(
        self.conn, [consumer_all, consumer_node, fanout])

    # Wait forever, processing these consumers
    def _wait():
        try:
            consumer_set.wait()
        finally:
            consumer_set.close()

    self.consumer_set_thread = eventlet.spawn(_wait)

    # Start the recurring timers that report state and run periodic tasks.
    for interval, func in ((self.report_interval, self.report_state),
                           (self.periodic_interval, self.periodic_tasks)):
        if interval:
            timer = utils.LoopingCall(func)
            timer.start(interval=interval, now=False)
            self.timers.append(timer)
def start(self):
    """Start the node: lock cleanup, DB registration, RPC consumers via a
    dispatcher, and the report/periodic timers.
    """
    vcs_string = version.version_string_with_vcs()
    LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
              {'topic': self.topic, 'vcs_string': vcs_string})
    utils.cleanup_file_locks()
    rpc.register_opts(FLAGS)
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if self.binary == 'nova-compute':
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    LOG.debug(_("Creating Consumer connection for Service %s") % self.topic)

    dispatcher = self.manager.create_rpc_dispatcher()

    # Share this same connection for these Consumers
    for topic, fanout in ((self.topic, False),
                          ('%s.%s' % (self.topic, self.host), False),
                          (self.topic, True)):
        self.conn.create_consumer(topic, dispatcher, fanout=fanout)

    # Consume from all consumers in a thread
    self.conn.consume_in_thread()

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval,
                    initial_delay=self.report_interval)
        self.timers.append(pulse)

    if self.periodic_interval:
        # Stagger startup across workers so periodic tasks don't stampede.
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval,
                       initial_delay=initial_delay)
        self.timers.append(periodic)
def start(self):
    """Bring the service online and attach its RPC consumers and timers."""
    vcs_string = version.version_string_with_vcs()
    LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
              {'topic': self.topic, 'vcs_string': vcs_string})
    utils.cleanup_file_locks()
    rpc.register_opts(FLAGS)
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        # No existing row for this host/binary: register a fresh one.
        self._create_service_ref(ctxt)

    if 'nova-compute' == self.binary:
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    LOG.debug(_("Creating Consumer connection for Service %s") % self.topic)

    rpc_dispatcher = self.manager.create_rpc_dispatcher()

    # Share this same connection for these Consumers
    node_topic = '%s.%s' % (self.topic, self.host)
    self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
    self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
    self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

    # Consume from all consumers in a thread
    self.conn.consume_in_thread()

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval,
                    initial_delay=self.report_interval)
        self.timers.append(pulse)

    if self.periodic_interval:
        # Randomize the first run so multiple workers don't fire together.
        initial_delay = (random.randint(0, self.periodic_fuzzy_delay)
                         if self.periodic_fuzzy_delay else None)
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval,
                       initial_delay=initial_delay)
        self.timers.append(periodic)
def main():
    """Record, replay, or monitor notification events.

    Parses the mode and data file from the command line, sets up stderr
    logging, and drives the matching message helper. Returns 0.
    """
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt("datafile", default=None,
                   help="Data file to read or write"),
        cfg.BoolOpt("record", help="Record events"),
        cfg.BoolOpt("replay", help="Replay events"),
    ])
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()

    parser = argparse.ArgumentParser(
        description="record or play back notification events")
    parser.add_argument("mode",
                        choices=("record", "replay", "monitor"),
                        help="operating mode")
    parser.add_argument("data_file",
                        default="msgs.dat",
                        nargs="?",
                        help="the data file to read or write")
    parser.add_argument("--topic",
                        default="notifications.info",
                        help="the exchange topic to listen for")
    args = parser.parse_args(remaining_args[1:])

    # All logging goes to stderr at DEBUG.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter("%(message)s"))
    root_logger = logging.getLogger("")
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    connection = rpc.create_connection()
    try:
        if args.mode == "replay":
            # Renamed from 'input'/'output' so the builtins aren't shadowed.
            with open(args.data_file, "rb") as infile:
                send_messages(connection, args.topic, infile)
        elif args.mode == "record":
            with open(args.data_file, "wb") as outfile:
                record_messages(connection, args.topic, outfile)
        elif args.mode == "monitor":
            monitor_messages(connection, args.topic)
    finally:
        connection.close()

    return 0
def start(self):
    """Start the service: DB registration, three consumers on a shared
    connection (topic, per-node topic, fanout), and the timers.
    """
    vcs_string = version.version_string_with_vcs()
    logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"),
                  {"topic": self.topic, "vcs_string": vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
        self.service_id = service_ref["id"]
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if self.binary == "nova-compute":
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    logging.debug("Creating Consumer connection for Service %s" % self.topic)

    # Share this same connection for these Consumers
    node_topic = "%s.%s" % (self.topic, self.host)
    for topic, fanout in ((self.topic, False),
                          (node_topic, False),
                          (self.topic, True)):
        self.conn.create_consumer(topic, self, fanout=fanout)

    # Consume from all consumers in a thread
    self.conn.consume_in_thread()

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)

    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)
def setUp(self):
    """Build a fake-connection cloud with all services, image stubs, and
    an admin request context.
    """
    super(AdminApiTestCase, self).setUp()
    self.flags(connection_type='fake')
    self.conn = rpc.create_connection()

    # set up our cloud
    self.api = admin.AdminController()

    # set up services
    self.compute = self.start_service('compute')
    # NOTE(review): 'scheduter' looks like a typo for 'scheduler'; the
    # attribute name is kept as-is in case other code references it --
    # confirm before renaming.
    self.scheduter = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.volume = self.start_service('volume')
    self.image_service = utils.import_object(FLAGS.image_service)

    self.user_id = 'admin'
    self.project_id = 'admin'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          True)

    def fake_show(meh, context, id):
        # Every image lookup yields the same minimal 'available' machine.
        return {'id': 1, 'properties': {'kernel_id': 1,
                                        'ramdisk_id': 1,
                                        'type': 'machine',
                                        'image_state': 'available'}}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)

    # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
    rpc_cast = rpc.cast

    def finish_cast(*args, **kwargs):
        rpc_cast(*args, **kwargs)
        greenthread.sleep(0.2)

    self.stubs.Set(rpc, 'cast', finish_cast)
def test_raising_from_error_in_nova(connection):
    """An exception raised by the remote handler must arrive locally as
    exceptions.RemoteError.
    """

    class Proxy(object):
        def testmethod(self, context, **kwargs):
            raise Exception('foo')  # any server-side failure will do

    remote_conn = rpc.create_connection()
    remote_conn.create_consumer('test', Proxy())
    worker = eventlet.spawn(remote_conn.consume)
    try:
        eventlet.sleep(0)  # yield so the consumer starts first
        with connection as conn:
            with pytest.raises(exceptions.RemoteError):
                sending.send_rpc(
                    conn,
                    context.get_admin_context(),
                    exchange=flags.FLAGS.control_exchange,
                    topic='test',
                    method='testmethod',
                    args={},
                    timeout=2,
                )
    finally:
        worker.kill()
        remote_conn.close()
def main():
    """Record, replay, or monitor notification events on an exchange.

    Command-line driven: 'mode' selects the action, 'data_file' the file
    to read or write, '--topic' the exchange topic. Returns 0.
    """
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt(
            'datafile',
            default=None,
            help='Data file to read or write',
        ),
        cfg.BoolOpt(
            'record',
            help='Record events',
        ),
        cfg.BoolOpt(
            'replay',
            help='Replay events',
        ),
    ])
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()

    parser = argparse.ArgumentParser(
        description='record or play back notification events',
    )
    parser.add_argument(
        'mode',
        choices=('record', 'replay', 'monitor'),
        help='operating mode',
    )
    parser.add_argument(
        'data_file',
        default='msgs.dat',
        nargs='?',
        help='the data file to read or write',
    )
    parser.add_argument(
        '--topic',
        default='notifications.info',
        help='the exchange topic to listen for',
    )
    args = parser.parse_args(remaining_args[1:])

    # Send all log output to stderr at DEBUG.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter('%(message)s'))
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    connection = rpc.create_connection()
    try:
        if args.mode == 'replay':
            # 'infile'/'outfile' rather than 'input'/'output': don't
            # shadow the builtins.
            with open(args.data_file, 'rb') as infile:
                send_messages(connection, args.topic, infile)
        elif args.mode == 'record':
            with open(args.data_file, 'wb') as outfile:
                record_messages(connection, args.topic, outfile)
        elif args.mode == 'monitor':
            monitor_messages(connection, args.topic)
    finally:
        connection.close()

    return 0