def query(self, req, id, body):
    if not id:
        raise webob.exc.HTTPNotFound()
    context = req.environ["nova.context"]
    authorize(context)

    # Extract the query arguments.
    args = body.get('args', {})

    # Construct the query.
    kwargs = {'method': 'query', 'args': args}

    parts = id.split(':', 1)
    if len(parts) == 2:
        instance = db.instance_get_by_uuid(context, parts[1])
        kwargs['args']['target'] = instance['name']
        queue = rpc.queue_get_for(context, FLAGS.canary_topic, parts[0])
    else:
        queue = rpc.queue_get_for(context, FLAGS.canary_topic, id)

    try:
        # Send it along.
        result = rpc.call(context, queue, kwargs)
    except Exception as e:
        raise webob.exc.HTTPBadRequest(explanation=unicode(e))

def query(self, req, id, body):
    if not id:
        raise webob.exc.HTTPNotFound()
    context = req.environ["nova.context"]
    authorize(context)

    # Extract the query arguments.
    args = body.get('args', {})

    # Construct the query.
    kwargs = {'method': 'query', 'args': args}

    parts = id.split(':', 1)
    if len(parts) == 2:
        instance = db.instance_get_by_uuid(context, parts[1])
        kwargs['args']['target'] = instance['name']
        queue = rpc.queue_get_for(context, CONF.canary_topic, parts[0])
    else:
        queue = rpc.queue_get_for(context, CONF.canary_topic, id)

    try:
        # Send it along.
        result = rpc.call(context, queue, kwargs)
    except Exception as e:
        raise webob.exc.HTTPBadRequest(explanation=unicode(e))

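# A minimal sketch (assumption: this mirrors nova's rpc.queue_get_for()
# behaviour; it is not the real implementation) of how the per-host queue
# names used throughout these snippets are derived: "<topic>.<host>" when
# a host is given, the bare topic otherwise.


def _queue_get_for_sketch(context, topic, host):
    """Illustrative stand-in for rpc.queue_get_for()."""
    return '%s.%s' % (topic, host) if host else topic


assert _queue_get_for_sketch(None, 'compute', 'fake_host1') == \
        'compute.fake_host1'
assert _queue_get_for_sketch(None, 'network', None) == 'network'
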
def test_block_migration_dest_check_service_lack_disk(self):
    """Confirms an exception is raised when dest doesn't have enough disk."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver,
                             'assert_compute_node_has_enough_memory')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
            dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    # Enough memory
    self.driver.assert_compute_node_has_enough_memory(self.context,
            instance, dest)

    # Not enough disk
    self.driver._get_compute_info(self.context, dest,
            'disk_available_least').AndReturn(1023)
    rpc.queue_get_for(self.context, FLAGS.compute_topic,
            instance['host']).AndReturn('src_queue')
    instance_disk_info_msg = {
        'method': 'get_instance_disk_info',
        'args': {
            'instance_name': instance['name'],
        },
        'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION,
    }
    instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
    rpc.call(self.context,
             'src_queue',
             instance_disk_info_msg,
             None).AndReturn(jsonutils.dumps(instance_disk_info))

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)

def test_cast_to_compute_host_update_db_without_instance_uuid(self):
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'extra_arg': 'meow'}
    queue = 'fake_queue'

    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'cast')

    rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {'method': method, 'args': fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_compute_host(self.context, host, method, **fake_kwargs)

def test_cast_to_network_host(self):
    host = "fake_host1"
    method = "fake_method"
    fake_kwargs = {"extra_arg": "meow"}
    queue = "fake_queue"

    self.mox.StubOutWithMock(rpc, "queue_get_for")
    self.mox.StubOutWithMock(rpc, "cast")

    rpc.queue_get_for(self.context, "network", host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {"method": method, "args": fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_network_host(self.context, host, method, **fake_kwargs)

def migrate_instance_start(self, ctxt, instance_uuid, floating_addresses,
                           host):
    return self.call(ctxt, self.make_msg('migrate_instance_start',
            instance_uuid=instance_uuid,
            floating_addresses=floating_addresses),
            topic=rpc.queue_get_for(ctxt, self.topic, host),
            version='1.1')

def associate_floating_ip(self, context, instance, floating_address,
                          fixed_address, affect_auto_assigned=False):
    """Associates a floating ip with a fixed ip.

    Ensures the floating ip is allocated to the project in context.
    """
    host = instance.get('host')
    if host:
        topic = rpc.queue_get_for(context, FLAGS.network_topic, host)
    else:
        topic = FLAGS.network_topic
    orig_instance_uuid = rpc.call(context, topic,
            {'method': 'associate_floating_ip',
             'args': {'floating_address': floating_address,
                      'fixed_address': fixed_address,
                      'affect_auto_assigned': affect_auto_assigned}})
    if orig_instance_uuid:
        msg_dict = dict(address=floating_address,
                        instance_id=orig_instance_uuid)
        LOG.info(_('re-assign floating IP %(address)s from '
                   'instance %(instance_id)s') % msg_dict)
        orig_instance = self.db.instance_get_by_uuid(context,
                                                     orig_instance_uuid)

        # purge cached nw info for the original instance
        update_instance_cache_with_nw_info(self, context, orig_instance)

def delete_console(self, context, instance_uuid, console_uuid):
    # NOTE: the original snippet mixed instance_id/instance_uuid and
    # referenced an undefined 'pool'; the pool host is taken from the
    # console record itself.
    instance_uuid = self._translate_uuid_if_necessary(context,
                                                      instance_uuid)
    console = self.db.console_get(context, console_uuid, instance_uuid)
    topic = rpc.queue_get_for(context, FLAGS.console_topic,
                              console['pool']['host'])
    rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
    rpcapi.remove_console(context, console['id'])

def rpc_setup_network_on_host(self, ctxt, network_id, teardown, host):
    # NOTE(tr3buchet): the call is just to wait for completion
    return self.call(ctxt, self.make_msg("rpc_setup_network_on_host",
            network_id=network_id, teardown=teardown),
            topic=rpc.queue_get_for(ctxt, self.topic, host))

def _create_snapshot(self, context, volume, name, description,
                     force=False):
    check_policy(context, 'create_snapshot', volume)

    if (not force) and volume['status'] != "available":
        msg = _("must be available")
        raise exception.InvalidVolume(reason=msg)

    options = {'volume_id': volume['id'],
               'user_id': context.user_id,
               'project_id': context.project_id,
               'status': "creating",
               'progress': '0%',
               'volume_size': volume['size'],
               'display_name': name,
               'display_description': description}

    snapshot = self.db.snapshot_create(context, options)
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "create_snapshot",
              "args": {"volume_id": volume['id'],
                       "snapshot_id": snapshot['id']}})
    return snapshot

def delete(self, context, volume, force=False):
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        # Note(zhiteng): update volume quota reservation
        try:
            reservations = QUOTAS.reserve(context, volumes=-1,
                                          gigabytes=-volume['size'])
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update quota for deleting volume."))
        self.db.volume_destroy(context, volume_id)

        if reservations:
            QUOTAS.commit(context, reservations)
        return
    if not force and volume['status'] not in ["available", "error"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

def _disassociate_floating_ip(self, ctxt, address, interface, host,
                              instance_uuid=None):
    return self.call(ctxt, self.make_msg('_disassociate_floating_ip',
            address=address, interface=interface,
            instance_uuid=instance_uuid),
            topic=rpc.queue_get_for(ctxt, self.topic, host),
            version='1.6')

def _cast_create_volume(self, context, volume_id, snapshot_id,
                        reservations):
    # NOTE(Rongze Zhu): It is a simple solution for bug 1008866.
    # If snapshot_id is set, make the call to create the volume directly
    # on the volume host where the snapshot resides instead of passing it
    # through the scheduler, so the snapshot can be copied to the new
    # volume.
    if snapshot_id and FLAGS.snapshot_same_host:
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        src_volume_ref = self.db.volume_get(context,
                                            snapshot_ref['volume_id'])
        topic = rpc.queue_get_for(context,
                                  FLAGS.volume_topic,
                                  src_volume_ref['host'])
        rpc.cast(context,
                 topic,
                 {"method": "create_volume",
                  "args": {"volume_id": volume_id,
                           "snapshot_id": snapshot_id}})
    else:
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "create_volume",
                  "args": {"topic": FLAGS.volume_topic,
                           "volume_id": volume_id,
                           "snapshot_id": snapshot_id,
                           "reservations": reservations}})

def host_schedule(rpc_method, context, base_options,
                  instance_type, availability_zone, injected_files,
                  admin_password, image, num_instances,
                  requested_networks, block_device_mapping,
                  security_group, filter_properties):
    # NOTE: this appears to be a nested helper; 'self' and 'target_host'
    # are assumed to come from the enclosing scope, which has already
    # picked the target host.
    instance_uuid = base_options.get('uuid')
    now = utils.utcnow()
    self.db.instance_update(context, instance_uuid,
                            {'host': target_host, 'scheduled_at': now})
    rpc.cast(context,
             rpc.queue_get_for(context, CONF.compute_topic, target_host),
             {"method": "run_instance",
              "args": {"instance_uuid": instance_uuid,
                       "availability_zone": availability_zone,
                       "admin_password": admin_password,
                       "injected_files": injected_files,
                       "requested_networks": requested_networks}})

    # Instance was already created before calling scheduler
    return self.get(context, instance_uuid)

def migrate_instance_finish(self, ctxt, instance_uuid,
                            floating_addresses, dest):
    return self.call(ctxt, self.make_msg('migrate_instance_finish',
            instance_uuid=instance_uuid,
            floating_addresses=floating_addresses,
            host=dest),
            topic=rpc.queue_get_for(ctxt, self.topic, dest))

def cast_to_network_host(context, host, method, **kwargs):
    """Cast request to a network host queue"""
    rpc.cast(context,
             rpc.queue_get_for(context, 'network', host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())

def allocate_for_instance(self, ctxt, instance_id, project_id, host,
                          rxtx_factor, vpn, requested_networks, macs=None,
                          dhcp_options=None):
    # NOTE: dhcp_options is accepted for interface compatibility but is
    # not included in the version 1.9 message below.
    if CONF.multi_host:
        topic = rpc.queue_get_for(ctxt, self.topic, host)
    else:
        topic = None
    return self.call(ctxt, self.make_msg('allocate_for_instance',
            instance_id=instance_id, project_id=project_id, host=host,
            rxtx_factor=rxtx_factor, vpn=vpn,
            requested_networks=requested_networks,
            macs=jsonutils.to_primitive(macs)),
            topic=topic, version='1.9')

def _cast_create_volume(self, context, volume_id, snapshot_id,
                        reservations):
    # NOTE(Rongze Zhu): It is a simple solution for bug 1008866.
    # If snapshot_id is set, make the call to create the volume directly
    # on the volume host where the snapshot resides instead of passing it
    # through the scheduler, so the snapshot can be copied to the new
    # volume.
    if snapshot_id and FLAGS.snapshot_same_host:
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        src_volume_ref = self.db.volume_get(context,
                                            snapshot_ref['volume_id'])
        topic = rpc.queue_get_for(context,
                                  FLAGS.volume_topic,
                                  src_volume_ref['host'])
        rpc.cast(context,
                 topic,
                 {"method": "create_volume",
                  "args": {"volume_id": volume_id,
                           "snapshot_id": snapshot_id}})
    else:
        self.scheduler_rpcapi.create_volume(context,
                                            volume_id,
                                            snapshot_id,
                                            reservations)

def delete(self, context, volume):
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        self.db.volume_destroy(context, volume_id)
        return
    if volume['status'] not in ["available", "error"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

def initialize_connection(self, context, volume, connector):
    host = volume['host']
    queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
    return rpc.call(context, queue,
                    {"method": "initialize_connection",
                     "args": {"volume_id": volume['id'],
                              "connector": connector}})

def _cast_gridcentric_message(self, method, context, instance_uuid,
                              host=None, params=None):
    """Generic handler for RPC casts to gridcentric. This does not block
    for a response.

    :param params: Optional dictionary of arguments to be passed to the
                   gridcentric worker

    :returns: None
    """
    if not params:
        params = {}
    if not host:
        instance = self.get(context, instance_uuid)
        host = instance['host']
    if not host:
        queue = CONF.gridcentric_topic
    else:
        queue = rpc.queue_get_for(context, CONF.gridcentric_topic, host)
    params['instance_uuid'] = instance_uuid
    kwargs = {'method': method, 'args': params}
    rpc.cast(context, queue, kwargs)

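# Hedged aside: the helper above uses rpc.cast(), which in this codebase
# enqueues the message and returns immediately, while the rpcapi methods
# elsewhere in this file use self.call()/rpc.call(), which block until
# the remote worker returns a result. The stubs below only illustrate
# that contract; they are not the nova.rpc implementation.


def _cast_vs_call_sketch():
    """Illustrative contract of the two RPC primitives."""
    sent = []

    def cast(context, queue, msg):
        # Fire-and-forget: deliver the message and return None.
        sent.append((queue, msg))

    def call(context, queue, msg):
        # Blocking: deliver the message and return the (faked) result.
        sent.append((queue, msg))
        return 'remote-result'

    assert cast(None, 'gridcentric.host1', {'method': 'bless'}) is None
    assert call(None, 'gridcentric.host1',
                {'method': 'query'}) == 'remote-result'
    return sent
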
def _confirm_service_failure(self, ctxt, service):
    """Confirm whether a service has failed by sending an RPC call to it.

    :param service: service info got from db
    :returns: False if the service is OK, True if the service is
              confirmed to have failed
    """
    host = service['host']
    service_topic = service['topic']
    service_binary = service['binary']
    try:
        topic = FLAGS.get('%s_topic' % service_topic, None)
        rpc_topic = rpc.queue_get_for(ctxt, topic, host)
        msg = {'method': 'service_version'}
        if service_topic == 'compute':
            msg['version'] = '2.0'
        service_version = rpc.call(ctxt, rpc_topic, msg)
        LOG.info(_('confirm service %(service_binary)s on %(host)s '
                   'normal, version : %(service_version)s!'), locals())
        return False
    except Exception:
        LOG.info(_('confirm service %(service_binary)s on %(host)s '
                   'abnormal!'), locals())
        return True

def copy_volume_to_image(self, context, volume, metadata, force):
    """Create a new image from the specified volume."""
    self._check_volume_availability(context, volume, force)

    recv_metadata = self.image_service.create(context, metadata)
    self.update(context, volume, {'status': 'uploading'})
    rpc.cast(context,
             rpc.queue_get_for(context,
                               FLAGS.volume_topic,
                               volume['host']),
             {"method": "copy_volume_to_image",
              "args": {"volume_id": volume['id'],
                       "image_id": recv_metadata['id']}})

    response = {"id": volume['id'],
                "updated_at": volume['updated_at'],
                "status": 'uploading',
                "display_description": volume['display_description'],
                "size": volume['size'],
                "volume_type": volume['volume_type'],
                "image_id": recv_metadata['id'],
                "container_format": recv_metadata['container_format'],
                "disk_format": recv_metadata['disk_format'],
                "image_name": recv_metadata.get('name', None)}
    return response

def _test_network_api(self, method, rpc_method, **kwargs):
    ctxt = context.RequestContext('fake_user', 'fake_project')

    rpcapi = network_rpcapi.NetworkAPI()
    expected_retval = 'foo' if method == 'call' else None
    expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
    expected_topic = CONF.network_topic
    expected_msg = rpcapi.make_msg(method, **kwargs)
    if 'source_compute' in expected_msg['args']:
        # Fix up for migrate_instance_* calls.
        args = expected_msg['args']
        args['source'] = args.pop('source_compute')
        args['dest'] = args.pop('dest_compute')
    targeted_methods = [
        'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
        '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
        '_associate_floating_ip', '_disassociate_floating_ip',
        'lease_fixed_ip', 'release_fixed_ip',
        'migrate_instance_start', 'migrate_instance_finish',
        'allocate_for_instance', 'deallocate_for_instance',
    ]
    if method in targeted_methods and 'host' in kwargs:
        if method not in ['allocate_for_instance',
                          'deallocate_for_instance',
                          'deallocate_fixed_ip']:
            del expected_msg['args']['host']
        host = kwargs['host']
        if CONF.multi_host:
            expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic,
                                               host)
    expected_msg['version'] = expected_version

    self.fake_args = None
    self.fake_kwargs = None

    def _fake_rpc_method(*args, **kwargs):
        self.fake_args = args
        self.fake_kwargs = kwargs
        if expected_retval:
            return expected_retval

    self.stubs.Set(rpc, rpc_method, _fake_rpc_method)

    retval = getattr(rpcapi, method)(ctxt, **kwargs)

    self.assertEqual(retval, expected_retval)
    expected_args = [ctxt, expected_topic, expected_msg]
    for arg, expected_arg in zip(self.fake_args, expected_args):
        try:
            self.assertEqual(arg, expected_arg)
        except AssertionError:
            # actual_args may contain optional args, like the ones that
            # have default values; therefore if arg and expected_arg
            # do not match, verify at least that the required ones do
            if isinstance(arg, dict) and isinstance(expected_arg, dict):
                actual_args = arg.get('args')
                required_args = expected_arg.get('args')
                if actual_args and required_args:
                    self.assertThat(required_args,
                                    matchers.IsSubDictOf(actual_args))

def attach(self, context, volume, instance_uuid, mountpoint):
    host = volume['host']
    queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
    return rpc.call(context, queue,
                    {"method": "attach_volume",
                     "args": {"volume_id": volume['id'],
                              "instance_uuid": instance_uuid,
                              "mountpoint": mountpoint}})

def terminate_connection(self, context, volume, connector):
    self.unreserve_volume(context, volume)
    host = volume['host']
    queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
    return rpc.call(context, queue,
                    {"method": "terminate_connection",
                     "args": {"volume_id": volume['id'],
                              "connector": connector}})

def _associate_floating_ip(self, ctxt, floating_address, fixed_address,
                           interface, host):
    return self.call(ctxt, self.make_msg('_associate_floating_ip',
            floating_address=floating_address,
            fixed_address=fixed_address,
            interface=interface),
            topic=rpc.queue_get_for(ctxt, self.topic, host))

def deallocate_for_instance(self, ctxt, instance_id, project_id, host):
    if CONF.multi_host:
        topic = rpc.queue_get_for(ctxt, self.topic, host)
    else:
        topic = None
    return self.call(ctxt, self.make_msg('deallocate_for_instance',
            instance_id=instance_id, project_id=project_id, host=host),
            topic=topic)

def _rpc_allocate_fixed_ip(self, ctxt, instance_id, network_id, address,
                           vpn, host):
    return self.call(ctxt, self.make_msg("_rpc_allocate_fixed_ip",
            instance_id=instance_id, network_id=network_id,
            address=address, vpn=vpn),
            topic=rpc.queue_get_for(ctxt, self.topic, host))

def _test_network_api(self, method, rpc_method, **kwargs):
    ctxt = context.RequestContext('fake_user', 'fake_project')

    rpcapi = network_rpcapi.NetworkAPI()
    expected_retval = 'foo' if method == 'call' else None
    expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
    expected_topic = CONF.network_topic
    expected_msg = rpcapi.make_msg(method, **kwargs)
    if 'source_compute' in expected_msg['args']:
        # Fix up for migrate_instance_* calls.
        args = expected_msg['args']
        args['source'] = args.pop('source_compute')
        args['dest'] = args.pop('dest_compute')
    targeted_methods = [
        'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
        '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
        '_associate_floating_ip', '_disassociate_floating_ip',
        'lease_fixed_ip', 'release_fixed_ip',
        'migrate_instance_start', 'migrate_instance_finish',
        'get_backdoor_port', 'allocate_for_instance',
        'deallocate_for_instance',
    ]
    if method in targeted_methods and 'host' in kwargs:
        if method not in ['allocate_for_instance',
                          'deallocate_for_instance',
                          'deallocate_fixed_ip']:
            del expected_msg['args']['host']
        host = kwargs['host']
        if CONF.multi_host:
            expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic,
                                               host)
    expected_msg['version'] = expected_version

    self.fake_args = None
    self.fake_kwargs = None

    def _fake_rpc_method(*args, **kwargs):
        self.fake_args = args
        self.fake_kwargs = kwargs
        if expected_retval:
            return expected_retval

    self.stubs.Set(rpc, rpc_method, _fake_rpc_method)

    retval = getattr(rpcapi, method)(ctxt, **kwargs)

    self.assertEqual(retval, expected_retval)
    expected_args = [ctxt, expected_topic, expected_msg]
    for arg, expected_arg in zip(self.fake_args, expected_args):
        self.assertEqual(arg, expected_arg)

def test_cast_to_compute_host_update_db_with_instance_uuid(self):
    host = "fake_host1"
    method = "fake_method"
    fake_kwargs = {"instance_uuid": "fake_uuid",
                   "extra_arg": "meow"}
    queue = "fake_queue"

    self.mox.StubOutWithMock(timeutils, "utcnow")
    self.mox.StubOutWithMock(db, "instance_update")
    self.mox.StubOutWithMock(rpc, "queue_get_for")
    self.mox.StubOutWithMock(rpc, "cast")

    timeutils.utcnow().AndReturn("fake-now")
    db.instance_update(self.context, "fake_uuid",
                       {"host": None, "scheduled_at": "fake-now"})
    rpc.queue_get_for(self.context, "compute", host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {"method": method, "args": fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_compute_host(self.context, host, method, **fake_kwargs)

def test_cast_to_host_unknown_topic(self):
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'extra_arg': 'meow'}
    topic = 'unknown'
    queue = 'fake_queue'

    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'cast')

    rpc.queue_get_for(self.context, topic, host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {'method': method, 'args': fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_host(self.context, topic, host, method,
                        update_db=False, **fake_kwargs)

def cast_to_volume_host(context, host, method, **kwargs):
    """Cast request to a volume host queue"""
    volume_id = kwargs.get("volume_id", None)
    if volume_id is not None:
        now = timeutils.utcnow()
        db.volume_update(context, volume_id,
                         {"host": host, "scheduled_at": now})
    rpc.cast(context,
             rpc.queue_get_for(context, "volume", host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())

def cast_to_compute_host(context, host, method, **kwargs):
    """Cast request to a compute host queue"""
    instance_uuid = kwargs.get("instance_uuid", None)
    if instance_uuid:
        instance_update_db(context, instance_uuid, host)
    rpc.cast(context,
             rpc.queue_get_for(context, "compute", host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())

def detach(self, context, volume):
    host = volume['host']
    queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
    return rpc.call(context, queue,
                    {"method": "detach_volume",
                     "args": {"volume_id": volume['id']}})

def deallocate_for_instance(self, ctxt, instance_id, project_id, host,
                            requested_networks=None):
    if CONF.multi_host:
        topic = rpc.queue_get_for(ctxt, self.topic, host)
    else:
        topic = None
    return self.call(ctxt, self.make_msg('deallocate_for_instance',
            instance_id=instance_id, project_id=project_id, host=host,
            requested_networks=requested_networks),
            topic=topic)

def test_live_migration_dest_host_incompatable_cpu_raises(self):
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
            dest, block_migration, disk_over_commit)

    self._check_shared_storage(dest, instance, True)

    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])
    db.service_get_all_compute_by_host(
            self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'xen',
                                        'hypervisor_version': 1,
                                        'cpu_info': 'fake_cpu_info'}]}])

    rpc.queue_get_for(self.context, FLAGS.compute_topic,
                      dest).AndReturn('dest_queue')
    rpc.call(self.context, 'dest_queue',
            {'method': 'compare_cpu',
             'args': {'cpu_info': 'fake_cpu_info'},
             'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
            None).AndRaise(rpc_common.RemoteError())

    self.mox.ReplayAll()
    self.assertRaises(rpc_common.RemoteError,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration)

def delete_network(self, context, fixed_range, uuid):
    """Lookup network by uuid, delete both the IPAM subnet and the
    corresponding Quantum network.

    The fixed_range parameter is kept here for interface compatibility
    but is not used.
    """
    net_ref = db.network_get_by_uuid(context.elevated(), uuid)
    project_id = net_ref['project_id']
    q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
    net_uuid = net_ref['uuid']

    # Check for any attached ports on the network and fail the deletion
    # if there is anything but the gateway port attached.  If it is only
    # the gateway port, unattach and delete it.
    ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
    num_ports = len(ports)
    gw_interface_id = self.driver.get_dev(net_ref)
    gw_port_uuid = None
    if gw_interface_id is not None:
        gw_port_uuid = self.q_conn.get_port_by_attachment(
                q_tenant_id, net_uuid, gw_interface_id)
    if gw_port_uuid:
        num_ports -= 1
    if num_ports > 0:
        raise exception.NetworkBusy(network=net_uuid)

    # only delete gw ports if we are going to finish deleting network
    if gw_port_uuid:
        self.q_conn.detach_and_delete_port(q_tenant_id,
                                           net_uuid,
                                           gw_port_uuid)
        self.l3driver.remove_gateway(net_ref)

    # Now we can delete the network
    self.q_conn.delete_network(q_tenant_id, net_uuid)
    LOG.debug("Deleting network %s for tenant: %s" %
              (net_uuid, q_tenant_id))
    self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)

    # Get rid of dnsmasq
    if FLAGS.quantum_use_dhcp:
        if net_ref['host'] == self.host:
            self.kill_dhcp(net_ref)
        else:
            topic = rpc.queue_get_for(context,
                                      FLAGS.network_topic,
                                      net_ref['host'])
            rpc.call(context, topic, {'method': 'kill_dhcp',
                                      'args': {'net_ref': net_ref}})

def delete_snapshot(self, context, snapshot):
    if snapshot['status'] not in ["available", "error"]:
        msg = _("Volume Snapshot status must be available or error")
        raise exception.InvalidVolume(reason=msg)
    self.db.snapshot_update(context, snapshot['id'],
                            {'status': 'deleting'})
    volume = self.db.volume_get(context, snapshot['volume_id'])
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_snapshot",
              "args": {"snapshot_id": snapshot['id']}})

def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""
    # NOTE: update_db is accepted as an explicit keyword (as exercised by
    # the unknown-topic test above) so it is not forwarded in the message
    # args.
    topic_mapping = {CONF.compute_topic: cast_to_compute_host}
    func = topic_mapping.get(topic)
    if func:
        # Dispatch through the mapped helper rather than hard-coding
        # cast_to_compute_host.
        func(context, host, method, **kwargs)
    else:
        rpc.cast(context,
                 rpc.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                  % locals())
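
# A small, self-contained sketch of the message envelope every cast/call
# in this file builds by hand: a dict with a "method" name, an "args"
# dict, and (for versioned RPC APIs) an optional "version". The helper
# name below is illustrative, not part of nova.


def _make_msg_sketch(method, version=None, **kwargs):
    """Build the {"method": ..., "args": ...} envelope used above."""
    msg = {'method': method, 'args': kwargs}
    if version is not None:
        msg['version'] = version
    return msg


assert _make_msg_sketch('delete_volume', volume_id='vol-1') == \
        {'method': 'delete_volume', 'args': {'volume_id': 'vol-1'}}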