def assert_compute_node_has_enough_disk(self, context, instance_ref,
                                        dest, disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.
    :raises: exception.MigrationError when dest lacks disk space

    """
    # Libvirt supports qcow2 disk format,which is usually compressed
    # on compute nodes.
    # Real disk image (compressed) may enlarged to "virtual disk size",
    # that is specified as the maximum disk size.
    # (See qemu-img -f path-to-disk)
    # Scheduler recognizes destination host still has enough disk space
    # if real disk size < available disk size
    # if disk_over_commit is True,
    # otherwise virtual disk size < available disk size.

    # Refresh compute_nodes table
    topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting total available disk of host
    available_gb = self._get_compute_info(context,
                                          dest, 'disk_available_least')
    available = available_gb * (1024 ** 3)  # GB -> bytes

    # Getting necessary disk size
    try:
        topic = db.queue_get_for(context, FLAGS.compute_topic,
                                 instance_ref['host'])
        ret = rpc.call(context, topic,
                       {"method": 'get_instance_disk_info',
                        # BUG FIX: use item access like every other
                        # instance_ref lookup in this method (was the
                        # inconsistent attribute form instance_ref.name).
                        "args": {'instance_name': instance_ref['name']}})
        disk_infos = utils.loads(ret)
    except rpc.RemoteError:
        # BUG FIX: 'src' must exist for the %(src)s substitution below;
        # without it the log call raised KeyError and masked the
        # original error.
        src = instance_ref['host']
        LOG.exception(_("host %(dest)s is not compatible with "
                        "original host %(src)s.") % locals())
        raise

    necessary = 0
    if disk_over_commit:
        # real (currently allocated) disk usage
        for info in disk_infos:
            necessary += int(info['disk_size'])
    else:
        # maximum (virtual) disk size
        for info in disk_infos:
            necessary += int(info['virt_disk_size'])

    # Check that available disk > necessary disk
    if (available - necessary) < 0:
        instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
    """Check if the src and dest host mount same shared storage.

    At first, dest host creates temp file, and src host can see
    it if they mounts same shared storage. Then src host erase it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    source_host = instance_ref['host']
    dest_topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
    source_topic = db.queue_get_for(context, FLAGS.compute_topic,
                                    source_host)

    # Ask the destination to drop a temp file on its storage.
    filename = rpc.call(context, dest_topic,
                        {"method": 'create_shared_storage_test_file'})

    try:
        # If the source host can see the same file, the storage
        # is shared between the two hosts.
        ret = rpc.call(context, source_topic,
                       {"method": 'check_shared_storage_test_file',
                        "args": {'filename': filename}})
    finally:
        # Always ask the destination to remove the temp file,
        # whatever the check's outcome was.
        rpc.cast(context, dest_topic,
                 {"method": 'cleanup_shared_storage_test_file',
                  "args": {'filename': filename}})

    return ret
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""
    dest = 'dummydest'
    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Stale service record: last heartbeat 10 days ago.
    t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
                                         host=dest)
    # mocks for mounted_on_same_shared_storage()
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(self.context, topic, dest),
                    {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
                    {"method": 'check_shared_storage_test_file',
                     "args": {'filename': fpath}})
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic, dest),
                    {"method": 'cleanup_shared_storage_test_file',
                     "args": {'filename': fpath}})

    self.mox.ReplayAll()
    try:
        self.scheduler.driver._live_migration_common_check(self.context,
                                                           i_ref,
                                                           dest)
    # BUG FIX: use 'as' (valid on Python 2.6+) instead of the
    # py2-only comma form.
    except exception.Invalid as e:
        # BUG FIX: the original computed this flag but never asserted
        # it, so the test passed even with the wrong message.
        self.assertTrue(e.message.find('does not exist') >= 0)
    else:
        # BUG FIX: the original silently passed when no exception
        # was raised at all.
        self.fail("exception.Invalid was not raised")
    # Clean up fixtures created above (mirrors the sibling test).
    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def test_live_migration_common_check_service_orig_not_exists(self):
    """Destination host does not exist."""
    dest = 'dummydest'
    # mocks for live_migration_common_check()
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    # Stale service record: last heartbeat 10 days ago.
    t1 = utils.utcnow() - datetime.timedelta(10)
    s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
                                         host=dest)
    # mocks for mounted_on_same_shared_storage()
    fpath = '/test/20110127120000'
    self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
    topic = FLAGS.compute_topic
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(self.context, topic, dest),
                    {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
                    {"method": 'check_shared_storage_test_file',
                     "args": {'filename': fpath}})
    driver.rpc.call(mox.IgnoreArg(),
                    db.queue_get_for(mox.IgnoreArg(), topic, dest),
                    {"method": 'cleanup_shared_storage_test_file',
                     "args": {'filename': fpath}})

    self.mox.ReplayAll()
    # NOTE(review): the check_shared_storage_test_file expectation
    # above returns None, so the common check fails the shared-storage
    # test (FileNotFound) before any "source host unavailable" path is
    # reached -- confirm this matches the intent of the test name.
    # (Removed a commented-out assertRaises for SourceHostUnavailable.)
    self.assertRaises(exception.FileNotFound,
                      self.scheduler.driver._live_migration_common_check,
                      self.context, i_ref, dest, False)

    db.instance_destroy(self.context, instance_id)
    db.service_destroy(self.context, s_ref['id'])
def assert_compute_node_has_enough_disk(self, context, instance_ref,
                                        dest, disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.
    :raises: exception.MigrationError when dest lacks disk space

    """
    # Libvirt supports qcow2 disk format,which is usually compressed
    # on compute nodes.
    # Real disk image (compressed) may enlarged to "virtual disk size",
    # that is specified as the maximum disk size.
    # (See qemu-img -f path-to-disk)
    # Scheduler recognizes destination host still has enough disk space
    # if real disk size < available disk size
    # if disk_over_commit is True,
    # otherwise virtual disk size < available disk size.

    # Refresh compute_nodes table
    topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting total available disk of host
    available_gb = self._get_compute_info(context,
                                          dest, 'disk_available_least')
    available = available_gb * (1024 ** 3)  # GB -> bytes

    # Getting necessary disk size
    try:
        topic = db.queue_get_for(context, FLAGS.compute_topic,
                                 instance_ref['host'])
        ret = rpc.call(context, topic,
                       {"method": 'get_instance_disk_info',
                        "args": {'instance_name': instance_ref['name']}})
        disk_infos = utils.loads(ret)
    except rpc.RemoteError:
        # BUG FIX: 'src' must exist for the %(src)s substitution below;
        # without it the log call raised KeyError and masked the
        # original error.
        src = instance_ref['host']
        LOG.exception(_("host %(dest)s is not compatible with "
                        "original host %(src)s.") % locals())
        raise

    necessary = 0
    if disk_over_commit:
        # real (currently allocated) disk usage
        for info in disk_infos:
            necessary += int(info['disk_size'])
    else:
        # maximum (virtual) disk size
        for info in disk_infos:
            necessary += int(info['virt_disk_size'])

    # Check that available disk > necessary disk
    if (available - necessary) < 0:
        instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
    """Check if the src and dest host mount same shared storage.

    At first, dest host creates temp file, and src host can see
    it if they mounts same shared storage. Then src host erase it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :raises: exception.FileNotFound when the file created on dest is
             not visible from the source host (storage not shared)

    """
    src = instance_ref["host"]
    dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
    src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

    filename = None
    try:
        # create tmpfile at dest host
        filename = rpc.call(context, dst_t,
                            {"method": "create_shared_storage_test_file"})

        # make sure existence at src host.
        ret = rpc.call(context, src_t,
                       {"method": "check_shared_storage_test_file",
                        "args": {"filename": filename}})
        if not ret:
            raise exception.FileNotFound(file_path=filename)
        # NOTE: a redundant "except exception.FileNotFound: raise"
        # clause was removed here -- re-raising unchanged is a no-op.
    finally:
        # filename stays None when the create call itself failed;
        # there is nothing to clean up in that case.
        if filename is not None:
            rpc.call(context, dst_t,
                     {"method": "cleanup_shared_storage_test_file",
                      "args": {"filename": filename}})
def test_block_migration_dest_check_service_lack_disk(self):
    """Confirms exception raises when dest doesn't have enough disk."""
    # NOTE: mox verifies expectations in recorded order, so the
    # statements below mirror the driver's call sequence exactly.
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
    self.mox.StubOutWithMock(utils, 'service_is_up')
    self.mox.StubOutWithMock(self.driver,
                             'assert_compute_node_has_enough_memory')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')

    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_all_compute_by_host(self.context,
                                       dest).AndReturn(['fake_service3'])
    utils.service_is_up('fake_service3').AndReturn(True)

    # Enough memory
    self.driver.assert_compute_node_has_enough_memory(self.context,
                                                      instance, dest)

    # Not enough disk
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    rpc.call(self.context, 'dest_queue',
             {'method': 'update_available_resource'})
    # 1023 GB available vs a 1024 GB real disk size below -> too small.
    self.driver._get_compute_info(self.context, dest,
                                  'disk_available_least').AndReturn(1023)
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     instance['host']).AndReturn('src_queue')
    rpc.call(self.context, 'src_queue',
             {'method': 'get_instance_disk_info',
              'args': {'instance_name': instance['name']}}).AndReturn(
                      json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
    """Live migration fails when src/dest hypervisor types differ."""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
                                           dest, block_migration,
                                           disk_over_commit)

    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     instance['host']).AndReturn('src_queue')
    tmp_filename = 'test-filename'
    # Shared-storage check succeeds (file visible from source), so the
    # driver proceeds to the hypervisor compatibility checks.
    rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file'}
             ).AndReturn(tmp_filename)
    rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename}}).AndReturn(True)
    rpc.call(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename}})
    db.service_get_all_compute_by_host(self.context, dest).AndReturn(
            [{'compute_node': [{'hypervisor_type': 'xen',
                                'hypervisor_version': 1}]}])

    # different hypervisor type
    db.service_get_all_compute_by_host(self.context,
            instance['host']).AndReturn(
                    [{'compute_node': [{'hypervisor_type': 'not-xen',
                                        'hypervisor_version': 1}]}])

    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidHypervisorType,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def _provision_volume(self, context, vol, vsa_id, availability_zone):
    """Create a DB record for one VSA volume and cast its creation
    to the volume service on the host chosen in vol['host']."""
    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    scheduled_time = utils.utcnow()
    options = {
        'size': vol['size'],
        'user_id': context.user_id,
        'project_id': context.project_id,
        'snapshot_id': None,
        'availability_zone': availability_zone,
        'status': "creating",
        'attach_status': "detached",
        'display_name': vol['name'],
        'display_description': vol['description'],
        'volume_type_id': vol['volume_type_id'],
        'metadata': dict(to_vsa_id=vsa_id),
        'host': vol['host'],
        'scheduled_at': scheduled_time,
        }

    # These local names feed the %(...)s placeholders via locals().
    size = vol['size']
    host = vol['host']
    name = vol['name']
    LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "
                "host %(host)s"), locals())

    volume_ref = db.volume_create(context, options)
    rpc.cast(context,
             db.queue_get_for(context, "volume", vol['host']),
             {"method": "create_volume",
              "args": {"volume_id": volume_ref['id'],
                       "snapshot_id": None}})
def cast_to_network_host(context, host, method, update_db=False, **kwargs):
    """Cast request to a network host queue"""
    # Resolve the host-specific network queue, then fire and forget.
    queue = db.queue_get_for(context, 'network', host)
    message = {"method": method, "args": kwargs}
    rpc.cast(context, queue, message)
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())
def test_live_migration_dest_raises_exception_no_volume(self):
    """Same as above test(input pattern is different) """
    i_ref = self._get_dummy_instance()
    i_ref['volumes'] = []  # no volumes attached in this variant
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
        AndReturn(topic)
    self.mox.StubOutWithMock(rpc, 'call')
    # pre_live_migration on the destination fails...
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}}).\
        AndRaise(rpc.RemoteError('', '', ''))
    # ...so the instance is rolled back to running on the source host.
    dbmock.instance_update(c, i_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING,
                            'host': i_ref['host']})

    self.compute.db = dbmock
    self.mox.ReplayAll()
    self.assertRaises(rpc.RemoteError,
                      self.compute.live_migration,
                      c, i_ref['id'], i_ref['host'])
def test_live_migration_dest_raises_exception(self):
    """Confirm exception when pre_live_migration fails."""
    i_ref = self._get_dummy_instance()
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    self.mox.StubOutWithMock(rpc, 'call')
    # Volumes are attached, so exports are checked first.
    rpc.call(c, FLAGS.volume_topic,
             {"method": "check_for_export",
              "args": {'instance_id': i_ref['id']}})
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
        AndReturn(topic)
    # pre_live_migration on the destination fails...
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}}).\
        AndRaise(rpc.RemoteError('', '', ''))
    # ...so instance and volume states are rolled back.
    dbmock.instance_update(c, i_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING,
                            'host': i_ref['host']})
    for v in i_ref['volumes']:
        dbmock.volume_update(c, v['id'], {'status': 'in-use'})

    self.compute.db = dbmock
    self.mox.ReplayAll()
    self.assertRaises(rpc.RemoteError,
                      self.compute.live_migration,
                      c, i_ref['id'], i_ref['host'])
def test_live_migration_works_correctly_no_volume(self):
    """Confirm live_migration() works as expected correctly."""
    i_ref = self._get_dummy_instance()
    i_ref['volumes'] = []  # no volumes -> no check_for_export call
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    self.mox.StubOutWithMock(rpc, 'call')
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
        AndReturn(topic)
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}})
    self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
    # The driver receives the post/recover callbacks for later use.
    self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                       self.compute.post_live_migration,
                                       self.compute.recover_live_migration)

    self.compute.db = dbmock
    self.mox.ReplayAll()
    ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
    self.assertEqual(ret, None)
def test_live_migration_dest_raises_exception(self):
    """Confirm exception when pre_live_migration fails."""
    i_ref = self._get_dummy_instance()
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    self.mox.StubOutWithMock(rpc, 'call')
    # Volumes are attached, so exports are checked first.
    rpc.call(c, FLAGS.volume_topic,
             {"method": "check_for_export",
              "args": {'instance_id': i_ref['id']}})
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
        AndReturn(topic)
    # pre_live_migration on the destination fails...
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}}).\
        AndRaise(rpc.RemoteError('', '', ''))
    # ...so instance and volume states are rolled back.
    dbmock.instance_update(c, i_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING,
                            'host': i_ref['host']})
    for v in i_ref['volumes']:
        dbmock.volume_update(c, v['id'], {'status': 'in-use'})

    self.compute.db = dbmock
    self.mox.ReplayAll()
    self.assertRaises(rpc.RemoteError,
                      self.compute.live_migration,
                      c, i_ref['id'], i_ref['host'])
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
    """Check if the src and dest host mount same shared storage.

    At first, dest host creates temp file, and src host can see
    it if they mounts same shared storage. Then src host erase it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :raises: exception.FileNotFound when the file created on dest is
             not visible from the source host (storage not shared)

    """
    src = instance_ref['host']
    dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
    src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

    filename = None
    try:
        # create tmpfile at dest host
        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        # make sure existence at src host.
        ret = rpc.call(context, src_t,
                       {"method": 'check_shared_storage_test_file',
                        "args": {'filename': filename}})
        if not ret:
            raise exception.FileNotFound(file_path=filename)
        # NOTE: a redundant "except exception.FileNotFound: raise"
        # clause was removed here -- re-raising unchanged is a no-op.
    finally:
        # filename stays None when the create call itself failed;
        # there is nothing to clean up in that case.
        if filename is not None:
            rpc.call(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})
def test_cast_to_volume_host_update_db_without_volume_id(self):
    """update_db=True without a volume_id must not update the db."""
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'extra_arg': 'meow'}
    queue = 'fake_queue'

    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'cast')
    # Note: db.volume_update is deliberately not stubbed/expected --
    # no db update should happen when volume_id is absent.
    db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {'method': method,
              'args': fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_volume_host(self.context, host, method,
                               update_db=True, **fake_kwargs)
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
    """Check if the src and dest host mount same shared storage.

    At first, dest host creates temp file, and src host can see
    it if they mounts same shared storage. Then src host erase it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    src = instance_ref['host']
    dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
    src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

    # BUG FIX: initialize before the try block. Previously, if the
    # create call raised, 'filename' was unbound and the finally
    # clause raised NameError, masking the original RemoteError.
    filename = None
    try:
        # create tmpfile at dest host
        filename = rpc.call(context, dst_t,
                            {"method": 'create_shared_storage_test_file'})

        # make sure existence at src host.
        rpc.call(context, src_t,
                 {"method": 'check_shared_storage_test_file',
                  "args": {'filename': filename}})
    except rpc.RemoteError:
        ipath = FLAGS.instances_path
        logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                        "same shared storage between %(src)s "
                        "and %(dest)s.") % locals())
        raise
    finally:
        # Nothing to clean up if the file was never created.
        if filename is not None:
            rpc.call(context, dst_t,
                     {"method": 'cleanup_shared_storage_test_file',
                      "args": {'filename': filename}})
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below.
        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Update latest compute_node table
    topic = db.queue_get_for(context, FLAGS.compute_topic, host)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    node = compute_ref['compute_node'][0]
    resource = dict((key, node[key])
                    for key in ('vcpus', 'memory_mb', 'local_gb',
                                'vcpus_used', 'memory_mb_used',
                                'local_gb_used'))

    usage = {}
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    # Getting usage resource per project
    for project_id in set(i['project_id'] for i in instance_refs):
        members = [i for i in instance_refs
                   if i['project_id'] == project_id]
        usage[project_id] = {
            'vcpus': sum(i['vcpus'] for i in members),
            'memory_mb': sum(i['memory_mb'] for i in members),
            'root_gb': sum(i['root_gb'] for i in members),
            'ephemeral_gb': sum(i['ephemeral_gb'] for i in members),
        }

    return {'resource': resource, 'usage': usage}
def test_cast_to_host_unknown_topic(self):
    """An unmapped topic falls through to a generic rpc.cast."""
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'extra_arg': 'meow'}
    topic = 'unknown'  # not in cast_to_host's topic mapping
    queue = 'fake_queue'

    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'cast')
    db.queue_get_for(self.context, topic, host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {'method': method,
              'args': fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_host(self.context, topic, host, method,
                        update_db=False, **fake_kwargs)
def _live_migration_common_check(self, context, instance_ref, dest):
    """Live migration common check routine.

    Below checkings are followed by
    http://wiki.libvirt.org/page/TodoPreMigrationChecks

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    # Checking shared storage connectivity
    self.mounted_on_same_shared_storage(context, instance_ref, dest)

    # Checking dest exists.
    dservice_refs = db.service_get_all_compute_by_host(context, dest)
    dservice_ref = dservice_refs[0]['compute_node'][0]

    # Checking original host( where instance was launched at) exists.
    try:
        oservice_refs = \
            db.service_get_all_compute_by_host(context,
                                               instance_ref['launched_on'])
    except exception.NotFound:
        raise exception.Invalid(_("host %s where instance was launched "
                                  "does not exist.")
                                % instance_ref['launched_on'])
    oservice_ref = oservice_refs[0]['compute_node'][0]

    # Checking hypervisor is same.
    orig_hypervisor = oservice_ref['hypervisor_type']
    dest_hypervisor = dservice_ref['hypervisor_type']
    if orig_hypervisor != dest_hypervisor:
        # BUG FIX: removed a stray ')" sequence that was embedded in
        # this message and showed up verbatim in the error text.
        raise exception.Invalid(_("Different hypervisor type"
                                  "(%(orig_hypervisor)s->"
                                  "%(dest_hypervisor)s)" % locals()))

    # Checkng hypervisor version.
    orig_hypervisor = oservice_ref['hypervisor_version']
    dest_hypervisor = dservice_ref['hypervisor_version']
    if orig_hypervisor > dest_hypervisor:
        raise exception.Invalid(_("Older hypervisor version"
                                  "(%(orig_hypervisor)s->"
                                  "%(dest_hypervisor)s)") % locals())

    # Checking cpuinfo.
    try:
        rpc.call(context,
                 db.queue_get_for(context, FLAGS.compute_topic, dest),
                 {"method": 'compare_cpu',
                  "args": {'cpu_info': oservice_ref['cpu_info']}})
    except rpc.RemoteError:
        src = instance_ref['host']
        logging.exception(_("host %(dest)s is not compatible with "
                            "original host %(src)s.") % locals())
        raise
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below.
        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Update latest compute_node table
    topic = db.queue_get_for(context, FLAGS.compute_topic, host)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting compute node info and related instances info
    compute_ref = db.service_get_all_compute_by_host(context, host)
    compute_ref = compute_ref[0]
    instance_refs = db.instance_get_all_by_host(context,
                                                compute_ref['host'])

    # Getting total available/used resource
    compute_ref = compute_ref['compute_node'][0]
    resource = {
        'vcpus': compute_ref['vcpus'],
        'memory_mb': compute_ref['memory_mb'],
        'local_gb': compute_ref['local_gb'],
        'vcpus_used': compute_ref['vcpus_used'],
        'memory_mb_used': compute_ref['memory_mb_used'],
        'local_gb_used': compute_ref['local_gb_used']
    }
    usage = dict()
    if not instance_refs:
        return {'resource': resource, 'usage': usage}

    # Getting usage resource per project
    project_ids = [i['project_id'] for i in instance_refs]
    project_ids = list(set(project_ids))
    for project_id in project_ids:
        vcpus = [i['vcpus'] for i in instance_refs
                 if i['project_id'] == project_id]
        mem = [i['memory_mb'] for i in instance_refs
               if i['project_id'] == project_id]
        disk = [i['local_gb'] for i in instance_refs
                if i['project_id'] == project_id]
        # IDIOM FIX: sum() replaces reduce(lambda x, y: x + y, ...).
        # Each list is non-empty here (project_ids is derived from
        # instance_refs), so the result is identical.
        usage[project_id] = {
            'vcpus': sum(vcpus),
            'memory_mb': sum(mem),
            'local_gb': sum(disk)
        }

    return {'resource': resource, 'usage': usage}
def _live_migration_common_check(self, context, instance_ref, dest):
    """Live migration common check routine.

    Below checkings are followed by
    http://wiki.libvirt.org/page/TodoPreMigrationChecks

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host

    """
    # Checking shared storage connectivity
    self.mounted_on_same_shared_storage(context, instance_ref, dest)

    # Checking dest exists.
    dest_services = db.service_get_all_compute_by_host(context, dest)
    dest_node = dest_services[0]['compute_node'][0]

    # Checking original host( where instance was launched at) exists.
    try:
        orig_services = db.service_get_all_compute_by_host(
            context, instance_ref['launched_on'])
    except exception.NotFound:
        raise exception.SourceHostUnavailable()
    orig_node = orig_services[0]['compute_node'][0]

    # Checking hypervisor is same.
    if orig_node['hypervisor_type'] != dest_node['hypervisor_type']:
        raise exception.InvalidHypervisorType()

    # Checking hypervisor version: dest must not be older than orig.
    if orig_node['hypervisor_version'] > dest_node['hypervisor_version']:
        raise exception.DestinationHypervisorTooOld()

    # Checking cpuinfo.
    try:
        topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
        rpc.call(context, topic,
                 {"method": 'compare_cpu',
                  "args": {'cpu_info': orig_node['cpu_info']}})
    except rpc.RemoteError:
        src = instance_ref['host']
        logging.exception(_("host %(dest)s is not compatible with "
                            "original host %(src)s.") % locals())
        raise
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""
    if update_db:
        volume_id = kwargs.get("volume_id")
        if volume_id is not None:
            # Record where and when the volume was scheduled.
            updates = {"host": host, "scheduled_at": utils.utcnow()}
            db.volume_update(context, volume_id, updates)
    queue = db.queue_get_for(context, "volume", host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
def test_live_migration_same_shared_storage_okay(self):
    """live migration works with same src and dest shared storage"""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
                                           dest, block_migration,
                                           disk_over_commit)

    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     instance['host']).AndReturn('src_queue')
    tmp_filename = 'test-filename'
    rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file'}
             ).AndReturn(tmp_filename)
    # NOTE(review): despite the docstring, this expectation returns
    # False (file NOT visible from the source) and the test asserts
    # FileNotFound below -- see the FIXME referencing LP891756.
    rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename}}).AndReturn(False)
    rpc.call(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename}})

    self.mox.ReplayAll()
    # FIXME(comstud): See LP891756.
    self.assertRaises(exception.FileNotFound,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""
    # Dispatch table: topics with a specialized cast helper.
    topic_mapping = {"compute": cast_to_compute_host,
                     "volume": cast_to_volume_host,
                     "network": cast_to_network_host}
    func = topic_mapping.get(topic)
    if func:
        # The specialized helper does its own logging.
        func(context, host, method, update_db=update_db, **kwargs)
    else:
        # Unknown topic: plain fire-and-forget cast.
        queue = db.queue_get_for(context, topic, host)
        rpc.cast(context, queue, {"method": method, "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                  % locals())
def test_live_migration_different_shared_storage_raises(self):
    """Src and dest must have same shared storage for live migration"""
    self.mox.StubOutWithMock(db, 'instance_get')
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')

    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    db.instance_get(self.context, instance['id']).AndReturn(instance)

    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
                                           dest, block_migration,
                                           disk_over_commit)

    db.queue_get_for(self.context, FLAGS.compute_topic,
                     dest).AndReturn('dest_queue')
    db.queue_get_for(self.context, FLAGS.compute_topic,
                     instance['host']).AndReturn('src_queue')
    tmp_filename = 'test-filename'
    rpc.call(self.context, 'dest_queue',
             {'method': 'create_shared_storage_test_file'}
             ).AndReturn(tmp_filename)
    # The file is not visible from the source -> storage not shared.
    rpc.call(self.context, 'src_queue',
             {'method': 'check_shared_storage_test_file',
              'args': {'filename': tmp_filename}}).AndReturn(False)
    # Cleanup is expected even on the failure path.
    rpc.cast(self.context, 'dest_queue',
             {'method': 'cleanup_shared_storage_test_file',
              'args': {'filename': tmp_filename}})

    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      self.driver.schedule_live_migration, self.context,
                      instance_id=instance['id'], dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""
    if update_db:
        # fall back on the id if the uuid is not present
        instance_uuid = kwargs.get("instance_uuid",
                                   kwargs.get("instance_id", None))
        if instance_uuid is not None:
            # Record where and when the instance was scheduled.
            db.instance_update(context, instance_uuid,
                               {"host": host,
                                "scheduled_at": utils.utcnow()})
    queue = db.queue_get_for(context, "compute", host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""
    if update_db:
        vol_id = kwargs.get('volume_id')
        if vol_id is not None:
            # Record where and when the volume was scheduled.
            db.volume_update(context, vol_id,
                             {'host': host,
                              'scheduled_at': utils.utcnow()})
    queue = db.queue_get_for(context, 'volume', host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""
    if update_db:
        inst_id = kwargs.get('instance_id')
        if inst_id is not None:
            # Stamp the chosen host and scheduling time on the instance.
            db.instance_update(context, inst_id,
                               {'host': host,
                                'scheduled_at': utils.utcnow()})
    queue = db.queue_get_for(context, 'compute', host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
def _schedule(self, method, context, topic, *args, **kwargs):
    """Tries to call schedule_* method on the driver to retrieve host.

    Falls back to schedule(context, topic) if method doesn't exist.
    The chosen host is then cast the original method and kwargs.
    """
    driver_method = "schedule_%s" % method
    elevated = context.elevated()
    # BUG FIX: resolve the attribute *outside* the driver call.  The old
    # form called getattr(...)(...) inside the try, so an AttributeError
    # raised within the scheduler method itself was silently mistaken
    # for a missing schedule_* method and swallowed by the fallback.
    try:
        real_meth = getattr(self.driver, driver_method)
        sched_args = args
    except AttributeError:
        real_meth = self.driver.schedule
        sched_args = (topic,) + args
    host = real_meth(elevated, *sched_args, **kwargs)
    rpc.cast(context,
             db.queue_get_for(context, topic, host),
             {"method": method, "args": kwargs})
    LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
def test_cast_to_volume_host_update_db_with_volume_id(self):
    """With update_db=True and a volume_id, cast_to_volume_host must
    stamp host/scheduled_at on the volume before casting the method."""
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'volume_id': 31337,
                   'extra_arg': 'meow'}
    queue = 'fake_queue'

    self.mox.StubOutWithMock(utils, 'utcnow')
    self.mox.StubOutWithMock(db, 'volume_update')
    self.mox.StubOutWithMock(db, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'cast')

    # Expected sequence: timestamp, DB update, queue lookup, cast.
    utils.utcnow().AndReturn('fake-now')
    db.volume_update(self.context, 31337,
            {'host': host, 'scheduled_at': 'fake-now'})
    db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
    rpc.cast(self.context, queue,
            {'method': method,
             'args': fake_kwargs})

    self.mox.ReplayAll()
    driver.cast_to_volume_host(self.context, host, method,
            update_db=True, **fake_kwargs)
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
    """Check if the src and dest host mount same shared storage.

    At first, dest host creates temp file, and src host can see
    it if they mounts same shared storage. Then src host erase it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    """
    src = instance_ref['host']
    dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
    src_t = db.queue_get_for(context, FLAGS.compute_topic, src)

    # BUG FIX: create the tmpfile *before* entering the try block.  In
    # the old code the create call was inside the try, so if it raised,
    # the finally clause referenced an unbound `filename` and masked the
    # real error with a NameError/UnboundLocalError.  If creation fails
    # there is nothing to clean up anyway.
    filename = rpc.call(context, dst_t,
                        {"method": 'create_shared_storage_test_file'})
    try:
        # make sure existence at src host.
        rpc.call(context, src_t,
                 {"method": 'check_shared_storage_test_file',
                  "args": {'filename': filename}})
    except rpc.RemoteError:
        ipath = FLAGS.instances_path
        logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                        "same shared storage between %(src)s "
                        "and %(dest)s.") % locals())
        raise
    finally:
        # Always remove the probe file from the dest host.
        rpc.call(context, dst_t,
                 {"method": 'cleanup_shared_storage_test_file',
                  "args": {'filename': filename}})
def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                        disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.

    :raises MigrationError: when dest lacks the required disk space.
    """
    # Libvirt supports qcow2 disk format, which is usually compressed
    # on compute nodes.  The real (compressed) image may grow up to the
    # "virtual disk size" declared as its maximum (see qemu-img).
    # With disk_over_commit the real size is compared against the
    # available space; otherwise the virtual size is used.

    # Total disk currently available on the destination, in bytes.
    available_gb = self._get_compute_info(context, dest,
                                          "disk_available_least")
    available = available_gb * (1024 ** 3)

    # Ask the instance's current host how big its disks are.
    topic = db.queue_get_for(context, FLAGS.compute_topic,
                             instance_ref["host"])
    ret = rpc.call(context, topic,
                   {"method": "get_instance_disk_info",
                    "args": {"instance_name": instance_ref["name"]}})
    disk_infos = utils.loads(ret)

    size_key = "disk_size" if disk_over_commit else "virt_disk_size"
    necessary = sum(int(info[size_key]) for info in disk_infos)

    # Check that available disk > necessary disk
    if available < necessary:
        instance_uuid = instance_ref["uuid"]
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def _provision_resource_locally(self, context, build_plan_item,
                                request_spec, kwargs):
    """Create the requested resource in this Zone."""
    host = build_plan_item["hostname"]
    base_options = request_spec["instance_properties"]

    # TODO(sandy): I guess someone needs to add block_device_mapping
    # support at some point? Also, OS API has no concept of security
    # groups.
    instance = compute_api.API().create_db_entry_for_new_instance(
        context, base_options, None, [])

    # Hand the freshly created instance id to the compute host.
    kwargs["instance_id"] = instance["id"]
    queue = db.queue_get_for(context, "compute", host)
    rpc.cast(context, queue, {"method": "run_instance", "args": kwargs})
    LOG.debug(_("Provisioning locally via compute node %(host)s")
              % locals())
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""
    if update_db:
        # fall back on the id if the uuid is not present
        ident = kwargs.get('instance_uuid', kwargs.get('instance_id', None))
        if ident is not None:
            now = utils.utcnow()
            db.instance_update(context, ident,
                               {'host': host, 'scheduled_at': now})
    target_queue = db.queue_get_for(context, 'compute', host)
    rpc.cast(context, target_queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""
    # Dispatch table: known topics get their specialized helper, which
    # may also update the DB; anything else falls back to a plain cast.
    dispatch = {
        "compute": cast_to_compute_host,
        "volume": cast_to_volume_host,
        'network': cast_to_network_host}

    handler = dispatch.get(topic)
    if handler is not None:
        handler(context, host, method, update_db=update_db, **kwargs)
    else:
        rpc.cast(context,
                 db.queue_get_for(context, topic, host),
                 {"method": method, "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                  % locals())
def forward_request(context, request_type, master, aggregate_id,
                    slave_compute, slave_address, slave_uuid):
    """Casts add/remove requests to the pool master."""
    # replace the address from the xenapi connection url
    # because this might be 169.254.0.1, i.e. xenapi
    # NOTE: password in clear is not great, but it'll do for now
    sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, slave_address)
    request_args = {"aggregate_id": aggregate_id,
                    "host": slave_compute,
                    "url": sender_url,
                    "user": FLAGS.xenapi_connection_username,
                    "passwd": FLAGS.xenapi_connection_password,
                    "compute_uuid": vm_utils.get_this_vm_uuid(),
                    "xenhost_uuid": slave_uuid,
                    }
    master_queue = db.queue_get_for(context, FLAGS.compute_topic, master)
    rpc.cast(context, master_queue,
             {"method": request_type, "args": request_args})
def _schedule(self, method, context, topic, *args, **kwargs):
    """Tries to call schedule_* method on the driver to retrieve host.

    Falls back to schedule(context, topic) if method doesn't exist.
    The chosen host is then cast the original method and kwargs.
    """
    driver_method = 'schedule_%s' % method
    elevated = context.elevated()
    # BUG FIX: resolve the attribute *outside* the driver call.  The old
    # form called getattr(...)(...) inside the try, so an AttributeError
    # raised within the scheduler method itself was silently mistaken
    # for a missing schedule_* method and swallowed by the fallback.
    try:
        real_meth = getattr(self.driver, driver_method)
        sched_args = args
    except AttributeError:
        real_meth = self.driver.schedule
        sched_args = (topic,) + args
    host = real_meth(elevated, *sched_args, **kwargs)
    rpc.cast(context,
             db.queue_get_for(context, topic, host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
def test_scheduler_live_migration_with_volume(self):
    """scheduler_live_migration() works correctly as expected.

    Also, checks instance state is changed from 'running' -> 'migrating'.

    """
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, dic)

    # cannot check 2nd argument b/c the addresses of instance object
    # is different.
    driver_i = self.scheduler.driver
    nocare = mox.IgnoreArg()
    self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
    # All three pre-migration checks are expected to pass silently.
    driver_i._live_migration_src_check(nocare, nocare)
    driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
    driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
    self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
    kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
    # The scheduler must cast live_migration to the source compute host.
    rpc.cast(self.context,
             db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
             {"method": 'live_migration', "args": kwargs})

    self.mox.ReplayAll()
    self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                  instance_id=instance_id,
                                  dest=i_ref['host'])

    # Scheduling must have flipped the instance into 'migrating'.
    i_ref = db.instance_get(self.context, instance_id)
    self.assertTrue(i_ref['state_description'] == 'migrating')
    db.instance_destroy(self.context, instance_id)
    db.volume_destroy(self.context, v_ref['id'])
def test_scheduler_live_migration_with_volume(self):
    """scheduler_live_migration() works correctly as expected.

    Also, checks instance state is changed from 'running' -> 'migrating'.

    """
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {'instance_id': instance_id, 'size': 1}
    v_ref = db.volume_create(self.context, dic)

    # cannot check 2nd argument b/c the addresses of instance object
    # is different.
    driver_i = self.scheduler.driver
    nocare = mox.IgnoreArg()
    self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
    self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
    # All three pre-migration checks are expected to pass silently;
    # the trailing False is block_migration.
    driver_i._live_migration_src_check(nocare, nocare)
    driver_i._live_migration_dest_check(nocare, nocare,
                                        i_ref['host'], False)
    driver_i._live_migration_common_check(nocare, nocare,
                                          i_ref['host'], False)
    self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
    kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
              'block_migration': False}
    # The scheduler must cast live_migration to the source compute host.
    rpc.cast(self.context,
             db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
             {"method": 'live_migration', "args": kwargs})

    self.mox.ReplayAll()
    self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                  instance_id=instance_id,
                                  dest=i_ref['host'],
                                  block_migration=False)

    # Scheduling must have flipped the instance into 'migrating'.
    i_ref = db.instance_get(self.context, instance_id)
    self.assertTrue(i_ref['state_description'] == 'migrating')
    db.instance_destroy(self.context, instance_id)
    db.volume_destroy(self.context, v_ref['id'])
def _provision_resource_locally(self, context, build_plan_item,
                                request_spec, kwargs):
    """Create the requested resource in this Zone."""
    host = build_plan_item['hostname']
    base_options = request_spec['instance_properties']

    # TODO(sandy): I guess someone needs to add block_device_mapping
    # support at some point? Also, OS API has no concept of security
    # groups.
    new_instance = compute_api.API().create_db_entry_for_new_instance(
        context, base_options, None, [])

    # Forward the freshly created instance id to the compute host.
    kwargs['instance_id'] = new_instance['id']
    rpc.cast(context,
             db.queue_get_for(context, "compute", host),
             {"method": "run_instance", "args": kwargs})
    LOG.debug(_("Provisioning locally via compute node %(host)s")
              % locals())
def test_live_migration_works_correctly_no_volume(self):
    """Confirm live_migration() works as expected correctly."""
    i_ref = self._get_dummy_instance()
    i_ref['volumes'] = []
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    # Mock out the compute manager's db handle entirely.
    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    self.mox.StubOutWithMock(rpc, 'call')
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                         AndReturn(topic)
    # pre_live_migration must be called on the (here: same) host.
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}})
    self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
    # Driver gets the post/recover callbacks from the compute manager.
    self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                              self.compute.post_live_migration,
                              self.compute.recover_live_migration)

    self.compute.db = dbmock
    self.mox.ReplayAll()
    ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
    # live_migration() has no meaningful return value.
    self.assertEqual(ret, None)
def test_scheduler_live_migration_with_volume(self):
    """scheduler_live_migration() works correctly as expected.

    Also, checks instance state is changed from 'running' -> 'migrating'.

    """
    instance_id = self._create_instance()
    i_ref = db.instance_get(self.context, instance_id)
    dic = {"instance_id": instance_id, "size": 1}
    v_ref = db.volume_create(self.context, dic)

    # cannot check 2nd argument b/c the addresses of instance object
    # is different.
    driver_i = self.scheduler.driver
    nocare = mox.IgnoreArg()
    self.mox.StubOutWithMock(driver_i, "_live_migration_src_check")
    self.mox.StubOutWithMock(driver_i, "_live_migration_dest_check")
    self.mox.StubOutWithMock(driver_i, "_live_migration_common_check")
    # All three pre-migration checks are expected to pass silently.
    driver_i._live_migration_src_check(nocare, nocare)
    driver_i._live_migration_dest_check(nocare, nocare, i_ref["host"])
    driver_i._live_migration_common_check(nocare, nocare, i_ref["host"])
    self.mox.StubOutWithMock(rpc, "cast", use_mock_anything=True)
    kwargs = {"instance_id": instance_id, "dest": i_ref["host"]}
    # The scheduler must cast live_migration to the source compute host.
    rpc.cast(
        self.context,
        db.queue_get_for(nocare, FLAGS.compute_topic, i_ref["host"]),
        {"method": "live_migration", "args": kwargs},
    )

    self.mox.ReplayAll()
    self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                  instance_id=instance_id,
                                  dest=i_ref["host"])

    # Scheduling must have flipped the instance into 'migrating'.
    i_ref = db.instance_get(self.context, instance_id)
    self.assertTrue(i_ref["state_description"] == "migrating")
    db.instance_destroy(self.context, instance_id)
    db.volume_destroy(self.context, v_ref["id"])
def test_live_migration_dest_raises_exception_no_volume(self):
    """Same as above test(input pattern is different) """
    i_ref = self._get_dummy_instance()
    i_ref['volumes'] = []
    c = context.get_admin_context()
    topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])

    # Mock out the compute manager's db handle entirely.
    dbmock = self.mox.CreateMock(db)
    dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
    dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                         AndReturn(topic)
    self.mox.StubOutWithMock(rpc, 'call')
    # Simulate the destination host failing pre_live_migration.
    rpc.call(c, topic, {"method": "pre_live_migration",
                        "args": {'instance_id': i_ref['id']}}).\
                        AndRaise(rpc.RemoteError('', '', ''))
    # On failure the instance must be rolled back to RUNNING on its
    # original host.
    dbmock.instance_update(c, i_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING,
                            'host': i_ref['host']})

    self.compute.db = dbmock
    self.mox.ReplayAll()
    # The RemoteError must propagate to the caller after rollback.
    self.assertRaises(rpc.RemoteError,
                      self.compute.live_migration,
                      c, i_ref['id'], i_ref['host'])
class SchedulerManager(manager.Manager):
    """Chooses a host to run instances on."""

    def __init__(self, scheduler_driver=None, *args, **kwargs):
        self.zone_manager = zone_manager.ZoneManager()
        if not scheduler_driver:
            scheduler_driver = FLAGS.scheduler_driver
        self.driver = utils.import_object(scheduler_driver)
        self.driver.set_zone_manager(self.zone_manager)
        super(SchedulerManager, self).__init__(*args, **kwargs)

    def __getattr__(self, key):
        """Converts all method calls to use the schedule method"""
        return functools.partial(self._schedule, key)

    def periodic_tasks(self, context=None):
        """Poll child zones periodically to get status."""
        self.zone_manager.ping(context)

    def get_host_list(self, context=None):
        """Get a list of hosts from the ZoneManager."""
        return self.zone_manager.get_host_list()

    def get_zone_list(self, context=None):
        """Get a list of zones from the ZoneManager."""
        return self.zone_manager.get_zone_list()

    def get_zone_capabilities(self, context=None):
        """Get the normalized set of capabilites for this zone."""
        return self.zone_manager.get_zone_capabilities(context)

    def update_service_capabilities(self, context=None, service_name=None,
                                    host=None, capabilities=None):
        """Process a capability update from a service node."""
        # BUG FIX: the guard used to read the undefined name `capability`
        # (singular), raising NameError whenever `capabilities` was falsy.
        if not capabilities:
            capabilities = {}
        self.zone_manager.update_service_capabilities(service_name,
                host, capabilities)

    def select(self, context=None, *args, **kwargs):
        """Select a list of hosts best matching the provided specs."""
        return self.driver.select(context, *args, **kwargs)

    def get_scheduler_rules(self, context=None, *args, **kwargs):
        """Ask the driver how requests should be made of it."""
        return self.driver.get_scheduler_rules(context, *args, **kwargs)

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.

        Falls back to schedule(context, topic) if method doesn't exist.
        """
        driver_method = 'schedule_%s' % method
        elevated = context.elevated()
        # BUG FIX: resolve the attribute *outside* the driver call so an
        # AttributeError raised within the scheduler method itself is not
        # mistaken for a missing schedule_* method.
        try:
            real_meth = getattr(self.driver, driver_method)
            sched_args = args
        except AttributeError as e:
            LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s."
                          "Reverting to schedule()") % locals())
            real_meth = self.driver.schedule
            sched_args = (topic,) + args
        host = real_meth(elevated, *sched_args, **kwargs)
        if not host:
            # A falsy host means the driver handled the request itself
            # (e.g. forwarded to a child zone) - nothing to cast.
            LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
                      % locals())
            return
        rpc.cast(context,
                 db.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})
        LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s")
                  % locals())
def _live_migration_common_check(self, context, instance_ref, dest,
                                 block_migration):
    """Live migration common check routine.

    Below checkings are followed by
    http://wiki.libvirt.org/page/TodoPreMigrationChecks

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param block_migration if True, check for block_migration.

    """
    # Checking shared storage connectivity
    # if block migration, instances_paths should not be on shared storage.
    try:
        self.mounted_on_same_shared_storage(context, instance_ref, dest)
        if block_migration:
            # Shared storage was found, which contradicts block
            # migration, so refuse the request.
            reason = _("Block migration can not be used "
                       "with shared storage.")
            raise exception.InvalidSharedStorage(reason=reason, path=dest)
    except exception.FileNotFound:
        # FileNotFound means the probe file was not visible on the src
        # host, i.e. storage is NOT shared.  For block migration that is
        # the desired state, so the exception is deliberately swallowed
        # in that case.
        if not block_migration:
            src = instance_ref['host']
            ipath = FLAGS.instances_path
            LOG.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                        "same shared storage between %(src)s "
                        "and %(dest)s.") % locals())
            raise

    # Checking dest exists.
    dservice_refs = db.service_get_all_compute_by_host(context, dest)
    dservice_ref = dservice_refs[0]['compute_node'][0]

    # Checking original host( where instance was launched at) exists.
    try:
        oservice_refs = db.service_get_all_compute_by_host(
            context, instance_ref['launched_on'])
    except exception.NotFound:
        raise exception.SourceHostUnavailable()
    oservice_ref = oservice_refs[0]['compute_node'][0]

    # Checking hypervisor is same.
    orig_hypervisor = oservice_ref['hypervisor_type']
    dest_hypervisor = dservice_ref['hypervisor_type']
    if orig_hypervisor != dest_hypervisor:
        raise exception.InvalidHypervisorType()

    # Checkng hypervisor version.
    orig_hypervisor = oservice_ref['hypervisor_version']
    dest_hypervisor = dservice_ref['hypervisor_version']
    if orig_hypervisor > dest_hypervisor:
        # Migrating to an older hypervisor is not supported.
        raise exception.DestinationHypervisorTooOld()

    # Checking cpuinfo.
    # The dest hypervisor must be able to run the source CPU's feature
    # set; compare_cpu raises remotely if it cannot.
    try:
        rpc.call(context,
                 db.queue_get_for(context, FLAGS.compute_topic, dest),
                 {"method": 'compare_cpu',
                  "args": {'cpu_info': oservice_ref['cpu_info']}})

    except rpc.RemoteError:
        src = instance_ref['host']
        LOG.exception(_("host %(dest)s is not compatible with "
                        "original host %(src)s.") % locals())
        raise