def test_live_migration_dest_hypervisor_version_older_raises(self):
    # Confirm live migration to older hypervisor raises
    # DestinationHypervisorTooOld.
    self.mox.StubOutWithMock(self.driver, "_live_migration_src_check")
    self.mox.StubOutWithMock(self.driver, "_live_migration_dest_check")
    self.mox.StubOutWithMock(rpc, "queue_get_for")
    self.mox.StubOutWithMock(rpc, "call")
    self.mox.StubOutWithMock(rpc, "cast")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # Record: src/dest checks pass, then the common check fetches both
    # compute nodes -- dest reports an older hypervisor_version (1 < 2).
    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
                                           dest).AndReturn(dest)
    db.service_get_by_compute_host(self.context, dest).AndReturn(
        {"compute_node": [{"hypervisor_type": "xen",
                           "hypervisor_version": 1}]}
    )
    db.service_get_by_compute_host(self.context,
                                   instance["host"]).AndReturn(
        {"compute_node": [{"hypervisor_type": "xen",
                           "hypervisor_version": 2}]}
    )
    self.mox.ReplayAll()
    self.assertRaises(
        exception.DestinationHypervisorTooOld,
        self.driver.schedule_live_migration,
        self.context,
        instance=instance,
        dest=dest,
        block_migration=block_migration,
        disk_over_commit=disk_over_commit,
    )
def test_live_migration_dest_check_service_lack_memory(self):
    # Confirms exception raises when dest doesn't have enough memory.
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # Dest reports 2048MB total while existing instances already consume
    # 1536MB, so the migrating instance does not fit.
    self.driver._get_compute_info(self.context, dest).AndReturn(
        {'memory_mb': 2048})
    db.instance_get_all_by_host(self.context, dest).AndReturn(
        [dict(memory_mb=1024), dict(memory_mb=512)])
    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_check_requested_destination(self):
    # Happy path: destination service exists and is up, both hosts report
    # compatible hypervisor details with enough free RAM, so the compute
    # RPC pre-check runs and its result is stored as migrate_data.
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(self.task, '_get_compute_info')
    self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
    self.mox.StubOutWithMock(self.task.compute_rpcapi,
                             'check_can_live_migrate_destination')
    db.service_get_by_compute_host(self.context,
            self.destination).AndReturn("service")
    self.task.servicegroup_api.service_is_up("service").AndReturn(True)
    # The same details are returned for both hosts, so every
    # compatibility comparison passes.
    hypervisor_details = {
        "hypervisor_type": "a",
        "hypervisor_version": 6.1,
        "free_ram_mb": 513
    }
    self.task._get_compute_info(self.destination)\
        .AndReturn(hypervisor_details)
    self.task._get_compute_info(self.instance_host)\
        .AndReturn(hypervisor_details)
    self.task._get_compute_info(self.destination)\
        .AndReturn(hypervisor_details)
    self.task.compute_rpcapi.check_can_live_migrate_destination(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit).AndReturn(
                    "migrate_data")
    self.mox.ReplayAll()
    self.task._check_requested_destination()
    self.assertEqual("migrate_data", self.task.migrate_data)
def test_live_migration_compute_dest_not_alive(self):
    # Raise exception when dest compute node is not alive: the service
    # row exists but the servicegroup API reports it down.
    self.mox.StubOutWithMock(self.driver, "_live_migration_src_check")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    self.mox.StubOutWithMock(servicegroup.API, "service_is_up")
    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn("fake_service3")
    # Compute is down
    self.servicegroup_api.service_is_up("fake_service3").AndReturn(False)
    self.mox.ReplayAll()
    self.assertRaises(
        exception.ComputeServiceUnavailable,
        self.driver.schedule_live_migration,
        self.context,
        instance=instance,
        dest=dest,
        block_migration=block_migration,
        disk_over_commit=disk_over_commit,
    )
def test_check_instance_host_is_up_fails_if_not_found(self):
    """A missing compute service is reported as unavailable."""
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')

    db.service_get_by_compute_host(
        self.context, 'host').AndRaise(exception.NotFound)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_host_is_up, 'host')
def test_check_requested_destination_fails_when_destination_is_up(self):
    """NotFound on the destination lookup maps to service-unavailable.

    NOTE(review): despite the name, this exercises the case where the
    destination's service record does not exist at all.
    """
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')

    db.service_get_by_compute_host(
        self.context, self.destination).AndRaise(exception.NotFound)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_requested_destination)
def test_live_migration_compute_dest_not_alive(self):
    # Raise exception when dest compute node is not alive: the service
    # row exists but the servicegroup API reports it down.
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    # Compute is down
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_all_checks_pass(self):
    # Test live migration when all checks pass: source up, dest up with
    # enough memory, hypervisors compatible, RPC pre-check succeeds.
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(self.driver.compute_rpcapi,
                             'live_migration')
    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = jsonutils.to_primitive(self._live_migration_instance())
    # Source checks
    db.service_get_by_compute_host(self.context,
            instance['host']).AndReturn('fake_service2')
    self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
    # Destination checks (compute is up, enough memory, disk)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory()
    db.service_get_by_compute_host(self.context, dest).AndReturn(
            {'compute_node': [{'memory_mb': 2048,
                               'hypervisor_version': 1}]})
    db.instance_get_all_by_host(self.context, dest).AndReturn(
            [dict(memory_mb=256), dict(memory_mb=512)])
    # Common checks (same hypervisor, etc)
    db.service_get_by_compute_host(self.context, dest).AndReturn(
            {'compute_node': [{'hypervisor_type': 'xen',
                               'hypervisor_version': 1}]})
    db.service_get_by_compute_host(self.context,
        instance['host']).AndReturn(
            {'compute_node': [{'hypervisor_type': 'xen',
                               'hypervisor_version': 1,
                               'cpu_info': 'fake_cpu_info'}]})
    # The destination compute is asked whether it can accept the
    # migration; an empty dict means "yes, no extra migrate_data".
    rpc.call(self.context, "compute.fake_host2",
             {"method": 'check_can_live_migrate_destination',
              "args": {'instance': instance,
                       'block_migration': block_migration,
                       'disk_over_commit': disk_over_commit},
              "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
             None).AndReturn({})
    self.driver.compute_rpcapi.live_migration(self.context,
            host=instance['host'], instance=instance, dest=dest,
            block_migration=block_migration, migrate_data={})

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance=instance, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    # schedule_live_migration fires off the migration and returns None.
    self.assertEqual(result, None)
def test_live_migration_compute_dest_not_exist(self):
    # Raise exception when dest compute node does not exist: the DB
    # lookup raises NotFound, which must surface as
    # ComputeServiceUnavailable.
    self.mox.StubOutWithMock(self.driver, "_live_migration_src_check")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    # Compute down
    db.service_get_by_compute_host(self.context,
            dest).AndRaise(exception.NotFound())
    self.mox.ReplayAll()
    self.assertRaises(
        exception.ComputeServiceUnavailable,
        self.driver.schedule_live_migration,
        self.context,
        instance=instance,
        dest=dest,
        block_migration=block_migration,
        disk_over_commit=disk_over_commit,
    )
def test_check_requested_destination(self):
    # Happy path: destination service exists and is up, both hosts report
    # compatible hypervisor details with enough free RAM, so the compute
    # RPC pre-check runs and its result is stored as migrate_data.
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(self.task, '_get_compute_info')
    self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
    self.mox.StubOutWithMock(self.task.compute_rpcapi,
                             'check_can_live_migrate_destination')
    db.service_get_by_compute_host(self.context,
            self.destination).AndReturn("service")
    self.task.servicegroup_api.service_is_up("service").AndReturn(True)
    # The same details are returned for both hosts, so every
    # compatibility comparison passes.
    hypervisor_details = {
        "hypervisor_type": "a",
        "hypervisor_version": 6.1,
        "free_ram_mb": 513
    }
    self.task._get_compute_info(self.destination)\
        .AndReturn(hypervisor_details)
    self.task._get_compute_info(self.instance_host)\
        .AndReturn(hypervisor_details)
    self.task._get_compute_info(self.destination)\
        .AndReturn(hypervisor_details)
    self.task.compute_rpcapi.check_can_live_migrate_destination(
            self.context, self.instance, self.destination,
            self.block_migration,
            self.disk_over_commit).AndReturn("migrate_data")
    self.mox.ReplayAll()
    self.task._check_requested_destination()
    self.assertEqual("migrate_data", self.task.migrate_data)
def test_live_migration_compute_src_not_exist(self):
    # Raise exception when the src compute node does not exist: the DB
    # lookup for the source host raises ComputeHostNotFound.
    self.mox.StubOutWithMock(servicegroup.API, "service_is_up")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # Compute down
    db.service_get_by_compute_host(self.context,
            instance["host"]).AndRaise(
        exception.ComputeHostNotFound(host="fake")
    )
    self.mox.ReplayAll()
    self.assertRaises(
        exception.ComputeServiceUnavailable,
        self.driver.schedule_live_migration,
        self.context,
        instance=instance,
        dest=dest,
        block_migration=block_migration,
        disk_over_commit=disk_over_commit,
    )
def test_live_migration_dest_hypervisor_version_older_raises(self):
    # Confirm live migration to older hypervisor raises
    # DestinationHypervisorTooOld.
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
    self.mox.StubOutWithMock(rpc, 'queue_get_for')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(rpc, 'cast')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # Record: src/dest checks pass, then the common check fetches both
    # compute nodes -- dest reports an older hypervisor_version (1 < 2).
    self.driver._live_migration_src_check(self.context, instance)
    self.driver._live_migration_dest_check(self.context, instance,
                                           dest).AndReturn(dest)
    db.service_get_by_compute_host(self.context, dest).AndReturn(
        {'compute_node': [{'hypervisor_type': 'xen',
                           'hypervisor_version': 1}]})
    db.service_get_by_compute_host(self.context,
            instance['host']).AndReturn(
        {'compute_node': [{'hypervisor_type': 'xen',
                           'hypervisor_version': 2}]})
    self.mox.ReplayAll()
    self.assertRaises(exception.DestinationHypervisorTooOld,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_check_instance_host_is_up(self):
    """The up-check passes when the service exists and reports up."""
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')

    db.service_get_by_compute_host(
        self.context, 'host').AndReturn('service')
    self.task.servicegroup_api.service_is_up('service').AndReturn(True)
    self.mox.ReplayAll()

    self.task._check_host_is_up('host')
def test_check_instance_host_is_up_fails_if_not_up(self):
    """A down compute service is reported as unavailable."""
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')

    db.service_get_by_compute_host(
        self.context, 'host').AndReturn('service')
    self.task.servicegroup_api.service_is_up('service').AndReturn(False)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_host_is_up, 'host')
def test_check_instance_host_is_up_fails_if_not_found(self):
    """A missing compute service is reported as unavailable."""
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")

    db.service_get_by_compute_host(self.context,
                                   "host").AndRaise(exception.NotFound)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_host_is_up, "host")
def test_check_requested_destination_fails_when_destination_is_up(self):
    """NotFound on the destination lookup maps to service-unavailable.

    NOTE(review): despite the name, this exercises the case where the
    destination's service record does not exist at all.
    """
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")

    db.service_get_by_compute_host(self.context,
                                   self.destination).AndRaise(
                                       exception.NotFound)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_requested_destination)
def test_check_instance_host_is_up(self):
    """The up-check passes when the service exists and reports up."""
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    self.mox.StubOutWithMock(self.task.servicegroup_api, "service_is_up")

    db.service_get_by_compute_host(self.context,
                                   "host").AndReturn("service")
    self.task.servicegroup_api.service_is_up("service").AndReturn(True)
    self.mox.ReplayAll()

    self.task._check_host_is_up("host")
def test_check_requested_destination_fails_with_not_enough_memory(self):
    """A destination without enough free RAM fails the pre-check."""
    self.mox.StubOutWithMock(self.task, '_check_host_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')

    self.task._check_host_is_up(self.destination)
    # free_ram_mb is below what the instance requires.
    low_memory_node = {'compute_node': [{'free_ram_mb': 511}]}
    db.service_get_by_compute_host(
        self.context, self.destination).AndReturn(low_memory_node)
    self.mox.ReplayAll()

    self.assertRaises(exception.MigrationPreCheckError,
                      self.task._check_requested_destination)
def test_check_instance_host_is_up_fails_if_not_up(self):
    """A down compute service is reported as unavailable."""
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    self.mox.StubOutWithMock(self.task.servicegroup_api, "service_is_up")

    db.service_get_by_compute_host(self.context,
                                   "host").AndReturn("service")
    self.task.servicegroup_api.service_is_up("service").AndReturn(False)
    self.mox.ReplayAll()

    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.task._check_host_is_up, "host")
def test_live_migration_dest_check_service_memory_overcommit(self): instance = self._live_migration_instance() # Live-migration should work since default is to overcommit memory. self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') self.mox.StubOutWithMock(self.driver, '_get_compute_info') self.mox.StubOutWithMock(self.driver, '_live_migration_common_check') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration') dest = 'fake_host2' block_migration = False disk_over_commit = False self.driver._live_migration_src_check(self.context, instance) db.service_get_by_compute_host(self.context, dest).AndReturn('fake_service3') self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) self.driver._get_compute_info(self.context, dest).AndReturn( {'memory_mb': 2048, 'free_disk_gb': 512, 'local_gb_used': 512, 'free_ram_mb': 512, 'local_gb': 1024, 'vcpus': 4, 'vcpus_used': 2, 'updated_at': None}) self.driver._live_migration_common_check(self.context, instance, dest) rpc.call(self.context, "compute.fake_host2", {"method": 'check_can_live_migrate_destination', "namespace": None, "args": {'instance': instance, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit}, "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}, None).AndReturn({}) self.driver.compute_rpcapi.live_migration(self.context, host=instance['host'], instance=instance, dest=dest, block_migration=block_migration, migrate_data={}) self.mox.ReplayAll() result = self.driver.schedule_live_migration(self.context, instance=instance, dest=dest, block_migration=block_migration, disk_over_commit=disk_over_commit) self.assertEqual(result, None)
def test_describe_host(self):
    """Makes sure that describe_host returns the correct information
    given our fake input.
    """
    ctx = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    host_name = 'host_c1'
    # The API looks the host up by name, then lists instances using the
    # 'host' value from the returned service record ('fake_host').
    db.service_get_by_compute_host(ctx, host_name).AndReturn(
        {'host': 'fake_host',
         'compute_node': [
             {'vcpus': 4,
              'vcpus_used': 1,
              'memory_mb': 8192,
              'memory_mb_used': 2048,
              'local_gb': 1024,
              'local_gb_used': 648}
         ]})
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
        [{'project_id': 42,
          'vcpus': 1,
          'memory_mb': 2048,
          'root_gb': 648,
          'ephemeral_gb': 0,
          }])
    self.mox.ReplayAll()
    result = self.api.describe_host(ctx, host_name)
    # Expect a (total) row, (used_now), (used_max), then one row per
    # project currently on the host.
    self.assertEqual(result,
        [{'resource': {'cpu': 4,
                       'disk_gb': 1024,
                       'host': 'host_c1',
                       'memory_mb': 8192,
                       'project': '(total)'}},
         {'resource': {'cpu': 1,
                       'disk_gb': 648,
                       'host': 'host_c1',
                       'memory_mb': 2048,
                       'project': '(used_now)'}},
         {'resource': {'cpu': 1,
                       'disk_gb': 648,
                       'host': 'host_c1',
                       'memory_mb': 2048,
                       'project': '(used_max)'}},
         {'resource': {'cpu': 1,
                       'disk_gb': 648,
                       'host': 'host_c1',
                       'memory_mb': 2048,
                       'project': 42}}]
    )
    self.mox.VerifyAll()
def test_check_requested_destination_fails_with_not_enough_memory(self):
    """A destination without enough free RAM fails the pre-check."""
    self.mox.StubOutWithMock(self.task, "_check_host_is_up")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")

    self.task._check_host_is_up(self.destination)
    # free_ram_mb is below what the instance requires.
    db.service_get_by_compute_host(
        self.context,
        self.destination).AndReturn(
            {"compute_node": [{"free_ram_mb": 511}]})
    self.mox.ReplayAll()

    self.assertRaises(exception.MigrationPreCheckError,
                      self.task._check_requested_destination)
def test_show_host_resources(self):
    # show_host_resources() reports the compute-node totals plus
    # per-project usage aggregated over the instances on the host.
    host = 'fake_host'

    compute_node = {'host': host,
                    'compute_node': [{'vcpus': 4,
                                      'vcpus_used': 2,
                                      'memory_mb': 1024,
                                      'memory_mb_used': 512,
                                      'local_gb': 1024,
                                      'local_gb_used': 512}]}
    instances = [{'project_id': 'project1',
                  'vcpus': 1,
                  'memory_mb': 128,
                  'root_gb': 128,
                  'ephemeral_gb': 0},
                 {'project_id': 'project1',
                  'vcpus': 2,
                  'memory_mb': 256,
                  'root_gb': 384,
                  'ephemeral_gb': 0},
                 {'project_id': 'project2',
                  'vcpus': 2,
                  'memory_mb': 256,
                  'root_gb': 256,
                  'ephemeral_gb': 0}]

    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
    db.service_get_by_compute_host(self.context, host).AndReturn(
        compute_node)
    db.instance_get_all_by_host(self.context, host).AndReturn(instances)
    self.mox.ReplayAll()

    result = self.manager.show_host_resources(self.context, host)
    # usage sums vcpus/memory/root/ephemeral per project_id.
    expected = {'usage': {'project1': {'memory_mb': 384,
                                       'vcpus': 3,
                                       'root_gb': 512,
                                       'ephemeral_gb': 0},
                          'project2': {'memory_mb': 256,
                                       'vcpus': 2,
                                       'root_gb': 256,
                                       'ephemeral_gb': 0}},
                'resource': {'vcpus': 4,
                             'vcpus_used': 2,
                             'local_gb': 1024,
                             'local_gb_used': 512,
                             'memory_mb': 1024,
                             'memory_mb_used': 512}}
    self.assertThat(result, matchers.DictMatches(expected))
def test_live_migration_dest_check_service_lack_memory(self):
    # Confirms exception raises when dest doesn't have enough memory.
    # Flag needed to make FilterScheduler test hit memory limit since the
    # default for it is to allow memory overcommit by a factor of 1.5.
    self.flags(ram_allocation_ratio=1.0)
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # With overcommit disabled, 512MB free RAM is not enough for the
    # instance, so a MigrationError is expected.
    self.driver._get_compute_info(self.context, dest).AndReturn({
        'memory_mb': 2048,
        'free_disk_gb': 512,
        'local_gb_used': 512,
        'free_ram_mb': 512,
        'local_gb': 1024,
        'vcpus': 4,
        'vcpus_used': 2,
        'updated_at': None
    })
    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def _live_migration_dest_check(self, context, instance_ref, dest):
    """Live migration check routine (for destination host).

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :raises ComputeServiceUnavailable: if dest has no compute service
        record or its service is not alive
    :raises UnableToMigrateToSelf: if dest is the instance's current host
    """
    # Checking dest exists and compute node.  A missing service record
    # must surface as ComputeServiceUnavailable rather than leaking the
    # raw NotFound from the DB layer (matches the sibling
    # implementation and the *_dest_not_exist tests).
    try:
        dservice_ref = db.service_get_by_compute_host(context, dest)
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=dest)

    # Checking dest host is alive.
    if not self.servicegroup_api.service_is_up(dservice_ref):
        raise exception.ComputeServiceUnavailable(host=dest)

    # Checking whether the host where the instance is running and dest
    # are not the same.
    src = instance_ref['host']
    if dest == src:
        raise exception.UnableToMigrateToSelf(
                instance_id=instance_ref['uuid'], host=dest)

    # Check memory requirements
    self._assert_compute_node_has_enough_memory(context,
                                                instance_ref, dest)
def _live_migration_dest_check(self, context, instance_ref, dest):
    """Live migration check routine (for destination host).

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    """
    # The destination must have a registered compute service; translate
    # a failed lookup into ComputeServiceUnavailable.
    try:
        dest_service = db.service_get_by_compute_host(context, dest)
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=dest)

    # ...and that service must currently be reported alive.
    if not self.servicegroup_api.service_is_up(dest_service):
        raise exception.ComputeServiceUnavailable(host=dest)

    # Migrating onto the host the instance already runs on is an error.
    if dest == instance_ref['host']:
        raise exception.UnableToMigrateToSelf(
                instance_id=instance_ref['uuid'], host=dest)

    # Finally, the destination needs enough free memory for the instance.
    self._assert_compute_node_has_enough_memory(context,
                                                instance_ref, dest)
def _check_host_is_up(self, host):
    """Raise ComputeServiceUnavailable unless the compute service on
    *host* exists and is currently up.
    """
    # A missing service record and a down service are both reported the
    # same way to the caller.
    try:
        service_ref = db.service_get_by_compute_host(self.context, host)
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=host)
    if not self.servicegroup_api.service_is_up(service_ref):
        raise exception.ComputeServiceUnavailable(host=host)
def _get_compute_info(self, context, dest):
    """Get compute node's information

    :param context: security context
    :param dest: hostname (must be compute node)
    :return: dict of compute node information
    """
    # The service row carries its compute_node records; return the
    # first (a compute host's service has one).
    service_ref = db.service_get_by_compute_host(context, dest)
    compute_node_ref = service_ref['compute_node']
    return compute_node_ref[0]
def test_show_host_resources(self):
    # show_host_resources() reports the compute-node totals plus
    # per-project usage aggregated over the instances on the host.
    host = "fake_host"
    compute_node = {
        "host": host,
        "compute_node": [
            {
                "vcpus": 4,
                "vcpus_used": 2,
                "memory_mb": 1024,
                "memory_mb_used": 512,
                "local_gb": 1024,
                "local_gb_used": 512,
            }
        ],
    }
    instances = [
        {"project_id": "project1", "vcpus": 1, "memory_mb": 128,
         "root_gb": 128, "ephemeral_gb": 0},
        {"project_id": "project1", "vcpus": 2, "memory_mb": 256,
         "root_gb": 384, "ephemeral_gb": 0},
        {"project_id": "project2", "vcpus": 2, "memory_mb": 256,
         "root_gb": 256, "ephemeral_gb": 0},
    ]
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    self.mox.StubOutWithMock(db, "instance_get_all_by_host")
    db.service_get_by_compute_host(self.context,
                                   host).AndReturn(compute_node)
    db.instance_get_all_by_host(self.context, host).AndReturn(instances)
    self.mox.ReplayAll()
    result = self.manager.show_host_resources(self.context, host)
    # usage sums vcpus/memory/root/ephemeral per project_id.
    expected = {
        "usage": {
            "project1": {"memory_mb": 384, "vcpus": 3,
                         "root_gb": 512, "ephemeral_gb": 0},
            "project2": {"memory_mb": 256, "vcpus": 2,
                         "root_gb": 256, "ephemeral_gb": 0},
        },
        "resource": {
            "vcpus": 4,
            "vcpus_used": 2,
            "local_gb": 1024,
            "local_gb_used": 512,
            "memory_mb": 1024,
            "memory_mb_used": 512,
        },
    }
    self.assertThat(result, matchers.DictMatches(expected))
def test_live_migration_dest_check_service_lack_memory(self):
    # Confirms exception raises when dest doesn't have enough memory.
    # Flag needed to make FilterScheduler test hit memory limit since the
    # default for it is to allow memory overcommit by a factor of 1.5.
    self.flags(ram_allocation_ratio=1.0)
    self.mox.StubOutWithMock(self.driver, "_live_migration_src_check")
    self.mox.StubOutWithMock(db, "service_get_by_compute_host")
    self.mox.StubOutWithMock(servicegroup.API, "service_is_up")
    self.mox.StubOutWithMock(self.driver, "_get_compute_info")
    dest = "fake_host2"
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn("fake_service3")
    self.servicegroup_api.service_is_up("fake_service3").AndReturn(True)
    # With overcommit disabled, 512MB free RAM is not enough for the
    # instance, so a MigrationError is expected.
    self.driver._get_compute_info(self.context, dest).AndReturn(
        {
            "memory_mb": 2048,
            "free_disk_gb": 512,
            "local_gb_used": 512,
            "free_ram_mb": 512,
            "local_gb": 1024,
            "vcpus": 4,
            "vcpus_used": 2,
            "updated_at": None,
        }
    )
    self.mox.ReplayAll()
    self.assertRaises(
        exception.MigrationError,
        self.driver.schedule_live_migration,
        self.context,
        instance=instance,
        dest=dest,
        block_migration=block_migration,
        disk_over_commit=disk_over_commit,
    )
def _get_compute_info(self, context, host):
    """Get the compute node information for a host.

    :param context: security context
    :param host: hostname (must be a compute node)
    :return: dict of compute node information
    """
    # The service row carries its compute_node records; a compute host's
    # service has one, so return the first.
    service_ref = db.service_get_by_compute_host(context, host)
    return service_ref['compute_node'][0]
def show_host_resources(self, context, host):
    """Shows the physical/usage resource given by hosts.

    :param context: security context
    :param host: hostname
    :returns: example format is below::

        {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
        D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
            'vcpus_used': 12, 'memory_mb_used': 10240,
            'local_gb_used': 64}

    """
    # Fetch the compute node record and the instances running on it.
    service_ref = db.service_get_by_compute_host(context, host)
    instance_refs = db.instance_get_all_by_host(context,
                                                service_ref['host'])

    # Totals come straight off the (single) compute node record.
    compute_ref = service_ref['compute_node'][0]
    resource = dict((key, compute_ref[key])
                    for key in ('vcpus', 'memory_mb', 'local_gb',
                                'vcpus_used', 'memory_mb_used',
                                'local_gb_used'))

    # Accumulate per-project usage in a single pass over the instances.
    usage = {}
    for instance_ref in instance_refs:
        project_usage = usage.setdefault(
            instance_ref['project_id'],
            {'vcpus': 0, 'memory_mb': 0, 'root_gb': 0,
             'ephemeral_gb': 0})
        project_usage['vcpus'] += instance_ref['vcpus']
        project_usage['memory_mb'] += instance_ref['memory_mb']
        project_usage['root_gb'] += instance_ref['root_gb']
        project_usage['ephemeral_gb'] += instance_ref['ephemeral_gb']

    return {'resource': resource, 'usage': usage}
def _live_migration_dest_check(self, context, instance_ref, dest, ignore_hosts=None): """Live migration check routine (for destination host). :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host :param ignore_hosts: hosts that should be avoided as dest host """ # If dest is not specified, have scheduler pick one. if dest is None: instance_type = flavors.extract_instance_type(instance_ref) if not instance_ref['image_ref']: image = None else: image = self.image_service.show(context, instance_ref['image_ref']) request_spec = { 'instance_properties': instance_ref, 'instance_type': instance_type, 'instance_uuids': [instance_ref['uuid']], 'image': image } filter_properties = {'ignore_hosts': ignore_hosts} return self.select_hosts(context, request_spec, filter_properties)[0] # Checking whether The host where instance is running # and dest is not same. src = instance_ref['host'] if dest == src: raise exception.UnableToMigrateToSelf( instance_id=instance_ref['uuid'], host=dest) # Checking dest exists and compute node. try: dservice_ref = db.service_get_by_compute_host(context, dest) except exception.NotFound: raise exception.ComputeServiceUnavailable(host=dest) # Checking dest host is alive. if not self.servicegroup_api.service_is_up(dservice_ref): raise exception.ComputeServiceUnavailable(host=dest) # Check memory requirements self._assert_compute_node_has_enough_memory(context, instance_ref, dest) return dest
def test_live_migration_compute_dest_not_exist(self):
    # Raise exception when dest compute node does not exist: the DB
    # lookup raises NotFound, which must surface as
    # ComputeServiceUnavailable.
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    # Compute down
    db.service_get_by_compute_host(self.context,
            dest).AndRaise(exception.NotFound())
    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
    # Raise exception when the src compute node does not exist: the DB
    # lookup for the source host raises ComputeHostNotFound.
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # Compute down
    db.service_get_by_compute_host(self.context,
            instance['host']).AndRaise(
        exception.ComputeHostNotFound(host='fake'))
    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
    # Raise exception when src compute node is not alive: its service
    # row exists but the servicegroup API reports it down.
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    # Compute down
    db.service_get_by_compute_host(self.context,
            instance['host']).AndReturn('fake_service2')
    self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
    self.mox.ReplayAll()
    self.assertRaises(exception.ComputeServiceUnavailable,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
    # Confirms exception raises in case dest and src is same host.
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    block_migration = False
    instance = self._live_migration_instance()
    # make dest same as src
    dest = instance['host']
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    self.mox.ReplayAll()
    # Dest passes the exists/alive checks but then fails the
    # migrate-to-self check.
    self.assertRaises(exception.UnableToMigrateToSelf,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
    # Confirms exception raises when dest doesn't have enough memory.
    # Flag needed to make FilterScheduler test hit memory limit since the
    # default for it is to allow memory overcommit by a factor of 1.5.
    self.flags(ram_allocation_ratio=1.0)
    self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(self.driver, '_get_compute_info')
    dest = 'fake_host2'
    block_migration = False
    disk_over_commit = False
    instance = self._live_migration_instance()
    self.driver._live_migration_src_check(self.context, instance)
    db.service_get_by_compute_host(self.context,
            dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # With overcommit disabled, 512MB free RAM is not enough for the
    # instance, so a MigrationError is expected.
    self.driver._get_compute_info(self.context, dest).AndReturn(
        {'memory_mb': 2048, 'free_disk_gb': 512, 'local_gb_used': 512,
         'free_ram_mb': 512, 'local_gb': 1024, 'vcpus': 4,
         'vcpus_used': 2, 'updated_at': None})
    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      self.driver.schedule_live_migration, self.context,
                      instance=instance, dest=dest,
                      block_migration=block_migration,
                      disk_over_commit=disk_over_commit)
def _live_migration_dest_check(self, context, instance_ref, dest,
                               ignore_hosts=None):
    """Validate (or pick) the destination host for a live migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host, or None to let the scheduler choose
    :param ignore_hosts: hosts that should be avoided as dest host
    :returns: the validated (or scheduler-selected) destination host
    :raises: UnableToMigrateToSelf if dest is the instance's own host
    :raises: ComputeServiceUnavailable if dest has no compute service
             record or the service is not reported up
    """
    if dest is None:
        # No destination given: build a request spec and let the
        # scheduler's host selection pick one for us.
        instance_type = flavors.extract_flavor(instance_ref)
        image_ref = instance_ref['image_ref']
        image = (self.image_service.show(context, image_ref)
                 if image_ref else None)
        request_spec = {'instance_properties': instance_ref,
                        'instance_type': instance_type,
                        'instance_uuids': [instance_ref['uuid']],
                        'image': image}
        filter_properties = {'ignore_hosts': ignore_hosts}
        return self.select_hosts(context, request_spec,
                                 filter_properties)[0]

    # Migrating onto the host the instance already runs on is refused
    # explicitly.
    if dest == instance_ref['host']:
        raise exception.UnableToMigrateToSelf(
            instance_id=instance_ref['uuid'], host=dest)

    # The destination must have a registered compute service ...
    try:
        dest_service = db.service_get_by_compute_host(context, dest)
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=dest)

    # ... and that service must currently be reported alive.
    if not self.servicegroup_api.service_is_up(dest_service):
        raise exception.ComputeServiceUnavailable(host=dest)

    # Finally, make sure the destination can fit the instance in RAM.
    self._assert_compute_node_has_enough_memory(context,
                                                instance_ref, dest)

    return dest
def _get_available_memory(self, host):
    """Get available memory of given host.

    :param host: name of the target host.
    :return: Integer amount of memory (MB) currently available on the
             target host, or None if the node reports negative
             availability (used memory exceeds total).
    """
    ctxt = context.get_admin_context()
    service_resource = db.service_get_by_compute_host(ctxt, host)
    # The service row carries its compute_node record(s); only the
    # first entry is consulted here.
    node_resource = service_resource['compute_node'][0]
    current_memory = (node_resource['memory_mb'] -
                      node_resource['memory_mb_used'])
    if current_memory < 0:
        # Bug fix: LOG.exception() is only valid inside an exception
        # handler -- there is no active exception here, so it would log
        # a spurious "None" traceback. Use LOG.error instead.
        LOG.error(_("Failed to get available node resource"))
        # Preserve original behavior: fall through and return None.
        return None
    return current_memory
def _live_migration_src_check(self, context, instance_ref):
    """Live migration check routine (for src host).

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :raises: InstanceNotRunning if the instance is not powered on
    :raises: ComputeServiceUnavailable if the source host's compute
             service is missing or not reported up
    """
    # TODO(johngar) why is this not in the API layer?
    # The instance must be powered on to be live-migrated.
    if instance_ref["power_state"] != power_state.RUNNING:
        raise exception.InstanceNotRunning(instance_id=instance_ref["uuid"])

    # The source host must have a registered compute service ...
    source_host = instance_ref["host"]
    try:
        source_service = db.service_get_by_compute_host(context,
                                                        source_host)
    except exception.NotFound:
        raise exception.ComputeServiceUnavailable(host=source_host)

    # ... and that service must currently be reported alive.
    if not self.servicegroup_api.service_is_up(source_service):
        raise exception.ComputeServiceUnavailable(host=source_host)
def _live_migration_src_check(self, context, instance_ref): """Live migration check routine (for src host). :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object """ # TODO(johngar) why is this not in the API layer? # Checking instance is running. if instance_ref['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning( instance_id=instance_ref['uuid']) # Checking src host exists and compute node src = instance_ref['host'] try: service = db.service_get_by_compute_host(context, src) except exception.NotFound: raise exception.ComputeServiceUnavailable(host=src) # Checking src host is alive. if not self.servicegroup_api.service_is_up(service): raise exception.ComputeServiceUnavailable(host=src)
def get_by_compute_host(cls, context, host, use_slave=False):
    """Fetch the DB service record for the compute node on `host` and
    return it wrapped as an instance of this class.

    :param context: request context passed through to the DB layer
    :param host: compute host name to look up
    :param use_slave: accepted but not forwarded to the DB call here.
        NOTE(review): presumably kept for interface compatibility --
        confirm whether the db API is expected to honor it.
    """
    db_service = db.service_get_by_compute_host(context, host)
    return cls._from_db_object(context, cls(), db_service)
def _db_service_get_by_compute_host(context, host, use_slave=False):
    # Thin indirection over db.service_get_by_compute_host.
    # NOTE(review): `use_slave` is accepted but ignored here --
    # presumably kept for signature compatibility; confirm against
    # callers.
    return db.service_get_by_compute_host(context, host)
def test_live_migration_all_checks_pass(self):
    """Happy path: when source, destination, memory and hypervisor
    checks all pass, schedule_live_migration casts live_migration to
    the source compute and returns None.
    """
    # Test live migration when all checks pass.
    self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(rpc, 'call')
    self.mox.StubOutWithMock(self.driver.compute_rpcapi,
                             'live_migration')
    dest = 'fake_host2'
    block_migration = True
    disk_over_commit = True
    instance = jsonutils.to_primitive(self._live_migration_instance())

    # Source checks
    db.service_get_by_compute_host(
        self.context, instance['host']).AndReturn('fake_service2')
    self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)

    # Destination checks (compute is up, enough memory, disk)
    db.service_get_by_compute_host(self.context,
                                   dest).AndReturn('fake_service3')
    self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
    # assert_compute_node_has_enough_memory() -- 1280 MB free is
    # enough for the test instance.
    db.service_get_by_compute_host(self.context, dest).AndReturn(
        {'compute_node': [{'memory_mb': 2048,
                           'free_disk_gb': 512,
                           'local_gb_used': 512,
                           'free_ram_mb': 1280,
                           'local_gb': 1024,
                           'vcpus': 4,
                           'vcpus_used': 2,
                           'updated_at': None,
                           'hypervisor_version': 1}]})

    # Common checks (same hypervisor, etc) -- dest and src both report
    # xen at the same hypervisor version.
    db.service_get_by_compute_host(self.context, dest).AndReturn(
        {'compute_node': [{'hypervisor_type': 'xen',
                           'hypervisor_version': 1}]})
    db.service_get_by_compute_host(
        self.context, instance['host']).AndReturn(
            {'compute_node': [{'hypervisor_type': 'xen',
                               'hypervisor_version': 1,
                               'cpu_info': 'fake_cpu_info'}]})

    # Destination compute is asked whether it can accept the migration.
    rpc.call(self.context, "compute.fake_host2",
             {"method": 'check_can_live_migrate_destination',
              "args": {'instance': instance,
                       'block_migration': block_migration,
                       'disk_over_commit': disk_over_commit},
              "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
             None).AndReturn({})

    # Finally the migration itself is kicked off on the source host.
    self.driver.compute_rpcapi.live_migration(self.context,
            host=instance['host'], instance=instance, dest=dest,
            block_migration=block_migration, migrate_data={})

    self.mox.ReplayAll()
    result = self.driver.schedule_live_migration(self.context,
            instance=instance, dest=dest,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
    self.assertEqual(result, None)
def get_by_compute_host(cls, context, host):
    """Fetch the compute service record for `host` and return it
    wrapped in a freshly constructed object of this class.
    """
    record = db.service_get_by_compute_host(context, host)
    service_obj = cls()
    return cls._from_db_object(context, service_obj, record)
def _get_compute_info(self, host):
    """Look up `host`'s service row and return its first compute_node
    entry.
    """
    compute_nodes = db.service_get_by_compute_host(
        self.context, host)['compute_node']
    return compute_nodes[0]
def _test_delete(self, delete_type, **attrs):
    """Shared driver for delete/soft_delete/force_delete tests.

    Records the mox expectations for the requested delete flavor, runs
    it via getattr on the compute API, and asserts the instance fields
    were updated as expected.
    """
    inst = self._create_instance_obj()
    inst.update(attrs)
    inst._context = self.context
    # Freeze "now" so deleted_at/terminated_at are predictable.
    delete_time = datetime.datetime(1955, 11, 5, 9, 30,
                                    tzinfo=iso8601.iso8601.Utc())
    timeutils.set_time_override(delete_time)
    task_state = (delete_type == 'soft_delete' and
                  task_states.SOFT_DELETING or task_states.DELETING)
    # NOTE(review): db_inst looks unused below -- confirm before
    # removing.
    db_inst = obj_base.obj_to_primitive(inst)
    updates = {'progress': 0, 'task_state': task_state}
    if delete_type == 'soft_delete':
        updates['deleted_at'] = delete_time

    # Stub out everything the delete path may touch.
    self.mox.StubOutWithMock(inst, 'save')
    self.mox.StubOutWithMock(db,
                             'block_device_mapping_get_all_by_instance')
    self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
    self.mox.StubOutWithMock(self.context, 'elevated')
    self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
    self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
                             'service_is_up')
    self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
    self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
    self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
    self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(inst.info_cache, 'delete')
    self.mox.StubOutWithMock(self.compute_api.network_api,
                             'deallocate_for_instance')
    self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
    self.mox.StubOutWithMock(db, 'instance_destroy')
    self.mox.StubOutWithMock(compute_utils,
                             'notify_about_instance_usage')
    self.mox.StubOutWithMock(quota.QUOTAS, 'commit')

    # Common expectations for every delete flavor.
    db.block_device_mapping_get_all_by_instance(
        self.context, inst.uuid).AndReturn([])
    inst.save()
    self.compute_api._create_reservations(
        self.context, inst, inst.instance_type_id, inst.project_id,
        inst.user_id).AndReturn('fake-resv')

    if inst.vm_state == vm_states.RESIZED:
        self._test_delete_resized_part(inst)

    self.context.elevated().MultipleTimes().AndReturn(self.context)
    # service_is_up returns False when the host is 'down-host',
    # steering the API onto the local-delete path below.
    db.service_get_by_compute_host(self.context,
                                   inst.host).AndReturn('fake-service')
    self.compute_api.servicegroup_api.service_is_up(
        'fake-service').AndReturn(inst.host != 'down-host')

    if self.is_cells:
        rpcapi = self.compute_api.cells_rpcapi
    else:
        rpcapi = self.compute_api.compute_rpcapi
    self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
    self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')

    if inst.host == 'down-host':
        # Compute service is down: the API does a "local delete"
        # itself instead of casting to the compute host.
        inst.info_cache.delete()
        compute_utils.notify_about_instance_usage(
            self.context, inst, '%s.start' % delete_type)
        self.compute_api.network_api.deallocate_for_instance(
            self.context, inst)
        db.instance_system_metadata_get(self.context,
                                        inst.uuid).AndReturn('sys-meta')
        state = ('soft' in delete_type and vm_states.SOFT_DELETED or
                 vm_states.DELETED)
        updates.update({'vm_state': state,
                        'task_state': None,
                        'terminated_at': delete_time})
        inst.save()
        if self.is_cells:
            if delete_type == 'soft_delete':
                rpcapi.soft_delete_instance(self.context, inst,
                                            reservations=None)
            else:
                rpcapi.terminate_instance(self.context, inst, [],
                                          reservations=None)
        db.instance_destroy(self.context, inst.uuid, constraint=None)
        compute_utils.notify_about_instance_usage(
            self.context, inst, '%s.end' % delete_type,
            system_metadata='sys-meta')

    if inst.host == 'down-host':
        # Local delete commits the quota reservation directly.
        quota.QUOTAS.commit(self.context, 'fake-resv',
                            project_id=inst.project_id,
                            user_id=inst.user_id)
    elif delete_type == 'soft_delete':
        self.compute_api._record_action_start(self.context, inst,
                                              instance_actions.DELETE)
        rpcapi.soft_delete_instance(self.context, inst,
                                    reservations='fake-resv')
    elif delete_type in ['delete', 'force_delete']:
        self.compute_api._record_action_start(self.context, inst,
                                              instance_actions.DELETE)
        rpcapi.terminate_instance(self.context, inst, [],
                                  reservations='fake-resv')

    self.mox.ReplayAll()

    getattr(self.compute_api, delete_type)(self.context, inst)
    # Every field in `updates` must have landed on the instance.
    for k, v in updates.items():
        self.assertEqual(inst[k], v)