def test_service_get_all_cells_with_minimal_constructs(self, mock_sg,
                                                       mock_get_hm,
                                                       mock_cm_list):
    service = objects.Service(binary='nova-compute',
                              host='host-%s' % uuids.cell0)
    cells = [
        objects.CellMapping(uuid=uuids.cell1, id=1),
        objects.CellMapping(uuid=uuids.cell2, id=2),
    ]
    mock_cm_list.return_value = cells
    context.load_cells()
    # Create two host mappings in cells[1], which is the down cell in
    # this test.
    hm1 = objects.HostMapping(self.ctxt, host='host1-unavailable',
                              cell_mapping=cells[1])
    hm1.create()
    hm2 = objects.HostMapping(self.ctxt, host='host2-unavailable',
                              cell_mapping=cells[1])
    hm2.create()
    mock_sg.return_value = {
        cells[0].uuid: [service],
        cells[1].uuid: context.did_not_respond_sentinel,
    }
    mock_get_hm.return_value = [hm1, hm2]

    services = self.host_api.service_get_all(self.ctxt, all_cells=True,
                                             cell_down_support=True)
    # Returns the real services from the up cell and minimal constructs,
    # built from the host mappings, for the down cell.
    self.assertEqual(sorted(['host-%s' % uuids.cell0,
                             'host1-unavailable',
                             'host2-unavailable']),
                     sorted([svc.host for svc in services]))
    mock_sg.assert_called_once_with(self.ctxt,
                                    objects.ServiceList.get_all,
                                    None, set_zones=False)
    mock_get_hm.assert_called_once_with(self.ctxt, cells[1].id)
def test_discover_hosts(self, mock_discover):
    cm1 = objects.CellMapping(name='cell1')
    cm2 = objects.CellMapping(name='cell2')
    mock_discover.return_value = [
        objects.HostMapping(host='a', cell_mapping=cm1),
        objects.HostMapping(host='b', cell_mapping=cm2)]
    self.manager._discover_hosts_in_cells(mock.sentinel.context)
def start_service(self, name, host=None, **kwargs):
    cell = None
    # If the host is None then CONF.host remains defaulted to
    # 'fake-mini' (originally done in ConfFixture).
    if host is not None:
        # Make sure that CONF.host is relevant to the right hostname
        self.useFixture(nova_fixtures.ConfPatcher(host=host))
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell_name = kwargs.pop('cell', CELL1_NAME) or CELL1_NAME
        cell = self.cell_mappings[cell_name]
        if (host or name) not in self.host_mappings:
            # NOTE(gibi): If the HostMapping does not exist yet then this
            # is the first start of the service, so we create the mapping.
            hm = objects.HostMapping(context=ctxt,
                                     host=host or name,
                                     cell_mapping=cell)
            hm.create()
            self.host_mappings[hm.host] = hm
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))
    return svc.service
def test_save(self, save_in_db):
    db_mapping = get_db_mapping()
    # The joined cell_mapping entry isn't needed for this test.
    db_mapping.pop("cell_mapping")
    host = db_mapping['host']
    mapping_obj = objects.HostMapping(self.context)
    mapping_obj.host = host
    mapping_obj.id = db_mapping['id']
    new_fake_cell = test_cell_mapping.get_db_mapping(id=10)
    fake_cell_obj = objects.CellMapping(self.context, **new_fake_cell)
    mapping_obj.cell_mapping = fake_cell_obj
    db_mapping.update({"cell_id": new_fake_cell["id"]})
    save_in_db.return_value = db_mapping

    mapping_obj.save()

    save_in_db.assert_called_once_with(
        self.context,
        test.MatchType(host_mapping.HostMapping),
        {'cell_id': new_fake_cell["id"],
         'host': host,
         'id': db_mapping['id']})
    self.compare_obj(mapping_obj, db_mapping,
                     subs={'cell_mapping': 'cell_id'},
                     comparators={
                         'cell_mapping': self._check_cell_map_value})
def test_by_host(self, mock_rpcclient, mock_create, mock_get):
    default_client = mock.Mock()
    cell_client = mock.Mock()
    mock_rpcclient.return_value = cell_client
    ctxt = mock.Mock()
    cm = objects.CellMapping(uuid=uuids.cell_mapping,
                             transport_url='fake:///')
    mock_get.return_value = objects.HostMapping(cell_mapping=cm)
    host = 'fake-host'

    router = rpc.ClientRouter(default_client)
    client = router.by_host(ctxt, host)

    mock_get.assert_called_once_with(ctxt, host)
    # verify a client was created by ClientRouter
    mock_rpcclient.assert_called_once_with(
        mock_create.return_value, default_client.target,
        version_cap=default_client.version_cap,
        serializer=default_client.serializer)
    # verify the cell client was returned
    self.assertEqual(cell_client, client)

    # reset and check that the cached client is returned the second time
    mock_rpcclient.reset_mock()
    mock_create.reset_mock()
    mock_get.reset_mock()

    client = router.by_host(ctxt, host)

    mock_get.assert_called_once_with(ctxt, host)
    mock_rpcclient.assert_not_called()
    mock_create.assert_not_called()
    self.assertEqual(cell_client, client)
def test_from_db_object_no_cell_map(self):
    """Test when db object does not have cell_mapping"""
    fake_cell = test_cell_mapping.get_db_mapping(id=1)
    db_mapping = get_db_mapping(mapped_cell=fake_cell)
    # If the db object has no cell_mapping, lazy loading should occur
    db_mapping.pop("cell_mapping")
    fake_cell_obj = objects.CellMapping(self.context, **fake_cell)

    mapping_obj = objects.HostMapping()._from_db_object(
        self.context, objects.HostMapping(), db_mapping)
    with mock.patch.object(
            host_mapping.HostMapping, '_get_cell_mapping') as mock_load:
        mock_load.return_value = fake_cell_obj
        self.compare_obj(mapping_obj, db_mapping,
                         subs={'cell_mapping': 'cell_id'},
                         comparators={
                             'cell_mapping': self._check_cell_map_value})
        # Check that cell_mapping is lazy-loaded
        mock_load.assert_called_once_with()
def test_map_cell_and_hosts_partial_update(self):
    # Create a cell mapping and only some of the hosts, and check that
    # the missing HostMappings are created.
    ctxt = context.RequestContext()
    cell_mapping_uuid = uuidutils.generate_uuid()
    cell_mapping = objects.CellMapping(
        ctxt, uuid=cell_mapping_uuid, name='fake',
        transport_url='fake://', database_connection='fake://')
    cell_mapping.create()
    # Create compute nodes that will map to the cell
    values = {
        'vcpus': 4,
        'memory_mb': 4096,
        'local_gb': 1024,
        'vcpus_used': 2,
        'memory_mb_used': 2048,
        'local_gb_used': 512,
        'hypervisor_type': 'Hyper-Dan-VM-ware',
        'hypervisor_version': 1001,
        'cpu_info': 'Schmintel i786',
    }
    for i in range(3):
        host = 'host%s' % i
        compute_node = objects.ComputeNode(ctxt, host=host, **values)
        compute_node.create()
    # Only create 2 of the 3 expected HostMappings up front
    for i in range(2):
        host = 'host%s' % i
        host_mapping = objects.HostMapping(
            ctxt, host=host, cell_mapping=cell_mapping)
        host_mapping.create()
    cell_transport_url = "fake://*****:*****@127.0.0.1:9999/"
    self.commands.map_cell_and_hosts(cell_transport_url, name='ssd',
                                     verbose=True)
    # Verify the HostMapping for the last host was created
    host_mapping = objects.HostMapping.get_by_host(ctxt, 'host2')
    self.assertEqual(cell_mapping.uuid, host_mapping.cell_mapping.uuid)
    # Verify the output
    output = sys.stdout.getvalue().strip()
    expected = ''
    for i in range(2):
        expected += ('Host host%s is already mapped to cell %s\n' %
                     (i, cell_mapping_uuid))
    # The command prints the UUID of the cell the remaining host was
    # mapped to, which should be the existing cell's UUID.
    expected += cell_mapping.uuid
    self.assertEqual(expected, output)
def start_service(self, name, host=None, **kwargs):
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, **kwargs))
    if name == 'compute' and self.USES_DB:
        ctxt = context.get_context()
        cell = self.cell_mappings[kwargs.pop('cell', CELL1_NAME)]
        hm = objects.HostMapping(context=ctxt,
                                 host=svc.service.host,
                                 cell_mapping=cell)
        hm.create()
        self.host_mappings[hm.host] = hm
    return svc.service
def test_check_success(self):
    """Tests a successful cells v2 upgrade check."""
    # create the cell0 and first cell mappings
    self._setup_cells()
    # Start a compute service and create a hostmapping for it
    svc = self.start_service('compute')
    cell = self.cell_mappings[test.CELL1_NAME]
    hm = objects.HostMapping(context=context.get_admin_context(),
                             host=svc.host,
                             cell_mapping=cell)
    hm.create()
    result = self.cmd._check_cellsv2()
    self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
    self.assertIsNone(result.details)
def test_get_by_host(self, host_from_db):
    fake_cell = test_cell_mapping.get_db_mapping(id=1)
    db_mapping = get_db_mapping(mapped_cell=fake_cell)
    host_from_db.return_value = db_mapping

    mapping_obj = objects.HostMapping().get_by_host(
        self.context, db_mapping['host'])

    host_from_db.assert_called_once_with(self.context,
                                         db_mapping['host'])
    with mock.patch.object(
            host_mapping.HostMapping, '_get_cell_mapping') as mock_load:
        self.compare_obj(mapping_obj, db_mapping,
                         subs={'cell_mapping': 'cell_id'},
                         comparators={
                             'cell_mapping': self._check_cell_map_value})
        # Check that lazy loading isn't happening
        self.assertFalse(mock_load.called)
def start_service(self, name, host=None, **kwargs):
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell = self.cell_mappings[kwargs.pop('cell', CELL1_NAME)]
        hm = objects.HostMapping(context=ctxt,
                                 host=host or name,
                                 cell_mapping=cell)
        hm.create()
        self.host_mappings[hm.host] = hm
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, **kwargs))
    return svc.service
def start_service(self, name, host=None, cell_name=None, **kwargs):
    # Disallow starting multiple scheduler services
    if name == 'scheduler' and self._service_fixture_count[name]:
        raise TestingException("Duplicate start_service(%s)!" % name)
    cell = None
    # If the host is None then CONF.host remains defaulted to
    # 'fake-mini' (originally done in ConfFixture).
    if host is not None:
        # Make sure that CONF.host is relevant to the right hostname
        self.useFixture(nova_fixtures.ConfPatcher(host=host))
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell_name = cell_name or CELL1_NAME
        cell = self.cell_mappings[cell_name]
        if (host or name) not in self.host_mappings:
            # NOTE(gibi): If the HostMapping does not exist yet then this
            # is the first start of the service, so we create the mapping.
            hm = objects.HostMapping(context=ctxt,
                                     host=host or name,
                                     cell_mapping=cell)
            hm.create()
            self.host_mappings[hm.host] = hm
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))
    # Keep track of how many instances of this service are running.
    self._service_fixture_count[name] += 1
    real_stop = svc.service.stop

    # Make sure stopping the service decrements the active count, so that
    # start,stop,start doesn't trigger the "Duplicate start_service"
    # exception.
    def patch_stop(*a, **k):
        self._service_fixture_count[name] -= 1
        return real_stop(*a, **k)
    self.useFixture(
        fixtures.MockPatchObject(svc.service, 'stop', patch_stop))
    return svc.service
def _setup_cells(self):
    ctxt = context.get_admin_context()
    self.celldbs = fixtures.CellDatabases()
    cells = []
    for uuid in (uuids.cell1, uuids.cell2, uuids.cell3):
        cm = objects.CellMapping(context=ctxt,
                                 uuid=uuid,
                                 database_connection=uuid,
                                 transport_url='fake://')
        cm.create()
        cells.append(cm)
        self.celldbs.add_cell_database(uuid)
    self.useFixture(self.celldbs)

    for cell in cells:
        for i in (1, 2, 3):
            # Make one host in each cell unmapped
            mapped = 0 if i == 2 else 1
            host = 'host-%s-%i' % (cell.uuid, i)
            if mapped:
                hm = objects.HostMapping(context=ctxt,
                                         cell_mapping=cell,
                                         host=host)
                hm.create()
            with context.target_cell(ctxt, cell):
                cn = objects.ComputeNode(context=ctxt,
                                         vcpus=1,
                                         memory_mb=1,
                                         local_gb=1,
                                         vcpus_used=0,
                                         memory_mb_used=0,
                                         local_gb_used=0,
                                         hypervisor_type='danvm',
                                         hypervisor_version='1',
                                         cpu_info='foo',
                                         cpu_allocation_ratio=1.0,
                                         ram_allocation_ratio=1.0,
                                         disk_allocation_ratio=1.0,
                                         mapped=mapped,
                                         host=host)
                cn.create()
def test_map_cell_and_hosts_duplicate(self):
    # Create a cell mapping and hosts and check that nothing new is
    # created.
    ctxt = context.RequestContext()
    cell_mapping_uuid = uuidutils.generate_uuid()
    cell_mapping = objects.CellMapping(
        ctxt, uuid=cell_mapping_uuid, name='fake',
        transport_url='fake://', database_connection='fake://')
    cell_mapping.create()
    # Create compute nodes that will map to the cell
    values = {
        'vcpus': 4,
        'memory_mb': 4096,
        'local_gb': 1024,
        'vcpus_used': 2,
        'memory_mb_used': 2048,
        'local_gb_used': 512,
        'hypervisor_type': 'Hyper-Dan-VM-ware',
        'hypervisor_version': 1001,
        'cpu_info': 'Schmintel i786',
    }
    for i in range(3):
        host = 'host%s' % i
        compute_node = objects.ComputeNode(ctxt, host=host, **values)
        compute_node.create()
        host_mapping = objects.HostMapping(
            ctxt, host=host, cell_mapping=cell_mapping)
        host_mapping.create()
    cell_transport_url = "fake://*****:*****@127.0.0.1:9999/"
    retval = self.commands.map_cell_and_hosts(cell_transport_url,
                                              name='ssd', verbose=True)
    self.assertEqual(0, retval)
    output = sys.stdout.getvalue().strip()
    expected = ''
    for i in range(3):
        expected += ('Host host%s is already mapped to cell %s\n' %
                     (i, cell_mapping_uuid))
    expected += 'All hosts are already mapped to cell(s), exiting.'
    self.assertEqual(expected, output)
def test_create(self, create_in_db):
    fake_cell = test_cell_mapping.get_db_mapping(id=1)
    db_mapping = get_db_mapping(mapped_cell=fake_cell)
    db_mapping.pop("cell_mapping")
    host = db_mapping['host']
    create_in_db.return_value = db_mapping
    fake_cell_obj = objects.CellMapping(self.context, **fake_cell)

    mapping_obj = objects.HostMapping(self.context)
    mapping_obj.host = host
    mapping_obj.cell_mapping = fake_cell_obj
    mapping_obj.create()

    create_in_db.assert_called_once_with(self.context,
                                         {'host': host,
                                          'cell_id': fake_cell["id"]})
    self.compare_obj(mapping_obj, db_mapping,
                     subs={'cell_mapping': 'cell_id'},
                     comparators={
                         'cell_mapping': self._check_cell_map_value})
def test_upgrade_with_required_mappings(self):
    self._flavor_me()
    cell0 = objects.CellMapping(context=self.context,
                                uuid=objects.CellMapping.CELL0_UUID,
                                name='cell0',
                                transport_url='fake',
                                database_connection='fake')
    cell0.create()
    cell1 = objects.CellMapping(context=self.context,
                                uuid=uuidsentinel.cell1,
                                name='cell1',
                                transport_url='fake',
                                database_connection='fake')
    cell1.create()
    hostmapping = objects.HostMapping(context=self.context,
                                      cell_mapping=cell1,
                                      host='foo')
    hostmapping.create()
    self.migration.upgrade(self.engine)
def start_service(self, name, host=None, **kwargs):
    cell = None
    if name == 'compute' and self.USES_DB:
        # NOTE(danms): We need to create the HostMapping first, because
        # otherwise we'll fail to update the scheduler while running
        # the compute node startup routines below.
        ctxt = context.get_context()
        cell_name = kwargs.pop('cell', CELL1_NAME) or CELL1_NAME
        cell = self.cell_mappings[cell_name]
        hm = objects.HostMapping(context=ctxt,
                                 host=host or name,
                                 cell_mapping=cell)
        hm.create()
        self.host_mappings[hm.host] = hm
        if host is not None:
            # Make sure that CONF.host is relevant to the right hostname
            self.useFixture(nova_fixtures.ConfPatcher(host=host))
    svc = self.useFixture(
        nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs))
    return svc.service
def discover_hosts(ctxt, cell_uuid=None, status_fn=None):
    # TODO(alaski): If this is not run on a host configured to use the API
    # database most of the lookups below will fail and may not provide a
    # great error message. Add a check which will raise a useful error
    # message about running this from an API host.

    # Local import to avoid a circular import with nova.objects.
    from nova import objects

    if not status_fn:
        status_fn = lambda x: None

    if cell_uuid:
        cell_mappings = [objects.CellMapping.get_by_uuid(ctxt, cell_uuid)]
    else:
        cell_mappings = objects.CellMappingList.get_all(ctxt)
        status_fn(_('Found %s cell mappings.') % len(cell_mappings))

    host_mappings = []
    for cm in cell_mappings:
        if cm.is_cell0():
            status_fn(_('Skipping cell0 since it does not contain hosts.'))
            continue
        if 'name' in cm and cm.name:
            status_fn(_("Getting compute nodes from cell '%(name)s': "
                        "%(uuid)s") % {'name': cm.name,
                                       'uuid': cm.uuid})
        else:
            status_fn(_("Getting compute nodes from cell: %(uuid)s") %
                      {'uuid': cm.uuid})
        with context.target_cell(ctxt, cm):
            compute_nodes = objects.ComputeNodeList.get_all(ctxt)
        status_fn(_('Found %(num)s computes in cell: %(uuid)s') %
                  {'num': len(compute_nodes),
                   'uuid': cm.uuid})
        for compute in compute_nodes:
            status_fn(_("Checking host mapping for compute host "
                        "'%(host)s': %(uuid)s") %
                      {'host': compute.host, 'uuid': compute.uuid})
            try:
                objects.HostMapping.get_by_host(ctxt, compute.host)
            except exception.HostMappingNotFound:
                status_fn(_("Creating host mapping for compute host "
                            "'%(host)s': %(uuid)s") %
                          {'host': compute.host, 'uuid': compute.uuid})
                host_mapping = objects.HostMapping(
                    ctxt, host=compute.host, cell_mapping=cm)
                host_mapping.create()
                host_mappings.append(host_mapping)
    return host_mappings
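# A minimal usage sketch for discover_hosts() above (illustrative, not part
# of the original module): it assumes an admin RequestContext on a node with
# API database access, and passes ``print`` as the optional status callback.
def discover_hosts_example():
    ctxt = context.get_admin_context()
    # Map any compute hosts that do not yet have a HostMapping; pass
    # cell_uuid= to restrict the scan to a single cell.
    new_mappings = discover_hosts(ctxt, status_fn=print)
    for hm in new_mappings:
        print('Mapped host %s to cell %s' % (hm.host, hm.cell_mapping.uuid))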
def _hm_get(context, host):
    if host in ['a', 'b', 'c']:
        return objects.HostMapping()
    raise exception.HostMappingNotFound(name=host)
def test_destroy(self, destroy_in_db):
    mapping_obj = objects.HostMapping(self.context)
    mapping_obj.host = "fake-host2"

    mapping_obj.destroy()
    destroy_in_db.assert_called_once_with(self.context, "fake-host2")
def test_compute_node_get_all_uuid_marker(self):
    """Tests paging over multiple cells with a uuid marker.

    This test sets up three compute nodes in each of two cells, for a
    total of six compute nodes, and then pages over them with a limit of
    two, so there should be three pages in total.
    """
    # create the compute nodes in the non-cell0 cells
    count = 0
    for cell in self.cell_mappings[1:]:
        for x in range(3):
            compute_node_uuid = getattr(uuids, 'node_%s' % count)
            with context.target_cell(self.ctxt, cell) as cctxt:
                node = objects.ComputeNode(
                    cctxt, uuid=compute_node_uuid, host=compute_node_uuid,
                    vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,
                    memory_mb_used=0, local_gb_used=0, cpu_info='{}',
                    hypervisor_type='fake', hypervisor_version=10)
                node.create()
                count += 1

            # create a host mapping for the compute to link it to the cell
            host_mapping = objects.HostMapping(
                self.ctxt, host=compute_node_uuid, cell_mapping=cell)
            host_mapping.create()

    # now start paging with a limit of two per page; the first page starts
    # with no marker
    compute_nodes = self.host_api.compute_node_get_all(self.ctxt, limit=2)
    # assert that we got two compute nodes from cell1
    self.assertEqual(2, len(compute_nodes))
    for compute_node in compute_nodes:
        host_mapping = objects.HostMapping.get_by_host(
            self.ctxt, compute_node.host)
        self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)

    # now our marker is the last item in the first page
    marker = compute_nodes[-1].uuid
    compute_nodes = self.host_api.compute_node_get_all(
        self.ctxt, limit=2, marker=marker)
    # assert that we got the last compute node from cell1 and the first
    # compute node from cell2
    self.assertEqual(2, len(compute_nodes))
    host_mapping = objects.HostMapping.get_by_host(
        self.ctxt, compute_nodes[0].host)
    self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)
    host_mapping = objects.HostMapping.get_by_host(
        self.ctxt, compute_nodes[1].host)
    self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)

    # now our marker is the last item in the second page; make the limit=3
    # so we make sure we've exhausted the pages
    marker = compute_nodes[-1].uuid
    compute_nodes = self.host_api.compute_node_get_all(
        self.ctxt, limit=3, marker=marker)
    # assert that we got two compute nodes from cell2
    self.assertEqual(2, len(compute_nodes))
    for compute_node in compute_nodes:
        host_mapping = objects.HostMapping.get_by_host(
            self.ctxt, compute_node.host)
        self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)
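# An illustrative sketch (not part of the original tests) of the marker-based
# paging pattern the test above exercises: keep requesting pages, feeding the
# last item's uuid back in as the marker, until a short page signals the end.
def iter_compute_nodes(host_api, ctxt, page_size=2):
    marker = None
    while True:
        page = host_api.compute_node_get_all(ctxt, limit=page_size,
                                             marker=marker)
        for node in page:
            yield node
        if len(page) < page_size:
            # A short (or empty) page means the cells are exhausted.
            return
        marker = page[-1].uuid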
class LiveMigrationTaskTestCase(test.NoDBTestCase):
    def setUp(self):
        super(LiveMigrationTaskTestCase, self).setUp()
        self.context = "context"
        self.instance_host = "host"
        self.instance_uuid = uuids.instance
        self.instance_image = "image_ref"
        db_instance = fake_instance.fake_db_instance(
            host=self.instance_host,
            uuid=self.instance_uuid,
            power_state=power_state.RUNNING,
            vm_state=vm_states.ACTIVE,
            memory_mb=512,
            image_ref=self.instance_image)
        self.instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), db_instance)
        self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
        self.destination = "destination"
        self.block_migration = "bm"
        self.disk_over_commit = "doc"
        self.migration = objects.Migration()
        self.fake_spec = objects.RequestSpec()
        self._generate_task()

    def _generate_task(self):
        self.task = live_migrate.LiveMigrationTask(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit, self.migration,
            compute_rpcapi.ComputeAPI(), servicegroup.API(),
            scheduler_client.SchedulerClient(), self.fake_spec)

    def test_execute_with_destination(self):
        dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_check_requested_destination',
                              return_value=(mock.sentinel.source_node,
                                            dest_node)),
            mock.patch.object(scheduler_utils,
                              'claim_resources_on_destination'),
            mock.patch.object(self.migration, 'save'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save,
              mock_mig):
            mock_mig.return_value = "bob"

            self.assertEqual("bob", self.task.execute())
            mock_check_up.assert_called_once_with(self.instance_host)
            mock_check_dest.assert_called_once_with()
            mock_claim.assert_called_once_with(
                self.task.scheduler_client.reportclient, self.instance,
                mock.sentinel.source_node, dest_node)
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            # make sure the source/dest fields were set on the migration
            # object
            self.assertEqual(self.instance.node,
                             self.migration.source_node)
            self.assertEqual(dest_node.hypervisor_hostname,
                             self.migration.dest_node)
            self.assertEqual(self.task.destination,
                             self.migration.dest_compute)

    def test_execute_without_destination(self):
        self.destination = None
        self._generate_task()
        self.assertIsNone(self.task.destination)

        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_find_destination'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
            mock.patch.object(self.migration, 'save'),
        ) as (mock_check, mock_find, mock_mig, mock_save):
            mock_find.return_value = ("found_host", "found_node")
            mock_mig.return_value = "bob"

            self.assertEqual("bob", self.task.execute())
            mock_check.assert_called_once_with(self.instance_host)
            mock_find.assert_called_once_with()
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest="found_host",
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            self.assertEqual('found_host', self.migration.dest_compute)
            self.assertEqual('found_node', self.migration.dest_node)
            self.assertEqual(self.instance.node,
                             self.migration.source_node)

    def test_check_instance_is_active_passes_when_paused(self):
        self.task.instance['power_state'] = power_state.PAUSED
        self.task._check_instance_is_active()

    def test_check_instance_is_active_fails_when_shutdown(self):
        self.task.instance['power_state'] = power_state.SHUTDOWN
        self.assertRaises(exception.InstanceInvalidState,
                          self.task._check_instance_is_active)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up(self, mock_is_up, mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = True

        self.task._check_host_is_up("host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up,
                                                       mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = False

        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.task._check_host_is_up, "host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host',
                       side_effect=exception.ComputeHostNotFound(
                           host='host'))
    def test_check_instance_host_is_up_fails_if_not_found(self, mock):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.task._check_host_is_up, "host")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    def test_check_requested_destination(self, mock_check, mock_is_up,
                                         mock_get_info, mock_get_host):
        mock_get_host.return_value = "service"
        mock_is_up.return_value = True
        hypervisor_details = objects.ComputeNode(
            hypervisor_type="a",
            hypervisor_version=6.1,
            free_ram_mb=513,
            memory_mb=512,
            ram_allocation_ratio=1.0)
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"

        self.assertEqual((hypervisor_details, hypervisor_details),
                         self.task._check_requested_destination())
        self.assertEqual("migrate_data", self.task.migrate_data)
        mock_get_host.assert_called_once_with(self.context,
                                              self.destination)
        mock_is_up.assert_called_once_with("service")
        self.assertEqual([mock.call(self.destination),
                          mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)
        mock_check.assert_called_once_with(self.context, self.instance,
                                           self.destination,
                                           self.block_migration,
                                           self.disk_over_commit)

    def test_check_requested_destination_fails_with_same_dest(self):
        self.task.destination = "same"
        self.task.source = "same"
        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.task._check_requested_destination)

    @mock.patch.object(objects.Service, 'get_by_compute_host',
                       side_effect=exception.ComputeHostNotFound(
                           host='host'))
    def test_check_requested_destination_fails_when_destination_is_up(
            self, mock):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.task._check_requested_destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(objects.ComputeNode,
                       'get_first_node_by_host_for_old_compat')
    def test_check_requested_destination_fails_with_not_enough_memory(
            self, mock_get_first, mock_is_up):
        mock_get_first.return_value = objects.ComputeNode(
            free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9)

        # free_ram is bigger than instance.ram (512) but the allocation
        # ratio reduces the total available RAM to 410MB
        # (1024 * 0.9 - (1024 - 513))
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_get_first.assert_called_once_with(self.context,
                                               self.destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_destination_has_enough_memory')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_requested_destination_fails_with_hypervisor_diff(
            self, mock_get_info, mock_check, mock_is_up):
        mock_get_info.side_effect = [
            objects.ComputeNode(hypervisor_type='b'),
            objects.ComputeNode(hypervisor_type='a')]

        self.assertRaises(exception.InvalidHypervisorType,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_check.assert_called_once_with()
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_destination_has_enough_memory')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_requested_destination_fails_with_hypervisor_too_old(
            self, mock_get_info, mock_check, mock_is_up):
        host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
        host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
        mock_get_info.side_effect = [objects.ComputeNode(**host1),
                                     objects.ComputeNode(**host2)]

        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_check.assert_called_once_with()
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       return_value=objects.HostMapping(
                           cell_mapping=objects.CellMapping(
                               uuid=uuids.different)))
    def test_check_requested_destination_fails_different_cells(
            self, mock_get_host_mapping, mock_check, mock_is_up,
            mock_get_info, mock_get_host):
        mock_get_host.return_value = "service"
        mock_is_up.return_value = True
        hypervisor_details = objects.ComputeNode(
            hypervisor_type="a",
            hypervisor_version=6.1,
            free_ram_mb=513,
            memory_mb=512,
            ram_allocation_ratio=1.0)
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"

        ex = self.assertRaises(exception.MigrationPreCheckError,
                               self.task._check_requested_destination)
        self.assertIn('across cells', six.text_type(ex))

    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec,
                                 'reset_forced_destinations')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.fake_spec.reset_forced_destinations()
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual(("host1", "node1"),
                         self.task._find_destination())
        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)

    def test_find_destination_works_with_no_request_spec(self):
        task = live_migrate.LiveMigrationTask(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit, self.migration,
            compute_rpcapi.ComputeAPI(), servicegroup.API(),
            scheduler_client.SchedulerClient(), request_spec=None)
        another_spec = objects.RequestSpec()
        self.instance.flavor = objects.Flavor()
        self.instance.numa_topology = None
        self.instance.pci_requests = None

        @mock.patch.object(task, '_call_livem_checks_on_host')
        @mock.patch.object(task, '_check_compatible_with_source_hypervisor')
        @mock.patch.object(task.scheduler_client, 'select_destinations')
        @mock.patch.object(objects.RequestSpec, 'from_components')
        @mock.patch.object(scheduler_utils, 'setup_instance_group')
        @mock.patch.object(utils, 'get_image_from_system_metadata')
        def do_test(get_image, setup_ig, from_components, select_dest,
                    check_compat, call_livem_checks):
            get_image.return_value = "image"
            from_components.return_value = another_spec
            select_dest.return_value = [{'host': 'host1',
                                         'nodename': 'node1'}]

            self.assertEqual(("host1", "node1"), task._find_destination())

            get_image.assert_called_once_with(
                self.instance.system_metadata)
            setup_ig.assert_called_once_with(self.context, another_spec)
            select_dest.assert_called_once_with(self.context, another_spec,
                                                [self.instance.uuid])
            # Make sure the request_spec was updated to include the cell
            # mapping.
            self.assertIsNotNone(another_spec.requested_destination.cell)
            check_compat.assert_called_once_with("host1")
            call_livem_checks.assert_called_once_with("host1")
        do_test()

    def test_find_destination_no_image_works(self):
        self.instance['image_ref'] = ''

        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual(("host1", "node1"),
                         self.task._find_destination())

    def _test_find_destination_retry_hypervisor_raises(self, error):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
            .AndRaise(error)

        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host2', 'nodename': 'node2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        with mock.patch.object(
                self.task, '_remove_host_allocations') as remove_allocs:
            self.assertEqual(("host2", "node2"),
                             self.task._find_destination())
            # Should have removed allocations for the first host.
            remove_allocs.assert_called_once_with('host1', 'node1')

    def test_find_destination_retry_with_old_hypervisor(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.DestinationHypervisorTooOld)

    def test_find_destination_retry_with_invalid_hypervisor_type(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.InvalidHypervisorType)

    def test_find_destination_retry_with_invalid_livem_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
            .AndRaise(exception.Invalid)

        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host2', 'nodename': 'node2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        with mock.patch.object(
                self.task, '_remove_host_allocations') as remove_allocs:
            self.assertEqual(("host2", "node2"),
                             self.task._find_destination())
            # Should have removed allocations for the first host.
            remove_allocs.assert_called_once_with('host1', 'node1')

    def test_find_destination_retry_with_failed_migration_pre_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
            .AndRaise(exception.MigrationPreCheckError("reason"))

        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host2', 'nodename': 'node2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        with mock.patch.object(
                self.task, '_remove_host_allocations') as remove_allocs:
            self.assertEqual(("host2", "node2"),
                             self.task._find_destination())
            # Should have removed allocations for the first host.
            remove_allocs.assert_called_once_with('host1', 'node1')

    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
            .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        with test.nested(
            mock.patch.object(self.task.migration, 'save'),
            mock.patch.object(self.task, '_remove_host_allocations'),
        ) as (save_mock, remove_allocs):
            self.assertRaises(exception.MaxRetriesExceeded,
                              self.task._find_destination)
            self.assertEqual('failed', self.task.migration.status)
            save_mock.assert_called_once_with()
            # Should have removed allocations for the first host.
            remove_allocs.assert_called_once_with('host1', 'node1')

    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndRaise(
                exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost,
                          self.task._find_destination)

    @mock.patch("nova.utils.get_image_from_system_metadata")
    @mock.patch("nova.scheduler.utils.build_request_spec")
    @mock.patch("nova.scheduler.utils.setup_instance_group")
    @mock.patch("nova.objects.RequestSpec.from_primitives")
    def test_find_destination_with_remoteError(
            self, m_from_primitives, m_setup_instance_group,
            m_build_request_spec, m_get_image_from_system_metadata):
        m_get_image_from_system_metadata.return_value = {'properties': {}}
        m_build_request_spec.return_value = {}
        fake_spec = objects.RequestSpec()
        m_from_primitives.return_value = fake_spec
        with mock.patch.object(
                self.task.scheduler_client,
                'select_destinations') as m_select_destinations:
            error = messaging.RemoteError()
            m_select_destinations.side_effect = error
            self.assertRaises(exception.MigrationSchedulerRPCError,
                              self.task._find_destination)

    def test_call_livem_checks_on_host(self):
        with mock.patch.object(
                self.task.compute_rpcapi,
                'check_can_live_migrate_destination',
                side_effect=messaging.MessagingTimeout):
            self.assertRaises(exception.MigrationPreCheckError,
                              self.task._call_livem_checks_on_host, {})

    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid',
                       side_effect=exception.InstanceMappingNotFound(
                           uuid=uuids.instance))
    def test_get_source_cell_mapping_not_found(self, mock_get):
        """Negative test where InstanceMappingNotFound is raised and
        converted to MigrationPreCheckError.
        """
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_source_cell_mapping)
        mock_get.assert_called_once_with(self.task.context,
                                         self.task.instance.uuid)

    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       side_effect=exception.HostMappingNotFound(
                           name='destination'))
    def test_get_destination_cell_mapping_not_found(self, mock_get):
        """Negative test where HostMappingNotFound is raised and
        converted to MigrationPreCheckError.
        """
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_destination_cell_mapping)
        mock_get.assert_called_once_with(self.task.context,
                                         self.task.destination)

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename',
                       side_effect=exception.ComputeHostNotFound(
                           host='host'))
    def test_remove_host_allocations_compute_host_not_found(self, get_cn):
        """Tests that failing to find a ComputeNode will not blow up
        the _remove_host_allocations method.
        """
        with mock.patch.object(
                self.task.scheduler_client.reportclient,
                'remove_provider_from_instance_allocation'
        ) as remove_provider:
            self.task._remove_host_allocations('host', 'node')
        remove_provider.assert_not_called()
class LiveMigrationTaskTestCase(test.NoDBTestCase):
    def setUp(self):
        super(LiveMigrationTaskTestCase, self).setUp()
        self.context = nova_context.get_admin_context()
        self.instance_host = "host"
        self.instance_uuid = uuids.instance
        self.instance_image = "image_ref"
        db_instance = fake_instance.fake_db_instance(
            host=self.instance_host,
            uuid=self.instance_uuid,
            power_state=power_state.RUNNING,
            vm_state=vm_states.ACTIVE,
            memory_mb=512,
            image_ref=self.instance_image)
        self.instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), db_instance)
        self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
        self.instance.numa_topology = None
        self.instance.pci_requests = None
        self.instance.resources = None
        self.destination = "destination"
        self.block_migration = "bm"
        self.disk_over_commit = "doc"
        self.migration = objects.Migration()
        self.fake_spec = objects.RequestSpec()
        self._generate_task()

        _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
        self.heal_reqspec_is_bfv_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
        self.ensure_network_metadata_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch(
            'nova.network.neutron.API.'
            'get_requested_resource_for_instance',
            return_value=[])
        self.mock_get_res_req = _p.start()
        self.addCleanup(_p.stop)

    def _generate_task(self):
        self.task = live_migrate.LiveMigrationTask(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit, self.migration,
            compute_rpcapi.ComputeAPI(), servicegroup.API(),
            query.SchedulerQueryClient(), report.SchedulerReportClient(),
            self.fake_spec)

    @mock.patch('nova.availability_zones.get_host_availability_zone',
                return_value='fake-az')
    def test_execute_with_destination(self, mock_get_az):
        dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_check_requested_destination'),
            mock.patch.object(scheduler_utils,
                              'claim_resources_on_destination'),
            mock.patch.object(self.migration, 'save'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
            mock.patch('nova.conductor.tasks.migrate.'
                       'replace_allocation_with_migration'),
            mock.patch.object(self.task,
                              '_check_destination_is_not_source'),
            mock.patch.object(self.task,
                              '_check_destination_has_enough_memory'),
            mock.patch.object(self.task,
                              '_check_compatible_with_source_hypervisor',
                              return_value=(mock.sentinel.source_node,
                                            dest_node)),
        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save,
              mock_mig, m_alloc, m_check_diff, m_check_enough_mem,
              m_check_compatible):
            mock_mig.return_value = "bob"
            m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)

            self.assertEqual("bob", self.task.execute())
            mock_check_up.assert_has_calls([
                mock.call(self.instance_host),
                mock.call(self.destination)])
            mock_check_dest.assert_called_once_with()
            m_check_diff.assert_called_once()
            m_check_enough_mem.assert_called_once()
            m_check_compatible.assert_called_once()
            allocs = mock.sentinel.allocs
            mock_claim.assert_called_once_with(
                self.context, self.task.report_client,
                self.instance, mock.sentinel.source_node, dest_node,
                source_allocations=allocs, consumer_generation=None)
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            mock_get_az.assert_called_once_with(self.context,
                                                self.destination)
            self.assertEqual('fake-az', self.instance.availability_zone)
            # make sure the source/dest fields were set on the migration
            # object
            self.assertEqual(self.instance.node,
                             self.migration.source_node)
            self.assertEqual(dest_node.hypervisor_hostname,
                             self.migration.dest_node)
            self.assertEqual(self.task.destination,
                             self.migration.dest_compute)
            m_alloc.assert_called_once_with(self.context, self.instance,
                                            self.migration)
        # When the task is executed with a destination it means the host
        # is being forced and we don't call the scheduler, so we don't
        # need to heal the request spec.
        self.heal_reqspec_is_bfv_mock.assert_not_called()
        # When the task is executed with a destination it means the host
        # is being forced and we don't call the scheduler, so we don't
        # need to modify the request spec.
        self.ensure_network_metadata_mock.assert_not_called()

    @mock.patch('nova.availability_zones.get_host_availability_zone',
                return_value='nova')
    def test_execute_without_destination(self, mock_get_az):
        self.destination = None
        self._generate_task()
        self.assertIsNone(self.task.destination)

        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_find_destination'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
            mock.patch.object(self.migration, 'save'),
            mock.patch('nova.conductor.tasks.migrate.'
                       'replace_allocation_with_migration'),
        ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc):
            mock_find.return_value = ("found_host", "found_node", None)
            mock_mig.return_value = "bob"
            mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock())

            self.assertEqual("bob", self.task.execute())
            mock_check.assert_called_once_with(self.instance_host)
            mock_find.assert_called_once_with()
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest="found_host",
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            mock_get_az.assert_called_once_with(self.context, 'found_host')
            self.assertEqual('found_host', self.migration.dest_compute)
            self.assertEqual('found_node', self.migration.dest_node)
            self.assertEqual(self.instance.node,
                             self.migration.source_node)
            self.assertTrue(mock_alloc.called)

    def test_check_instance_is_active_passes_when_paused(self):
        self.task.instance['power_state'] = power_state.PAUSED
        self.task._check_instance_is_active()

    def test_check_instance_is_active_fails_when_shutdown(self):
        self.task.instance['power_state'] = power_state.SHUTDOWN
        self.assertRaises(exception.InstanceInvalidState,
                          self.task._check_instance_is_active)

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    def test_check_instance_has_no_numa_passes_no_numa(self, mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        self.task.instance.numa_topology = None
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task._check_instance_has_no_numa()

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    def test_check_instance_has_no_numa_passes_non_kvm(self, mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), pcpuset=set(), memory=1024)])
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='xen')
        self.task._check_instance_has_no_numa()

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service, 'get_minimum_version',
                       return_value=39)
    def test_check_instance_has_no_numa_passes_workaround(
            self, mock_get_min_ver, mock_get):
        self.flags(enable_numa_live_migration=True, group='workarounds')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), pcpuset=set(), memory=1024)])
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task._check_instance_has_no_numa()
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service, 'get_minimum_version',
                       return_value=39)
    def test_check_instance_has_no_numa_fails(self, mock_get_min_ver,
                                              mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), pcpuset=set(), memory=1024)])
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_instance_has_no_numa)
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service, 'get_minimum_version',
                       return_value=40)
    def test_check_instance_has_no_numa_new_svc_passes(
            self, mock_get_min_ver, mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), pcpuset=set(), memory=1024)])
        self.task._check_instance_has_no_numa()
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up(self, mock_is_up, mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = True

        self.task._check_host_is_up("host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up,
                                                       mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = False

        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.task._check_host_is_up, "host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host',
                       side_effect=exception.ComputeHostNotFound(
                           host='host'))
    def test_check_instance_host_is_up_fails_if_not_found(self, mock):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.task._check_host_is_up, "host")

    def test_check_destination_fails_with_same_dest(self):
        self.task.destination = "same"
        self.task.source = "same"
        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.task._check_destination_is_not_source)

    @mock.patch.object(objects.ComputeNode,
                       'get_first_node_by_host_for_old_compat')
    def test_check_destination_fails_with_not_enough_memory(
            self, mock_get_first):
        mock_get_first.return_value = objects.ComputeNode(
            free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9)

        # free_ram is bigger than instance.ram (512) but the allocation
        # ratio reduces the total available RAM to 410MB
        # (1024 * 0.9 - (1024 - 513))
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_destination_has_enough_memory)
        mock_get_first.assert_called_once_with(self.context,
                                               self.destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_diff(
            self, mock_get_info):
        mock_get_info.side_effect = [
            objects.ComputeNode(hypervisor_type='b'),
            objects.ComputeNode(hypervisor_type='a')]

        self.assertRaises(
            exception.InvalidHypervisorType,
            self.task._check_compatible_with_source_hypervisor,
            self.destination)
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_too_old(
            self, mock_get_info):
        host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
        host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
        mock_get_info.side_effect = [objects.ComputeNode(**host1),
                                     objects.ComputeNode(**host2)]

        self.assertRaises(
            exception.DestinationHypervisorTooOld,
            self.task._check_compatible_with_source_hypervisor,
            self.destination)
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)

    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    def test_check_requested_destination(self, mock_check):
        mock_check.return_value = "migrate_data"
        self.task.limits = fake_limits1

        with test.nested(
            mock.patch.object(self.task.network_api,
                              'supports_port_binding_extension',
                              return_value=False),
            mock.patch.object(self.task, '_check_can_migrate_pci'),
        ):
            self.assertIsNone(self.task._check_requested_destination())
        self.assertEqual("migrate_data", self.task.migrate_data)
        mock_check.assert_called_once_with(self.context, self.instance,
                                           self.destination,
                                           self.block_migration,
                                           self.disk_over_commit,
                                           self.task.migration,
                                           fake_limits1)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       return_value=objects.HostMapping(
                           cell_mapping=objects.CellMapping(
                               uuid=uuids.different)))
    def test_check_requested_destination_fails_different_cells(
            self, mock_get_host_mapping, mock_check, mock_is_up,
            mock_get_info, mock_get_host):
        mock_get_host.return_value = "service"
        mock_is_up.return_value = True
        hypervisor_details = objects.ComputeNode(
            hypervisor_type="a",
            hypervisor_version=6.1,
            free_ram_mb=513,
            memory_mb=512,
            ram_allocation_ratio=1.0)
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"

        with test.nested(
            mock.patch.object(self.task.network_api,
                              'supports_port_binding_extension',
                              return_value=False),
            mock.patch.object(self.task, '_check_can_migrate_pci'),
        ):
            ex = self.assertRaises(exception.MigrationPreCheckError,
                                   self.task._check_requested_destination)
            self.assertIn('across cells', str(ex))

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_works(self, mock_setup, mock_reset,
                                    mock_select, mock_check, mock_call):
        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())

        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)
        # Make sure the spec was updated to include the project_id.
        self.assertEqual(self.fake_spec.project_id,
                         self.instance.project_id)

        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_reset.assert_called_once_with()
        self.ensure_network_metadata_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, self.fake_spec, self.instance)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_no_image_works(self, mock_setup, mock_select,
                                             mock_check, mock_call):
        self.instance['image_ref'] = ''

        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())

        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]],
                                    [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def _test_find_destination_retry_hypervisor_raises(
            self, error, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        mock_check.side_effect = [error, None]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(
            fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'),
                                     mock.call('host2')])
        mock_call.assert_called_once_with('host2', {})

    def test_find_destination_retry_with_old_hypervisor(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.DestinationHypervisorTooOld)

    def test_find_destination_retry_with_invalid_hypervisor_type(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.InvalidHypervisorType)

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]],
                                    [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_invalid_livem_checks(
            self, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [exception.Invalid(), None]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(
            fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'),
                                     mock.call('host2')])
        mock_call.assert_has_calls([mock.call('host1', {}),
                                    mock.call('host2', {})])

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]],
                                    [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_failed_migration_pre_checks(
            self, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [
            exception.MigrationPreCheckError('reason'), None]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(
            fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'),
                                     mock.call('host2')])
        mock_call.assert_has_calls([mock.call('host1', {}),
                                    mock.call('host2', {})])

    @mock.patch.object(objects.Migration, 'save')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor',
                       side_effect=exception.DestinationHypervisorTooOld())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_exceeds_max(
            self, mock_setup, mock_select, mock_check, mock_remove,
            mock_save):
        self.flags(migrate_max_retries=0)

        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        mock_save.assert_called_once_with()
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(
            fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')

    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=exception.NoValidHost(reason=""))
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_when_runs_out_of_hosts(self, mock_setup,
                                                     mock_select):
        self.assertRaises(exception.NoValidHost,
                          self.task._find_destination)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)

    @mock.patch("nova.utils.get_image_from_system_metadata")
    @mock.patch("nova.scheduler.utils.build_request_spec")
    @mock.patch("nova.scheduler.utils.setup_instance_group")
    @mock.patch("nova.objects.RequestSpec.from_primitives")
    def test_find_destination_with_remoteError(
            self, m_from_primitives, m_setup_instance_group,
            m_build_request_spec, m_get_image_from_system_metadata):
        m_get_image_from_system_metadata.return_value = {'properties': {}}
        m_build_request_spec.return_value = {}
        fake_spec = objects.RequestSpec()
        m_from_primitives.return_value = fake_spec
        with mock.patch.object(
                self.task.query_client,
                'select_destinations') as m_select_destinations:
            error = messaging.RemoteError()
            m_select_destinations.side_effect = error
            self.assertRaises(exception.MigrationSchedulerRPCError,
                              self.task._find_destination)

    def test_call_livem_checks_on_host(self):
        with test.nested(
            mock.patch.object(self.task.compute_rpcapi,
                              'check_can_live_migrate_destination',
                              side_effect=messaging.MessagingTimeout),
            mock.patch.object(self.task, '_check_can_migrate_pci'),
        ):
            self.assertRaises(exception.MigrationPreCheckError,
                              self.task._call_livem_checks_on_host, {}, {})

    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_merges_profiles(self,
                                                       mock_bind_ports):
        """Assert that if both the migration_data and the provider mapping
        contains binding
profile related information then such information is merged in the resulting profile. """ self.task.migrate_data = objects.LibvirtLiveMigrateData( vifs=[ objects.VIFMigrateData( port_id=uuids.port1, profile_json=jsonutils.dumps( {'some-key': 'value'})) ]) provider_mappings = {uuids.port1: [uuids.dest_bw_rp]} self.task._bind_ports_on_destination('dest-host', provider_mappings) mock_bind_ports.assert_called_once_with( context=self.context, instance=self.instance, host='dest-host', vnic_types=None, port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp, 'some-key': 'value'}}) @mock.patch('nova.network.neutron.API.bind_ports_to_host') def test_bind_ports_on_destination_migration_data(self, mock_bind_ports): """Assert that if only the migration_data contains binding profile related information then that is sent to neutron. """ self.task.migrate_data = objects.LibvirtLiveMigrateData( vifs=[ objects.VIFMigrateData( port_id=uuids.port1, profile_json=jsonutils.dumps( {'some-key': 'value'})) ]) provider_mappings = {} self.task._bind_ports_on_destination('dest-host', provider_mappings) mock_bind_ports.assert_called_once_with( context=self.context, instance=self.instance, host='dest-host', vnic_types=None, port_profiles={uuids.port1: {'some-key': 'value'}}) @mock.patch('nova.network.neutron.API.bind_ports_to_host') def test_bind_ports_on_destination_provider_mapping(self, mock_bind_ports): """Assert that if only the provider mapping contains binding profile related information then that is sent to neutron. """ self.task.migrate_data = objects.LibvirtLiveMigrateData( vifs=[ objects.VIFMigrateData( port_id=uuids.port1) ]) provider_mappings = {uuids.port1: [uuids.dest_bw_rp]} self.task._bind_ports_on_destination('dest-host', provider_mappings) mock_bind_ports.assert_called_once_with( context=self.context, instance=self.instance, host='dest-host', vnic_types=None, port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp}}) @mock.patch( 'nova.compute.utils.' 'update_pci_request_spec_with_allocated_interface_name') @mock.patch('nova.scheduler.utils.fill_provider_mapping') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_with_resource_request( self, mock_setup, mock_reset, mock_select, mock_check, mock_call, mock_fill_provider_mapping, mock_update_pci_req): resource_req = [objects.RequestGroup(requester_id=uuids.port_id)] self.mock_get_res_req.return_value = resource_req self.instance.pci_requests = objects.InstancePCIRequests(requests=[]) self.assertEqual(("host1", "node1", fake_limits1), self.task._find_destination()) # Make sure the request_spec was updated to include the cell # mapping. self.assertIsNotNone(self.fake_spec.requested_destination.cell) # Make sure the spec was updated to include the project_id. 
self.assertEqual(self.fake_spec.project_id, self.instance.project_id) # Make sure that requested_resources are added to the request spec self.assertEqual( resource_req, self.task.request_spec.requested_resources) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_reset.assert_called_once_with() self.ensure_network_metadata_mock.assert_called_once_with( self.instance) self.heal_reqspec_is_bfv_mock.assert_called_once_with( self.context, self.fake_spec, self.instance) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1', {uuids.port_id: []}) mock_fill_provider_mapping.assert_called_once_with( self.task.request_spec, fake_selection1) mock_update_pci_req.assert_called_once_with( self.context, self.task.report_client, [], {uuids.port_id: []}) @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid', side_effect=exception.InstanceMappingNotFound( uuid=uuids.instance)) def test_get_source_cell_mapping_not_found(self, mock_get): """Negative test where InstanceMappingNotFound is raised and converted to MigrationPreCheckError. """ self.assertRaises(exception.MigrationPreCheckError, self.task._get_source_cell_mapping) mock_get.assert_called_once_with( self.task.context, self.task.instance.uuid) @mock.patch.object(objects.HostMapping, 'get_by_host', side_effect=exception.HostMappingNotFound( name='destination')) def test_get_destination_cell_mapping_not_found(self, mock_get): """Negative test where HostMappingNotFound is raised and converted to MigrationPreCheckError. """ self.assertRaises(exception.MigrationPreCheckError, self.task._get_destination_cell_mapping) mock_get.assert_called_once_with( self.task.context, self.task.destination) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
                'remove_provider_tree_from_instance_allocation')
    def test_remove_host_allocations(self, remove_provider):
        self.task._remove_host_allocations(uuids.cn)
        remove_provider.assert_called_once_with(
            self.task.context, self.task.instance.uuid, uuids.cn)

    def test_check_can_migrate_pci(self):
        """Tests that _check_can_migrate_pci() allows live migration if the
        instance does not contain non-network-related PCI requests and
        raises MigrationPreCheckError otherwise.
        """
        @mock.patch.object(self.task.network_api,
                           'supports_port_binding_extension')
        @mock.patch.object(live_migrate,
                           'supports_vif_related_pci_allocations')
        def _test(instance_pci_reqs,
                  supp_binding_ext_retval,
                  supp_vif_related_pci_alloc_retval,
                  mock_supp_vif_related_pci_alloc,
                  mock_supp_port_binding_ext):
            mock_supp_vif_related_pci_alloc.return_value = \
                supp_vif_related_pci_alloc_retval
            mock_supp_port_binding_ext.return_value = \
                supp_binding_ext_retval
            self.task.instance.pci_requests = instance_pci_reqs
            self.task._check_can_migrate_pci("Src", "Dst")
            # In case we managed to get away without raising, check mocks.
            if instance_pci_reqs:
                mock_supp_port_binding_ext.assert_called_once_with(
                    self.context)
                self.assertTrue(mock_supp_vif_related_pci_alloc.called)

        # instance has no PCI requests
        _test(None, False, False)  # No support in Neutron and Computes
        _test(None, True, False)   # No support in Computes
        _test(None, False, True)   # No support in Neutron
        _test(None, True, True)    # Support in both Neutron and Computes

        # instance contains network-related PCI requests (alias_name=None)
        pci_requests = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(alias_name=None)])
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        _test(pci_requests, True, True)

        # instance contains non-network-related PCI requests
        # (alias_name != None)
        pci_requests.requests.append(
            objects.InstancePCIRequest(alias_name="non-network-related-pci"))
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, True)

    def test_check_can_migrate_specific_resources(self):
        """Test _check_can_migrate_specific_resources allows live migration
        with vpmem.
        """
        @mock.patch.object(live_migrate, 'supports_vpmem_live_migration')
        def _test(resources, supp_lm_vpmem_retval, mock_support_lm_vpmem):
            self.instance.resources = resources
            mock_support_lm_vpmem.return_value = supp_lm_vpmem_retval
            self.task._check_can_migrate_specific_resources()

        vpmem_0 = objects.LibvirtVPMEMDevice(
            label='4GB', name='ns_0', devpath='/dev/dax0.0',
            size=4292870144, align=2097152)
        resource_0 = objects.Resource(
            provider_uuid=uuids.rp,
            resource_class="CUSTOM_PMEM_NAMESPACE_4GB",
            identifier='ns_0', metadata=vpmem_0)
        resources = objects.ResourceList(
            objects=[resource_0])

        _test(None, False)
        _test(None, True)
        _test(resources, True)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, resources, False)
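# The PCI pre-check cases above encode a small decision table. The sketch
# below is a minimal illustrative model of that rule, not nova's actual
# _check_can_migrate_pci implementation; the function name and its boolean
# parameters are hypothetical stand-ins for the mocked support flags:
# network-related PCI requests (alias_name=None) are migratable only when
# Neutron supports the port binding extension and the computes support
# VIF-related PCI allocations, while any non-network PCI request
# (alias_name set) blocks live migration outright.
def _sketch_can_live_migrate_pci(has_network_pci_requests,
                                 has_non_network_pci_requests,
                                 neutron_supports_port_binding,
                                 computes_support_vif_pci_allocation):
    """Hypothetical helper mirroring the outcomes asserted above."""
    if has_non_network_pci_requests:
        # Flavor-based (alias) PCI devices cannot be re-claimed on the
        # destination during a live migration, so this combination always
        # fails regardless of Neutron/compute support.
        return False
    if has_network_pci_requests:
        # SR-IOV ports can follow the instance only when both sides can
        # re-bind the VIF-related PCI allocation.
        return (neutron_supports_port_binding and
                computes_support_vif_pci_allocation)
    # No PCI requests at all: nothing blocks the migration.
    return True
# For example, _sketch_can_live_migrate_pci(True, False, True, True) is the
# single passing network-PCI combination, matching _test(pci_requests, True,
# True) above; every other network-PCI combination raises in the tests.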
class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = "context" self.instance_host = "host" self.instance_uuid = uuids.instance self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv') self.heal_reqspec_is_bfv_mock = _p.start() self.addCleanup(_p.stop) _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata') self.ensure_network_metadata_mock = _p.start() self.addCleanup(_p.stop) def _generate_task(self): self.task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) def test_execute_with_destination(self): dest_node = objects.ComputeNode(hypervisor_hostname='dest_node') with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_check_requested_destination', return_value=(mock.sentinel.source_node, dest_node)), mock.patch.object(scheduler_utils, 'claim_resources_on_destination'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch('nova.conductor.tasks.migrate.' 'replace_allocation_with_migration'), ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig, m_alloc): mock_mig.return_value = "bob" m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs) self.assertEqual("bob", self.task.execute()) mock_check_up.assert_called_once_with(self.instance_host) mock_check_dest.assert_called_once_with() allocs = mock.sentinel.allocs mock_claim.assert_called_once_with( self.context, self.task.scheduler_client.reportclient, self.instance, mock.sentinel.source_node, dest_node, source_allocations=allocs, consumer_generation=None) mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) # make sure the source/dest fields were set on the migration object self.assertEqual(self.instance.node, self.migration.source_node) self.assertEqual(dest_node.hypervisor_hostname, self.migration.dest_node) self.assertEqual(self.task.destination, self.migration.dest_compute) m_alloc.assert_called_once_with(self.context, self.instance, self.migration) # When the task is executed with a destination it means the host is # being forced and we don't call the scheduler, so we don't need to # heal the request spec. 
self.heal_reqspec_is_bfv_mock.assert_not_called() # When the task is executed with a destination it means the host is # being forced and we don't call the scheduler, so we don't need to # modify the request spec self.ensure_network_metadata_mock.assert_not_called() def test_execute_without_destination(self): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_find_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch.object(self.migration, 'save'), mock.patch('nova.conductor.tasks.migrate.' 'replace_allocation_with_migration'), ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc): mock_find.return_value = ("found_host", "found_node") mock_mig.return_value = "bob" mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock()) self.assertEqual("bob", self.task.execute()) mock_check.assert_called_once_with(self.instance_host) mock_find.assert_called_once_with() mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) self.assertEqual('found_host', self.migration.dest_compute) self.assertEqual('found_node', self.migration.dest_node) self.assertEqual(self.instance.node, self.migration.source_node) self.assertTrue(mock_alloc.called) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = True self.task._check_host_is_up("host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = False self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_instance_host_is_up_fails_if_not_found(self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_host_is_up, "host") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') def test_check_requested_destination(self, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) 
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"

        with mock.patch.object(self.task.network_api,
                               'supports_port_binding_extension',
                               return_value=False):
            self.assertEqual((hypervisor_details, hypervisor_details),
                             self.task._check_requested_destination())
        self.assertEqual("migrate_data", self.task.migrate_data)
        mock_get_host.assert_called_once_with(self.context, self.destination)
        mock_is_up.assert_called_once_with("service")
        self.assertEqual([mock.call(self.destination),
                          mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)
        mock_check.assert_called_once_with(self.context, self.instance,
                                           self.destination,
                                           self.block_migration,
                                           self.disk_over_commit)

    def test_check_requested_destination_fails_with_same_dest(self):
        self.task.destination = "same"
        self.task.source = "same"
        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.task._check_requested_destination)

    @mock.patch.object(objects.Service, 'get_by_compute_host',
                       side_effect=exception.ComputeHostNotFound(host='host'))
    def test_check_requested_destination_fails_when_dest_host_not_found(
            self, mock):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.task._check_requested_destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(objects.ComputeNode,
                       'get_first_node_by_host_for_old_compat')
    def test_check_requested_destination_fails_with_not_enough_memory(
            self, mock_get_first, mock_is_up):
        mock_get_first.return_value = (
            objects.ComputeNode(free_ram_mb=513,
                                memory_mb=1024,
                                ram_allocation_ratio=0.9))
        # free_ram is bigger than instance.ram (512) but the allocation
        # ratio reduces the total available RAM to 410MB
        # (1024 * 0.9 - (1024 - 513))
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_get_first.assert_called_once_with(self.context, self.destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_destination_has_enough_memory')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_requested_destination_fails_with_hypervisor_diff(
            self, mock_get_info, mock_check, mock_is_up):
        mock_get_info.side_effect = [
            objects.ComputeNode(hypervisor_type='b'),
            objects.ComputeNode(hypervisor_type='a')]

        self.assertRaises(exception.InvalidHypervisorType,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_check.assert_called_once_with()
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_destination_has_enough_memory')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_requested_destination_fails_with_hypervisor_too_old(
            self, mock_get_info, mock_check, mock_is_up):
        host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
        host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
        mock_get_info.side_effect = [objects.ComputeNode(**host1),
                                     objects.ComputeNode(**host2)]

        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.task._check_requested_destination)
        mock_is_up.assert_called_once_with(self.destination)
        mock_check.assert_called_once_with()
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
mock_get_info.call_args_list) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') @mock.patch.object( objects.HostMapping, 'get_by_host', return_value=objects.HostMapping(cell_mapping=objects.CellMapping( uuid=uuids.different))) def test_check_requested_destination_fails_different_cells( self, mock_get_host_mapping, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" with mock.patch.object(self.task.network_api, 'supports_port_binding_extension', return_value=False): ex = self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) self.assertIn('across cells', six.text_type(ex)) @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_works(self, mock_setup, mock_reset, mock_select, mock_check, mock_call): self.assertEqual(("host1", "node1"), self.task._find_destination()) # Make sure the request_spec was updated to include the cell # mapping. self.assertIsNotNone(self.fake_spec.requested_destination.cell) # Make sure the spec was updated to include the project_id. 
self.assertEqual(self.fake_spec.project_id, self.instance.project_id) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_reset.assert_called_once_with() self.ensure_network_metadata_mock.assert_called_once_with( self.instance) self.heal_reqspec_is_bfv_mock.assert_called_once_with( self.context, self.fake_spec, self.instance) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1') def test_find_destination_works_with_no_request_spec(self): task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), request_spec=None) another_spec = objects.RequestSpec() self.instance.flavor = objects.Flavor() self.instance.numa_topology = None self.instance.pci_requests = None @mock.patch.object(task, '_call_livem_checks_on_host') @mock.patch.object(task, '_check_compatible_with_source_hypervisor') @mock.patch.object(task.scheduler_client, 'select_destinations') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(utils, 'get_image_from_system_metadata') def do_test(get_image, setup_ig, from_components, select_dest, check_compat, call_livem_checks): get_image.return_value = "image" from_components.return_value = another_spec select_dest.return_value = [[fake_selection1]] self.assertEqual(("host1", "node1"), task._find_destination()) get_image.assert_called_once_with(self.instance.system_metadata) setup_ig.assert_called_once_with(self.context, another_spec) self.ensure_network_metadata_mock.assert_called_once_with( self.instance) self.heal_reqspec_is_bfv_mock.assert_called_once_with( self.context, another_spec, self.instance) select_dest.assert_called_once_with(self.context, another_spec, [self.instance.uuid], return_objects=True, return_alternates=False) # Make sure the request_spec was updated to include the cell # mapping. 
self.assertIsNotNone(another_spec.requested_destination.cell) check_compat.assert_called_once_with("host1") call_livem_checks.assert_called_once_with("host1") do_test() @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_no_image_works(self, mock_setup, mock_select, mock_check, mock_call): self.instance['image_ref'] = '' self.assertEqual(("host1", "node1"), self.task._find_destination()) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1') @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def _test_find_destination_retry_hypervisor_raises(self, error, mock_setup, mock_select, mock_check, mock_call, mock_remove): mock_check.side_effect = [error, None] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_called_once_with('host2') def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_with_invalid_livem_checks( self, mock_setup, mock_select, mock_check, mock_call, mock_remove): self.flags(migrate_max_retries=1) mock_call.side_effect = [exception.Invalid(), None] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. 
mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_has_calls([mock.call('host1'), mock.call('host2')]) @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_with_failed_migration_pre_checks( self, mock_setup, mock_select, mock_check, mock_call, mock_remove): self.flags(migrate_max_retries=1) mock_call.side_effect = [ exception.MigrationPreCheckError('reason'), None ] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_has_calls([mock.call('host1'), mock.call('host2')]) @mock.patch.object(objects.Migration, 'save') @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor', side_effect=exception.DestinationHypervisorTooOld()) @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_exceeds_max(self, mock_setup, mock_select, mock_check, mock_remove, mock_save): self.flags(migrate_max_retries=0) self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) mock_save.assert_called_once_with() # Should have removed allocations for the first host. 
mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') @mock.patch.object(scheduler_client.SchedulerClient, 'select_destinations', side_effect=exception.NoValidHost(reason="")) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_when_runs_out_of_hosts(self, mock_setup, mock_select): self.assertRaises(exception.NoValidHost, self.task._find_destination) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") @mock.patch("nova.scheduler.utils.setup_instance_group") @mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError( self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.scheduler_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {}) @mock.patch('nova.conductor.tasks.live_migrate.' 'supports_extended_port_binding', return_value=True) def test_call_livem_checks_on_host_bind_ports(self, mock_supports_ext): data = objects.LibvirtLiveMigrateData() bindings = { uuids.port1: { 'host': 'dest-host' }, uuids.port2: { 'host': 'dest-host' } } @mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', return_value=data) @mock.patch.object(self.task.network_api, 'supports_port_binding_extension', return_value=True) @mock.patch.object(self.task.network_api, 'bind_ports_to_host', return_value=bindings) def _test(mock_bind_ports_to_host, mock_supports_port_binding, mock_check_can_live_migrate_dest): nwinfo = network_model.NetworkInfo([ network_model.VIF(uuids.port1), network_model.VIF(uuids.port2) ]) self.instance.info_cache = objects.InstanceInfoCache( network_info=nwinfo) self.task._call_livem_checks_on_host('dest-host') # Assert the migrate_data set on the task based on the port # bindings created. self.assertIn('vifs', data) self.assertEqual(2, len(data.vifs)) for vif in data.vifs: self.assertIn('source_vif', vif) self.assertEqual('dest-host', vif.host) self.assertEqual(vif.port_id, vif.source_vif['id']) _test() @mock.patch.object( objects.InstanceMapping, 'get_by_instance_uuid', side_effect=exception.InstanceMappingNotFound(uuid=uuids.instance)) def test_get_source_cell_mapping_not_found(self, mock_get): """Negative test where InstanceMappingNotFound is raised and converted to MigrationPreCheckError. 
""" self.assertRaises(exception.MigrationPreCheckError, self.task._get_source_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.instance.uuid) @mock.patch.object( objects.HostMapping, 'get_by_host', side_effect=exception.HostMappingNotFound(name='destination')) def test_get_destination_cell_mapping_not_found(self, mock_get): """Negative test where HostMappingNotFound is raised and converted to MigrationPreCheckError. """ self.assertRaises(exception.MigrationPreCheckError, self.task._get_destination_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.destination) @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename', side_effect=exception.ComputeHostNotFound(host='host')) def test_remove_host_allocations_compute_host_not_found(self, get_cn): """Tests that failing to find a ComputeNode will not blow up the _remove_host_allocations method. """ with mock.patch.object( self.task.scheduler_client.reportclient, 'remove_provider_from_instance_allocation') as remove_provider: self.task._remove_host_allocations('host', 'node') remove_provider.assert_not_called()