class LiveMigrationTaskTestCase(test.NoDBTestCase):

    def setUp(self):
        super(LiveMigrationTaskTestCase, self).setUp()
        self.context = nova_context.get_admin_context()
        self.instance_host = "host"
        self.instance_uuid = uuids.instance
        self.instance_image = "image_ref"
        db_instance = fake_instance.fake_db_instance(
            host=self.instance_host,
            uuid=self.instance_uuid,
            power_state=power_state.RUNNING,
            vm_state=vm_states.ACTIVE,
            memory_mb=512,
            image_ref=self.instance_image)
        self.instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), db_instance)
        self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
        self.instance.numa_topology = None
        self.destination = "destination"
        self.block_migration = "bm"
        self.disk_over_commit = "doc"
        self.migration = objects.Migration()
        self.fake_spec = objects.RequestSpec()
        self._generate_task()

        _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
        self.heal_reqspec_is_bfv_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
        self.ensure_network_metadata_mock = _p.start()
        self.addCleanup(_p.stop)

    def _generate_task(self):
        self.task = live_migrate.LiveMigrationTask(
            self.context, self.instance, self.destination,
            self.block_migration, self.disk_over_commit, self.migration,
            compute_rpcapi.ComputeAPI(), servicegroup.API(),
            query.SchedulerQueryClient(), report.SchedulerReportClient(),
            self.fake_spec)

    @mock.patch('nova.availability_zones.get_host_availability_zone',
                return_value='fake-az')
    def test_execute_with_destination(self, mock_get_az):
        dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
        with test.nested(
            mock.patch.object(self.task, '_check_host_is_up'),
            mock.patch.object(self.task, '_check_requested_destination',
                              return_value=(mock.sentinel.source_node,
                                            dest_node)),
            mock.patch.object(scheduler_utils,
                              'claim_resources_on_destination'),
            mock.patch.object(self.migration, 'save'),
            mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
            mock.patch('nova.conductor.tasks.migrate.'
                       'replace_allocation_with_migration'),
        ) as (mock_check_up, mock_check_dest, mock_claim, mock_save,
              mock_mig, m_alloc):
            mock_mig.return_value = "bob"
            m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)

            self.assertEqual("bob", self.task.execute())
            mock_check_up.assert_called_once_with(self.instance_host)
            mock_check_dest.assert_called_once_with()
            allocs = mock.sentinel.allocs
            mock_claim.assert_called_once_with(
                self.context, self.task.report_client, self.instance,
                mock.sentinel.source_node, dest_node,
                source_allocations=allocs, consumer_generation=None)
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            mock_get_az.assert_called_once_with(self.context,
                                                self.destination)
            self.assertEqual('fake-az', self.instance.availability_zone)
            # make sure the source/dest fields were set on the migration
            # object
            self.assertEqual(self.instance.node, self.migration.source_node)
            self.assertEqual(dest_node.hypervisor_hostname,
                             self.migration.dest_node)
            self.assertEqual(self.task.destination,
                             self.migration.dest_compute)
            m_alloc.assert_called_once_with(self.context, self.instance,
                                            self.migration)
        # When the task is executed with a destination it means the host is
        # being forced and we don't call the scheduler, so we don't need to
        # heal the request spec.
self.heal_reqspec_is_bfv_mock.assert_not_called() # When the task is executed with a destination it means the host is # being forced and we don't call the scheduler, so we don't need to # modify the request spec self.ensure_network_metadata_mock.assert_not_called() @mock.patch('nova.availability_zones.get_host_availability_zone', return_value='nova') def test_execute_without_destination(self, mock_get_az): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_find_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch.object(self.migration, 'save'), mock.patch('nova.conductor.tasks.migrate.' 'replace_allocation_with_migration'), ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc): mock_find.return_value = ("found_host", "found_node") mock_mig.return_value = "bob" mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock()) self.assertEqual("bob", self.task.execute()) mock_check.assert_called_once_with(self.instance_host) mock_find.assert_called_once_with() mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) mock_get_az.assert_called_once_with(self.context, 'found_host') self.assertEqual('found_host', self.migration.dest_compute) self.assertEqual('found_node', self.migration.dest_node) self.assertEqual(self.instance.node, self.migration.source_node) self.assertTrue(mock_alloc.called) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename') def test_check_instance_has_no_numa_passes_no_numa(self, mock_get): self.flags(enable_numa_live_migration=False, group='workarounds') self.task.instance.numa_topology = None mock_get.return_value = objects.ComputeNode(uuid=uuids.cn1, hypervisor_type='kvm') self.task._check_instance_has_no_numa() @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename') def test_check_instance_has_no_numa_passes_non_kvm(self, mock_get): self.flags(enable_numa_live_migration=False, group='workarounds') self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=1024) ]) mock_get.return_value = objects.ComputeNode(uuid=uuids.cn1, hypervisor_type='xen') self.task._check_instance_has_no_numa() @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename') def test_check_instance_has_no_numa_passes_workaround(self, mock_get): self.flags(enable_numa_live_migration=True, group='workarounds') self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=1024) ]) mock_get.return_value = objects.ComputeNode(uuid=uuids.cn1, hypervisor_type='kvm') self.task._check_instance_has_no_numa() @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename') def test_check_instance_has_no_numa_fails(self, mock_get): self.flags(enable_numa_live_migration=False, group='workarounds') mock_get.return_value = 
objects.ComputeNode(uuid=uuids.cn1, hypervisor_type='QEMU') self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=1024) ]) self.assertRaises(exception.MigrationPreCheckError, self.task._check_instance_has_no_numa) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = True self.task._check_host_is_up("host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = False self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_instance_host_is_up_fails_if_not_found(self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_host_is_up, "host") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') def test_check_requested_destination(self, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" with test.nested( mock.patch.object(self.task.network_api, 'supports_port_binding_extension', return_value=False), mock.patch.object(self.task, '_check_can_migrate_pci')): self.assertEqual((hypervisor_details, hypervisor_details), self.task._check_requested_destination()) self.assertEqual("migrate_data", self.task.migrate_data) mock_get_host.assert_called_once_with(self.context, self.destination) mock_is_up.assert_called_once_with("service") self.assertEqual([ mock.call(self.destination), mock.call(self.instance_host), mock.call(self.destination) ], mock_get_info.call_args_list) mock_check.assert_called_once_with(self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit) def test_check_requested_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" self.assertRaises(exception.UnableToMigrateToSelf, self.task._check_requested_destination) @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_requested_destination_fails_when_destination_is_up( self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_requested_destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') def test_check_requested_destination_fails_with_not_enough_memory( self, mock_get_first, mock_is_up): mock_get_first.return_value = 
(objects.ComputeNode( free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9, )) # free_ram is bigger than instance.ram (512) but the allocation # ratio reduces the total available RAM to 410MB # (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_get_first.assert_called_once_with(self.context, self.destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_diff( self, mock_get_info, mock_check, mock_is_up): mock_get_info.side_effect = [ objects.ComputeNode(hypervisor_type='b'), objects.ComputeNode(hypervisor_type='a') ] self.assertRaises(exception.InvalidHypervisorType, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_too_old( self, mock_get_info, mock_check, mock_is_up): host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} mock_get_info.side_effect = [ objects.ComputeNode(**host1), objects.ComputeNode(**host2) ] self.assertRaises(exception.DestinationHypervisorTooOld, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') @mock.patch.object( objects.HostMapping, 'get_by_host', return_value=objects.HostMapping(cell_mapping=objects.CellMapping( uuid=uuids.different))) def test_check_requested_destination_fails_different_cells( self, mock_get_host_mapping, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" with test.nested( mock.patch.object(self.task.network_api, 'supports_port_binding_extension', return_value=False), mock.patch.object(self.task, '_check_can_migrate_pci')): ex = self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) self.assertIn('across cells', six.text_type(ex)) @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(objects.RequestSpec, 
'reset_forced_destinations') @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_works(self, mock_setup, mock_reset, mock_select, mock_check, mock_call): self.assertEqual(("host1", "node1"), self.task._find_destination()) # Make sure the request_spec was updated to include the cell # mapping. self.assertIsNotNone(self.fake_spec.requested_destination.cell) # Make sure the spec was updated to include the project_id. self.assertEqual(self.fake_spec.project_id, self.instance.project_id) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_reset.assert_called_once_with() self.ensure_network_metadata_mock.assert_called_once_with( self.instance) self.heal_reqspec_is_bfv_mock.assert_called_once_with( self.context, self.fake_spec, self.instance) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_no_image_works(self, mock_setup, mock_select, mock_check, mock_call): self.instance['image_ref'] = '' self.assertEqual(("host1", "node1"), self.task._find_destination()) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1') @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def _test_find_destination_retry_hypervisor_raises(self, error, mock_setup, mock_select, mock_check, mock_call, mock_remove): mock_check.side_effect = [error, None] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. 
mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_called_once_with('host2') def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_with_invalid_livem_checks( self, mock_setup, mock_select, mock_check, mock_call, mock_remove): self.flags(migrate_max_retries=1) mock_call.side_effect = [exception.Invalid(), None] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_has_calls([mock.call('host1'), mock.call('host2')]) @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', side_effect=[[[fake_selection1]], [[fake_selection2]]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_with_failed_migration_pre_checks( self, mock_setup, mock_select, mock_check, mock_call, mock_remove): self.flags(migrate_max_retries=1) mock_call.side_effect = [ exception.MigrationPreCheckError('reason'), None ] self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. 
mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_has_calls([ mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False), mock.call(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) ]) mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')]) mock_call.assert_has_calls([mock.call('host1'), mock.call('host2')]) @mock.patch.object(objects.Migration, 'save') @mock.patch.object(live_migrate.LiveMigrationTask, '_remove_host_allocations') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor', side_effect=exception.DestinationHypervisorTooOld()) @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_retry_exceeds_max(self, mock_setup, mock_select, mock_check, mock_remove, mock_save): self.flags(migrate_max_retries=0) self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) mock_save.assert_called_once_with() # Should have removed allocations for the first host. mock_remove.assert_called_once_with('host1', 'node1') mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', side_effect=exception.NoValidHost(reason="")) @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_when_runs_out_of_hosts(self, mock_setup, mock_select): self.assertRaises(exception.NoValidHost, self.task._find_destination) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") @mock.patch("nova.scheduler.utils.setup_instance_group") @mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError( self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.query_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with test.nested( mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout), mock.patch.object(self.task, '_check_can_migrate_pci')): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {}) @mock.patch('nova.conductor.tasks.live_migrate.' 
                'supports_extended_port_binding', return_value=True)
    def test_call_livem_checks_on_host_bind_ports(self, mock_supports_ext):
        data = objects.LibvirtLiveMigrateData()
        bindings = {
            uuids.port1: {'host': 'dest-host'},
            uuids.port2: {'host': 'dest-host'}
        }

        @mock.patch.object(self.task, '_check_can_migrate_pci')
        @mock.patch.object(self.task.compute_rpcapi,
                           'check_can_live_migrate_destination',
                           return_value=data)
        @mock.patch.object(self.task.network_api,
                           'supports_port_binding_extension',
                           return_value=True)
        @mock.patch.object(self.task.network_api, 'bind_ports_to_host',
                           return_value=bindings)
        def _test(mock_bind_ports_to_host, mock_supports_port_binding,
                  mock_check_can_live_migrate_dest,
                  mock_check_can_migrate_pci):
            nwinfo = network_model.NetworkInfo([
                network_model.VIF(uuids.port1),
                network_model.VIF(uuids.port2)
            ])
            self.instance.info_cache = objects.InstanceInfoCache(
                network_info=nwinfo)
            self.task._call_livem_checks_on_host('dest-host')
            # Assert the migrate_data set on the task based on the port
            # bindings created.
            self.assertIn('vifs', data)
            self.assertEqual(2, len(data.vifs))
            for vif in data.vifs:
                self.assertIn('source_vif', vif)
                self.assertEqual('dest-host', vif.host)
                self.assertEqual(vif.port_id, vif.source_vif['id'])

        _test()

    @mock.patch.object(
        objects.InstanceMapping, 'get_by_instance_uuid',
        side_effect=exception.InstanceMappingNotFound(uuid=uuids.instance))
    def test_get_source_cell_mapping_not_found(self, mock_get):
        """Negative test where InstanceMappingNotFound is raised and
        converted to MigrationPreCheckError.
        """
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_source_cell_mapping)
        mock_get.assert_called_once_with(self.task.context,
                                         self.task.instance.uuid)

    @mock.patch.object(
        objects.HostMapping, 'get_by_host',
        side_effect=exception.HostMappingNotFound(name='destination'))
    def test_get_destination_cell_mapping_not_found(self, mock_get):
        """Negative test where HostMappingNotFound is raised and
        converted to MigrationPreCheckError.
        """
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_destination_cell_mapping)
        mock_get.assert_called_once_with(self.task.context,
                                         self.task.destination)

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename',
                       side_effect=exception.ComputeHostNotFound(host='host'))
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'remove_provider_tree_from_instance_allocation')
    def test_remove_host_allocations_compute_host_not_found(
            self, remove_provider, get_cn):
        """Tests that failing to find a ComputeNode will not blow up
        the _remove_host_allocations method.
        """
        self.task._remove_host_allocations('host', 'node')
        remove_provider.assert_not_called()

    def test_check_can_migrate_pci(self):
        """Tests that _check_can_migrate_pci() allows live-migration if
        instance does not contain non-network related PCI requests and
        raises MigrationPreCheckError otherwise
        """

        @mock.patch.object(self.task.network_api,
                           'supports_port_binding_extension')
        @mock.patch.object(live_migrate,
                           'supports_vif_related_pci_allocations')
        def _test(instance_pci_reqs,
                  supp_binding_ext_retval,
                  supp_vif_related_pci_alloc_retval,
                  mock_supp_vif_related_pci_alloc,
                  mock_supp_port_binding_ext):
            mock_supp_vif_related_pci_alloc.return_value = \
                supp_vif_related_pci_alloc_retval
            mock_supp_port_binding_ext.return_value = \
                supp_binding_ext_retval
            self.task.instance.pci_requests = instance_pci_reqs
            self.task._check_can_migrate_pci("Src", "Dst")
            # in case we managed to get away without raising, check mocks
            mock_supp_port_binding_ext.called_once_with(self.context)
            if instance_pci_reqs:
                self.assertTrue(mock_supp_vif_related_pci_alloc.called)

        # instance has no PCI requests
        _test(None, False, False)  # No support in Neutron and Computes
        _test(None, True, False)   # No support in Computes
        _test(None, False, True)   # No support in Neutron
        _test(None, True, True)    # Support in both Neutron and Computes

        # instance contains network related PCI requests (alias_name=None)
        pci_requests = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(alias_name=None)])
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        _test(pci_requests, True, True)

        # instance contains non-network related PCI requests
        # (alias_name != None)
        pci_requests.requests.append(
            objects.InstancePCIRequest(alias_name="non-network-related-pci"))
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, True)
def stub_add_host_to_aggregate(context, aggregate, host):
    raise exception.ComputeHostNotFound(host=host)
def stub_remove_host_from_aggregate(context, aggregate, host):
    raise exception.ComputeHostNotFound(host=host)
def test_compat_service_id_compute_host_not_found(self, mock_get):
    mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host')
    compute = objects.ComputeNode(host='fake-host', service_id=None)
    primitive = compute.obj_to_primitive(target_version='1.12')
    self.assertEqual(-1, primitive['nova_object.data']['service_id'])
def service_get_by_compute_host(self, context, host_name):
    try:
        return self.cells_rpcapi.service_get_by_compute_host(
            context, host_name)
    except exception.CellRoutingInconsistency:
        raise exception.ComputeHostNotFound(host=host_name)
class ComputeHostAPITestCase(test.TestCase): def setUp(self): super(ComputeHostAPITestCase, self).setUp() self.host_api = compute.HostAPI() self.aggregate_api = compute.AggregateAPI() self.ctxt = context.get_admin_context() self.notifier = self.useFixture( nova_fixtures.NotificationFixture(self)) self.req = fakes.HTTPRequest.blank('') self.controller = services.ServiceController() self.useFixture(nova_fixtures.SingleCellSimple()) def _compare_obj(self, obj, db_obj): test_objects.compare_obj(self, obj, db_obj, allow_missing=test_service.OPTIONAL) def _compare_objs(self, obj_list, db_obj_list): self.assertEqual(len(obj_list), len(db_obj_list), "The length of two object lists are different.") for index, obj in enumerate(obj_list): self._compare_obj(obj, db_obj_list[index]) def test_set_host_enabled(self): @mock.patch.object(self.host_api.rpcapi, 'set_host_enabled', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_set_host_enabled): result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) self.assertEqual(2, len(self.notifier.notifications)) msg = self.notifier.notifications[0] self.assertEqual('HostAPI.set_enabled.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) msg = self.notifier.notifications[1] self.assertEqual('HostAPI.set_enabled.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) _do_test() def test_host_name_from_assert_hosts_exists(self): @mock.patch.object(self.host_api.rpcapi, 'set_host_enabled', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_set_host_enabled): result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) _do_test() def test_get_host_uptime(self): @mock.patch.object(self.host_api.rpcapi, 'get_host_uptime', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_get_host_uptime): result = self.host_api.get_host_uptime(self.ctxt, 'fake_host') self.assertEqual('fake-result', result) _do_test() @mock.patch('nova.db.main.api.service_get_by_compute_host') def test_get_host_uptime_service_down( self, mock_get_service_get_by_compute_host, ): mock_get_service_get_by_compute_host.return_value = dict( test_service.fake_service, id=1) with mock.patch.object( self.host_api.servicegroup_api, 'service_is_up', return_value=False, ): self.assertRaises(exception.ComputeServiceUnavailable, self.host_api.get_host_uptime, self.ctxt, 'fake_host') def test_host_power_action(self): @mock.patch.object(self.host_api.rpcapi, 'host_power_action', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_host_power_action): result = self.host_api.host_power_action(self.ctxt, 'fake_host', 'fake_action') self.assertEqual('fake-result', result) self.assertEqual(2, len(self.notifier.notifications)) msg = self.notifier.notifications[0] 
self.assertEqual('HostAPI.power_action.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) msg = self.notifier.notifications[1] self.assertEqual('HostAPI.power_action.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) _do_test() def test_set_host_maintenance(self): @mock.patch.object(self.host_api.rpcapi, 'host_maintenance_mode', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_host_maintenance_mode): result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host', 'fake_mode') self.assertEqual('fake-result', result) self.assertEqual(2, len(self.notifier.notifications)) msg = self.notifier.notifications[0] self.assertEqual('HostAPI.set_maintenance.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) msg = self.notifier.notifications[1] self.assertEqual('HostAPI.set_maintenance.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) _do_test() def test_service_get_all_cells(self): cells = objects.CellMappingList.get_all(self.ctxt) for cell in cells: with context.target_cell(self.ctxt, cell) as cctxt: objects.Service(context=cctxt, binary='nova-compute', host='host-%s' % cell.uuid).create() services = self.host_api.service_get_all(self.ctxt, all_cells=True) self.assertEqual(sorted(['host-%s' % cell.uuid for cell in cells]), sorted([svc.host for svc in services])) @mock.patch('nova.context.scatter_gather_cells') def test_service_get_all_cells_with_failures(self, mock_sg): service = objects.Service(binary='nova-compute', host='host-%s' % uuids.cell1) mock_sg.return_value = { uuids.cell1: [service], uuids.cell2: context.did_not_respond_sentinel } services = self.host_api.service_get_all(self.ctxt, all_cells=True) # returns the results from cell1 and ignores cell2. self.assertEqual(['host-%s' % uuids.cell1], [svc.host for svc in services]) @mock.patch('nova.objects.CellMappingList.get_all') @mock.patch.object(objects.HostMappingList, 'get_by_cell_id') @mock.patch('nova.context.scatter_gather_all_cells') def test_service_get_all_cells_with_minimal_constructs(self, mock_sg, mock_get_hm, mock_cm_list): service = objects.Service(binary='nova-compute', host='host-%s' % uuids.cell0) cells = [ objects.CellMapping(uuid=uuids.cell1, id=1), objects.CellMapping(uuid=uuids.cell2, id=2), ] mock_cm_list.return_value = cells context.load_cells() # create two hms in cell1, which is the down cell in this test. 
hm1 = objects.HostMapping(self.ctxt, host='host1-unavailable', cell_mapping=cells[0]) hm1.create() hm2 = objects.HostMapping(self.ctxt, host='host2-unavailable', cell_mapping=cells[0]) hm2.create() mock_sg.return_value = { cells[0].uuid: [service], cells[1].uuid: context.did_not_respond_sentinel, } mock_get_hm.return_value = [hm1, hm2] services = self.host_api.service_get_all(self.ctxt, all_cells=True, cell_down_support=True) # returns the results from cell0 and minimal construct from cell1. self.assertEqual(sorted(['host-%s' % uuids.cell0, 'host1-unavailable', 'host2-unavailable']), sorted([svc.host for svc in services])) mock_sg.assert_called_once_with(self.ctxt, objects.ServiceList.get_all, None, set_zones=False) mock_get_hm.assert_called_once_with(self.ctxt, cells[1].id) @mock.patch('nova.db.main.api.service_get_all') def test_service_get_all_no_zones(self, mock_service_get_all): services = [dict(test_service.fake_service, id=1, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] mock_service_get_all.return_value = services # Test no filters result = self.host_api.service_get_all(self.ctxt) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, services) # Test no filters #2 mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters={}) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, services) # Test w/ filter mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2')) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, [services[1]]) @mock.patch('nova.db.main.api.service_get_all') def test_service_get_all(self, mock_service_get_all): services = [dict(test_service.fake_service, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2')] exp_services = [] for service in services: exp_service = {} exp_service.update(availability_zone='nova', **service) exp_services.append(exp_service) mock_service_get_all.return_value = services # Test no filters result = self.host_api.service_get_all(self.ctxt, set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) # Test no filters #2 mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters={}, set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) # Test w/ filter mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2'), set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, [exp_services[1]]) # Test w/ zone filter but no set_zones arg. 
mock_service_get_all.reset_mock() filters = {'availability_zone': 'nova'} result = self.host_api.service_get_all(self.ctxt, filters=filters) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) @mock.patch( 'nova.db.main.api.service_get_by_compute_host', return_value=test_service.fake_service) def test_service_get_by_compute_host( self, mock_service_get_by_compute_host, ): result = self.host_api.service_get_by_compute_host( self.ctxt, 'fake-host') self.assertEqual(test_service.fake_service['id'], result.id) @mock.patch('nova.db.main.api.service_get_by_host_and_binary') @mock.patch('nova.db.main.api.service_update') def test_service_update_by_host_and_binary( self, mock_service_update, mock_service_get_by_host_and_binary, ): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) service_id = 42 expected_result = dict(test_service.fake_service, id=service_id) mock_service_get_by_host_and_binary.return_value = expected_result mock_service_update.return_value = expected_result with mock.patch.object( self.host_api, '_update_compute_provider_status', ) as mock_update_compute_provider_status: result = self.host_api.service_update_by_host_and_binary( self.ctxt, host_name, binary, params_to_update) self._compare_obj(result, expected_result) mock_update_compute_provider_status.assert_called_once_with( self.ctxt, test.MatchType(objects.Service)) @mock.patch('nova.compute.api.HostAPI._update_compute_provider_status', new_callable=mock.NonCallableMock) def test_service_update_no_update_provider_status(self, mock_ucps): """Tests the scenario that the service is updated but the disabled field is not changed, for example the forced_down field is only updated. In this case _update_compute_provider_status should not be called. """ service = objects.Service(forced_down=True) self.assertIn('forced_down', service.obj_what_changed()) with mock.patch.object(service, 'save') as mock_save: retval = self.host_api.service_update(self.ctxt, service) self.assertIs(retval, service) mock_save.assert_called_once_with() @mock.patch('nova.compute.rpcapi.ComputeAPI.set_host_enabled', new_callable=mock.NonCallableMock) def test_update_compute_provider_status_service_too_old(self, mock_she): """Tests the scenario that the service is up but is too old to sync the COMPUTE_STATUS_DISABLED trait. """ service = objects.Service(host='fake-host') service.version = compute.MIN_COMPUTE_SYNC_COMPUTE_STATUS_DISABLED - 1 with mock.patch.object( self.host_api.servicegroup_api, 'service_is_up', return_value=True) as service_is_up: self.host_api._update_compute_provider_status(self.ctxt, service) service_is_up.assert_called_once_with(service) self.assertIn('Compute service on host fake-host is too old to sync ' 'the COMPUTE_STATUS_DISABLED trait in Placement.', self.stdlog.logger.output) @mock.patch('nova.compute.rpcapi.ComputeAPI.set_host_enabled', side_effect=messaging.MessagingTimeout) def test_update_compute_provider_status_service_rpc_error(self, mock_she): """Tests the scenario that the RPC call to the compute service raised some exception. 
""" service = objects.Service(host='fake-host', disabled=True) with mock.patch.object( self.host_api.servicegroup_api, 'service_is_up', return_value=True) as service_is_up: self.host_api._update_compute_provider_status(self.ctxt, service) service_is_up.assert_called_once_with(service) mock_she.assert_called_once_with(self.ctxt, 'fake-host', False) log_output = self.stdlog.logger.output self.assertIn('An error occurred while updating the ' 'COMPUTE_STATUS_DISABLED trait on compute node ' 'resource providers managed by host fake-host.', log_output) self.assertIn('MessagingTimeout', log_output) @mock.patch.object(objects.InstanceList, 'get_by_host', return_value = ['fake-responses']) def test_instance_get_all_by_host(self, mock_get): result = self.host_api.instance_get_all_by_host(self.ctxt, 'fake-host') self.assertEqual(['fake-responses'], result) @mock.patch( 'nova.db.main.api.task_log_get_all', return_value='fake-response') def test_task_log_get_all(self, mock_task_log_get_all): result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) @mock.patch.object(objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping( uuid=uuids.cell1_uuid, transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping( uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2'), objects.CellMapping( uuid=uuids.cell3_uuid, transport_url='mq://fake3', database_connection='db://fake3')])) @mock.patch.object(objects.Service, 'get_by_uuid', side_effect=[ exception.ServiceNotFound( service_id=uuids.service_uuid), objects.Service(uuid=uuids.service_uuid)]) def test_service_get_by_id_using_uuid(self, service_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a service in the HostAPI using a uuid. There are two calls to objects.Service.get_by_uuid and the first raises ServiceNotFound so that we ensure we keep looping over the cells. We'll find the service in the second cell and break the loop so that we don't needlessly check in the third cell. """ def _fake_set_target_cell(ctxt, cell_mapping): if cell_mapping: # These aren't really what would be set for values but let's # keep this simple so we can assert something is set when a # mapping is provided. ctxt.db_connection = cell_mapping.database_connection ctxt.mq_connection = cell_mapping.transport_url # We have to override the SingleCellSimple fixture. self.useFixture(fixtures.MonkeyPatch( 'nova.context.set_target_cell', _fake_set_target_cell)) ctxt = context.get_admin_context() self.assertIsNone(ctxt.db_connection) self.host_api.service_get_by_id(ctxt, uuids.service_uuid) # We should have broken the loop over the cells and set the target cell # on the context. service_get_by_uuid.assert_has_calls( [mock.call(ctxt, uuids.service_uuid)] * 2) self.assertEqual('db://fake2', ctxt.db_connection) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'aggregate_remove_host') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'aggregate_add_host') @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') @mock.patch.object(objects.HostMapping, 'get_by_host') def test_service_delete_compute_in_aggregate( self, mock_hm, mock_get_cn, mock_add_host, mock_remove_host): compute = objects.Service(self.ctxt, **{'host': 'fake-compute-host', 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0}) compute.create() # This is needed because of lazy-loading service.compute_node cn = objects.ComputeNode(uuid=uuids.cn, host="fake-compute-host", hypervisor_hostname="fake-compute-host") mock_get_cn.return_value = [cn] aggregate = self.aggregate_api.create_aggregate(self.ctxt, 'aggregate', None) self.aggregate_api.add_host_to_aggregate(self.ctxt, aggregate.id, 'fake-compute-host') mock_add_host.assert_called_once_with( mock.ANY, aggregate.uuid, host_name='fake-compute-host') self.controller.delete(self.req, compute.id) result = self.aggregate_api.get_aggregate(self.ctxt, aggregate.id).hosts self.assertEqual([], result) mock_hm.return_value.destroy.assert_called_once_with() mock_remove_host.assert_called_once_with( mock.ANY, aggregate.uuid, 'fake-compute-host') @mock.patch('nova.db.main.api.compute_node_statistics') def test_compute_node_statistics(self, mock_cns): # Note this should only be called twice mock_cns.side_effect = [ {'stat1': 1, 'stat2': 4.0}, {'stat1': 5, 'stat2': 1.2}, ] compute.CELLS = [objects.CellMapping(uuid=uuids.cell1), objects.CellMapping( uuid=objects.CellMapping.CELL0_UUID), objects.CellMapping(uuid=uuids.cell2)] stats = self.host_api.compute_node_statistics(self.ctxt) self.assertEqual({'stat1': 6, 'stat2': 5.2}, stats) @mock.patch.object(objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping( uuid=objects.CellMapping.CELL0_UUID, transport_url='mq://cell0', database_connection='db://cell0'), objects.CellMapping( uuid=uuids.cell1_uuid, transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping( uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2')])) @mock.patch.object(objects.ComputeNode, 'get_by_uuid', side_effect=[exception.ComputeHostNotFound( host=uuids.cn_uuid), objects.ComputeNode(uuid=uuids.cn_uuid)]) def test_compute_node_get_using_uuid(self, compute_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a compute node in the HostAPI using a uuid. """ self.host_api.compute_node_get(self.ctxt, uuids.cn_uuid) # cell0 should have been skipped, and the compute node wasn't found # in cell1 so we checked cell2 and found it self.assertEqual(2, compute_get_by_uuid.call_count) compute_get_by_uuid.assert_has_calls( [mock.call(self.ctxt, uuids.cn_uuid)] * 2) @mock.patch.object(objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping( uuid=objects.CellMapping.CELL0_UUID, transport_url='mq://cell0', database_connection='db://cell0'), objects.CellMapping( uuid=uuids.cell1_uuid, transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping( uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2')])) @mock.patch.object(objects.ComputeNode, 'get_by_uuid', side_effect=exception.ComputeHostNotFound( host=uuids.cn_uuid)) def test_compute_node_get_not_found(self, compute_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a compute node in the HostAPI using a uuid and will fail with ComputeHostNotFound if we didn't find it in any cell. 
""" self.assertRaises(exception.ComputeHostNotFound, self.host_api.compute_node_get, self.ctxt, uuids.cn_uuid) # cell0 should have been skipped, and the compute node wasn't found # in cell1 or cell2. self.assertEqual(2, compute_get_by_uuid.call_count) compute_get_by_uuid.assert_has_calls( [mock.call(self.ctxt, uuids.cn_uuid)] * 2)
def test_migrate_nonexistent_host(self):
    exc_info = exception.ComputeHostNotFound(host='nonexistent_host')
    self._test_migrate_exception(exc_info, webob.exc.HTTPBadRequest)
def fake_service_get_by_compute_host(self, context, host):
    if host == 'bad-host':
        raise exception.ComputeHostNotFound(host=host)
    else:
        return {'host_name': host,
                'service': 'compute',
                'zone': 'nova'}
class ComputeHostAPITestCase(test.TestCase): def setUp(self): super(ComputeHostAPITestCase, self).setUp() self.host_api = compute.HostAPI() self.aggregate_api = compute_api.AggregateAPI() self.ctxt = context.get_admin_context() fake_notifier.stub_notifier(self) self.addCleanup(fake_notifier.reset) self.req = fakes.HTTPRequest.blank('') self.controller = services.ServiceController() self.useFixture(nova_fixtures.SingleCellSimple()) def _compare_obj(self, obj, db_obj): test_objects.compare_obj(self, obj, db_obj, allow_missing=test_service.OPTIONAL) def _compare_objs(self, obj_list, db_obj_list): self.assertEqual(len(obj_list), len(db_obj_list), "The length of two object lists are different.") for index, obj in enumerate(obj_list): self._compare_obj(obj, db_obj_list[index]) def test_set_host_enabled(self): fake_notifier.NOTIFICATIONS = [] @mock.patch.object(self.host_api.rpcapi, 'set_host_enabled', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_set_host_enabled): result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_enabled.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_enabled.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_enabled', msg.payload['enabled']) self.assertEqual('fake_host', msg.payload['host_name']) _do_test() def test_host_name_from_assert_hosts_exists(self): @mock.patch.object(self.host_api.rpcapi, 'set_host_enabled', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_set_host_enabled): result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', 'fake_enabled') self.assertEqual('fake-result', result) _do_test() def test_get_host_uptime(self): @mock.patch.object(self.host_api.rpcapi, 'get_host_uptime', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_get_host_uptime): result = self.host_api.get_host_uptime(self.ctxt, 'fake_host') self.assertEqual('fake-result', result) _do_test() def test_get_host_uptime_service_down(self): @mock.patch.object(self.host_api.db, 'service_get_by_compute_host', return_value=dict(test_service.fake_service, id=1)) @mock.patch.object(self.host_api.servicegroup_api, 'service_is_up', return_value=False) def _do_test(mock_service_is_up, mock_service_get_by_compute_host): self.assertRaises(exception.ComputeServiceUnavailable, self.host_api.get_host_uptime, self.ctxt, 'fake_host') _do_test() def test_host_power_action(self): fake_notifier.NOTIFICATIONS = [] @mock.patch.object(self.host_api.rpcapi, 'host_power_action', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_host_power_action): result = self.host_api.host_power_action(self.ctxt, 'fake_host', 'fake_action') self.assertEqual('fake-result', result) self.assertEqual(2, 
len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.power_action.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.power_action.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_action', msg.payload['action']) self.assertEqual('fake_host', msg.payload['host_name']) _do_test() def test_set_host_maintenance(self): fake_notifier.NOTIFICATIONS = [] @mock.patch.object(self.host_api.rpcapi, 'host_maintenance_mode', return_value='fake-result') @mock.patch.object(self.host_api, '_assert_host_exists', return_value='fake_host') def _do_test(mock_assert_host_exists, mock_host_maintenance_mode): result = self.host_api.set_host_maintenance( self.ctxt, 'fake_host', 'fake_mode') self.assertEqual('fake-result', result) self.assertEqual(2, len(fake_notifier.NOTIFICATIONS)) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual('HostAPI.set_maintenance.start', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual('HostAPI.set_maintenance.end', msg.event_type) self.assertEqual('api.fake_host', msg.publisher_id) self.assertEqual('INFO', msg.priority) self.assertEqual('fake_host', msg.payload['host_name']) self.assertEqual('fake_mode', msg.payload['mode']) _do_test() def test_service_get_all_cells(self): cells = objects.CellMappingList.get_all(self.ctxt) for cell in cells: with context.target_cell(self.ctxt, cell) as cctxt: objects.Service(context=cctxt, binary='nova-compute', host='host-%s' % cell.uuid).create() services = self.host_api.service_get_all(self.ctxt, all_cells=True) self.assertEqual(sorted(['host-%s' % cell.uuid for cell in cells]), sorted([svc.host for svc in services])) @mock.patch('nova.context.scatter_gather_cells') def test_service_get_all_cells_with_failures(self, mock_sg): service = objects.Service(binary='nova-compute', host='host-%s' % uuids.cell1) mock_sg.return_value = { uuids.cell1: [service], uuids.cell2: context.raised_exception_sentinel } services = self.host_api.service_get_all(self.ctxt, all_cells=True) # returns the results from cell1 and ignores cell2. 
self.assertEqual(['host-%s' % uuids.cell1], [svc.host for svc in services]) def test_service_get_all_no_zones(self): services = [ dict(test_service.fake_service, id=1, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2') ] @mock.patch.object(self.host_api.db, 'service_get_all') def _do_test(mock_service_get_all): mock_service_get_all.return_value = services # Test no filters result = self.host_api.service_get_all(self.ctxt) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, services) # Test no filters #2 mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters={}) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, services) # Test w/ filter mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2')) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, [services[1]]) _do_test() def test_service_get_all(self): services = [ dict(test_service.fake_service, topic='compute', host='host1'), dict(test_service.fake_service, topic='compute', host='host2') ] exp_services = [] for service in services: exp_service = {} exp_service.update(availability_zone='nova', **service) exp_services.append(exp_service) @mock.patch.object(self.host_api.db, 'service_get_all') def _do_test(mock_service_get_all): mock_service_get_all.return_value = services # Test no filters result = self.host_api.service_get_all(self.ctxt, set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) # Test no filters #2 mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters={}, set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) # Test w/ filter mock_service_get_all.reset_mock() result = self.host_api.service_get_all(self.ctxt, filters=dict(host='host2'), set_zones=True) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, [exp_services[1]]) # Test w/ zone filter but no set_zones arg. 
mock_service_get_all.reset_mock() filters = {'availability_zone': 'nova'} result = self.host_api.service_get_all(self.ctxt, filters=filters) mock_service_get_all.assert_called_once_with(self.ctxt, disabled=None) self._compare_objs(result, exp_services) _do_test() def test_service_get_by_compute_host(self): @mock.patch.object(self.host_api.db, 'service_get_by_compute_host', return_value=test_service.fake_service) def _do_test(mock_service_get_by_compute_host): result = self.host_api.service_get_by_compute_host( self.ctxt, 'fake-host') self.assertEqual(test_service.fake_service['id'], result.id) _do_test() def test_service_update(self): host_name = 'fake-host' binary = 'nova-compute' params_to_update = dict(disabled=True) service_id = 42 expected_result = dict(test_service.fake_service, id=service_id) @mock.patch.object(self.host_api.db, 'service_get_by_host_and_binary') @mock.patch.object(self.host_api.db, 'service_update') def _do_test(mock_service_update, mock_service_get_by_host_and_binary): mock_service_get_by_host_and_binary.return_value = expected_result mock_service_update.return_value = expected_result result = self.host_api.service_update(self.ctxt, host_name, binary, params_to_update) self._compare_obj(result, expected_result) _do_test() @mock.patch.object(objects.InstanceList, 'get_by_host', return_value=['fake-responses']) def test_instance_get_all_by_host(self, mock_get): result = self.host_api.instance_get_all_by_host(self.ctxt, 'fake-host') self.assertEqual(['fake-responses'], result) def test_task_log_get_all(self): @mock.patch.object(self.host_api.db, 'task_log_get_all', return_value='fake-response') def _do_test(mock_task_log_get_all): result = self.host_api.task_log_get_all(self.ctxt, 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state') self.assertEqual('fake-response', result) _do_test() @mock.patch.object( objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping(uuid=uuids.cell1_uuid, transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping(uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2'), objects.CellMapping(uuid=uuids.cell3_uuid, transport_url='mq://fake3', database_connection='db://fake3') ])) @mock.patch.object( objects.Service, 'get_by_uuid', side_effect=[ exception.ServiceNotFound(service_id=uuids.service_uuid), objects.Service(uuid=uuids.service_uuid) ]) def test_service_get_by_id_using_uuid(self, service_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a service in the HostAPI using a uuid. There are two calls to objects.Service.get_by_uuid and the first raises ServiceNotFound so that we ensure we keep looping over the cells. We'll find the service in the second cell and break the loop so that we don't needlessly check in the third cell. """ def _fake_set_target_cell(ctxt, cell_mapping): if cell_mapping: # These aren't really what would be set for values but let's # keep this simple so we can assert something is set when a # mapping is provided. ctxt.db_connection = cell_mapping.database_connection ctxt.mq_connection = cell_mapping.transport_url # We have to override the SingleCellSimple fixture. self.useFixture( fixtures.MonkeyPatch('nova.context.set_target_cell', _fake_set_target_cell)) ctxt = context.get_admin_context() self.assertIsNone(ctxt.db_connection) self.host_api.service_get_by_id(ctxt, uuids.service_uuid) # We should have broken the loop over the cells and set the target cell # on the context. 
service_get_by_uuid.assert_has_calls( [mock.call(ctxt, uuids.service_uuid)] * 2) self.assertEqual('db://fake2', ctxt.db_connection) @mock.patch('nova.context.set_target_cell') @mock.patch('nova.compute.api.load_cells') @mock.patch('nova.objects.Service.get_by_id') def test_service_delete(self, get_by_id, load_cells, set_target): compute_api.CELLS = [ objects.CellMapping(), objects.CellMapping(), objects.CellMapping(), ] service = mock.MagicMock() get_by_id.side_effect = [ exception.ServiceNotFound(service_id=1), service, exception.ServiceNotFound(service_id=1) ] self.host_api.service_delete(self.ctxt, 1) get_by_id.assert_has_calls([ mock.call(self.ctxt, 1), mock.call(self.ctxt, 1), mock.call(self.ctxt, 1) ]) service.destroy.assert_called_once_with() set_target.assert_called_once_with(self.ctxt, compute_api.CELLS[1]) @mock.patch('nova.context.set_target_cell') @mock.patch('nova.compute.api.load_cells') @mock.patch('nova.objects.Service.get_by_id') def test_service_delete_ambiguous(self, get_by_id, load_cells, set_target): compute_api.CELLS = [ objects.CellMapping(), objects.CellMapping(), objects.CellMapping(), ] service1 = mock.MagicMock() service2 = mock.MagicMock() get_by_id.side_effect = [ exception.ServiceNotFound(service_id=1), service1, service2 ] self.assertRaises(exception.ServiceNotUnique, self.host_api.service_delete, self.ctxt, 1) self.assertFalse(service1.destroy.called) self.assertFalse(service2.destroy.called) self.assertFalse(set_target.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'aggregate_remove_host') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'aggregate_add_host') @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') @mock.patch.object(objects.HostMapping, 'get_by_host') def test_service_delete_compute_in_aggregate(self, mock_hm, mock_get_cn, mock_add_host, mock_remove_host): compute = self.host_api.db.service_create( self.ctxt, { 'host': 'fake-compute-host', 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0 }) # This is needed because of lazy-loading service.compute_node cn = objects.ComputeNode(uuid=uuids.cn, host="fake-compute-host", hypervisor_hostname="fake-compute-host") mock_get_cn.return_value = [cn] aggregate = self.aggregate_api.create_aggregate( self.ctxt, 'aggregate', None) self.aggregate_api.add_host_to_aggregate(self.ctxt, aggregate.id, 'fake-compute-host') mock_add_host.assert_called_once_with(mock.ANY, aggregate.uuid, 'fake-compute-host') self.controller.delete(self.req, compute.id) result = self.aggregate_api.get_aggregate(self.ctxt, aggregate.id).hosts self.assertEqual([], result) mock_hm.return_value.destroy.assert_called_once_with() mock_remove_host.assert_called_once_with(mock.ANY, aggregate.uuid, 'fake-compute-host') @mock.patch('nova.db.api.compute_node_statistics') def test_compute_node_statistics(self, mock_cns): # Note this should only be called twice mock_cns.side_effect = [ { 'stat1': 1, 'stat2': 4.0 }, { 'stat1': 5, 'stat2': 1.2 }, ] compute_api.CELLS = [ objects.CellMapping(uuid=uuids.cell1), objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID), objects.CellMapping(uuid=uuids.cell2) ] stats = self.host_api.compute_node_statistics(self.ctxt) self.assertEqual({'stat1': 6, 'stat2': 5.2}, stats) @mock.patch.object( objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID, transport_url='mq://cell0', database_connection='db://cell0'), objects.CellMapping(uuid=uuids.cell1_uuid, 
transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping(uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2') ])) @mock.patch.object(objects.ComputeNode, 'get_by_uuid', side_effect=[ exception.ComputeHostNotFound(host=uuids.cn_uuid), objects.ComputeNode(uuid=uuids.cn_uuid) ]) def test_compute_node_get_using_uuid(self, compute_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a compute node in the HostAPI using a uuid. """ self.host_api.compute_node_get(self.ctxt, uuids.cn_uuid) # cell0 should have been skipped, and the compute node wasn't found # in cell1 so we checked cell2 and found it self.assertEqual(2, compute_get_by_uuid.call_count) compute_get_by_uuid.assert_has_calls( [mock.call(self.ctxt, uuids.cn_uuid)] * 2) @mock.patch.object( objects.CellMappingList, 'get_all', return_value=objects.CellMappingList(objects=[ objects.CellMapping(uuid=objects.CellMapping.CELL0_UUID, transport_url='mq://cell0', database_connection='db://cell0'), objects.CellMapping(uuid=uuids.cell1_uuid, transport_url='mq://fake1', database_connection='db://fake1'), objects.CellMapping(uuid=uuids.cell2_uuid, transport_url='mq://fake2', database_connection='db://fake2') ])) @mock.patch.object( objects.ComputeNode, 'get_by_uuid', side_effect=exception.ComputeHostNotFound(host=uuids.cn_uuid)) def test_compute_node_get_not_found(self, compute_get_by_uuid, cell_mappings_get_all): """Tests that we can lookup a compute node in the HostAPI using a uuid and will fail with ComputeHostNotFound if we didn't find it in any cell. """ self.assertRaises(exception.ComputeHostNotFound, self.host_api.compute_node_get, self.ctxt, uuids.cn_uuid) # cell0 should have been skipped, and the compute node wasn't found # in cell1 or cell2. self.assertEqual(2, compute_get_by_uuid.call_count) compute_get_by_uuid.assert_has_calls( [mock.call(self.ctxt, uuids.cn_uuid)] * 2)
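# Editor's note: a hedged sketch, not part of the original test module.
# The two compute_node_get tests above pin down a cross-cell lookup loop:
# cell0 is skipped, each remaining cell is targeted in turn, and
# ComputeHostNotFound is raised only after every cell has been tried.
# The function name below is hypothetical; the objects/context/exception
# names are the ones already used in these tests.
def _compute_node_get_across_cells(ctxt, compute_uuid):
    cells = objects.CellMappingList.get_all(ctxt)
    for cell in cells:
        if cell.uuid == objects.CellMapping.CELL0_UUID:
            # cell0 never hosts running computes, so the tests expect it
            # to be skipped.
            continue
        with context.target_cell(ctxt, cell) as cctxt:
            try:
                return objects.ComputeNode.get_by_uuid(cctxt, compute_uuid)
            except exception.ComputeHostNotFound:
                continue  # not in this cell, try the next one
    raise exception.ComputeHostNotFound(host=compute_uuid)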
def fake_service_get_by_compute_host(context, host):
    if host == TEST_HYPERS[0]['host']:
        return TEST_SERVICES[0]
    raise exception.ComputeHostNotFound(host=host)
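# Editor's note: hedged usage sketch only, not from the original file.
# A stub like fake_service_get_by_compute_host would normally be wired in
# with mock.patch.object on the HostAPI's db attribute, mirroring the
# style of the surrounding tests; the test method name is illustrative
# and assumes it lives in a HostAPI test class like the one above.
def test_service_get_by_compute_host_not_found_sketch(self):
    with mock.patch.object(
            self.host_api.db, 'service_get_by_compute_host',
            side_effect=fake_service_get_by_compute_host):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.host_api.service_get_by_compute_host,
                          self.ctxt, 'unknown-host')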
def fake_execute(_self):
    raise exception.ComputeHostNotFound(host=hostname)
def get_by_uuid(cls, context, compute_uuid):
    nodes = ComputeNodeList.get_all_by_uuids(context, [compute_uuid])
    # We have a unique index on the uuid column so we can get back 0 or 1.
    if not nodes:
        raise exception.ComputeHostNotFound(host=compute_uuid)
    return nodes[0]
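# Editor's note: hedged sketch of how the get_by_uuid helper above could
# be unit tested in the style of this file; the test method name is
# illustrative and assumes the usual self.context fixture of these
# test cases.
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids',
                   return_value=[])
def test_get_by_uuid_not_found_sketch(self, mock_get_all):
    # With no rows returned the unique-index lookup must raise.
    self.assertRaises(exception.ComputeHostNotFound,
                      objects.ComputeNode.get_by_uuid,
                      self.context, uuids.compute_node)
    mock_get_all.assert_called_once_with(self.context,
                                         [uuids.compute_node])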
class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = "context" self.instance_host = "host" self.instance_uuid = uuids.instance self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() def _generate_task(self): self.task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) def test_execute_with_destination(self, new_mode=True): dest_node = objects.ComputeNode(hypervisor_hostname='dest_node') with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_check_requested_destination', return_value=(mock.sentinel.source_node, dest_node)), mock.patch.object(scheduler_utils, 'claim_resources_on_destination'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch('nova.conductor.tasks.migrate.' 'replace_allocation_with_migration'), mock.patch( 'nova.conductor.tasks.live_migrate.' 'should_do_migration_allocation')) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig, m_alloc, mock_sda): mock_mig.return_value = "bob" m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs) mock_sda.return_value = new_mode self.assertEqual("bob", self.task.execute()) mock_check_up.assert_called_once_with(self.instance_host) mock_check_dest.assert_called_once_with() if new_mode: allocs = mock.sentinel.allocs else: allocs = None mock_claim.assert_called_once_with( self.task.scheduler_client.reportclient, self.instance, mock.sentinel.source_node, dest_node, source_node_allocations=allocs) mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) # make sure the source/dest fields were set on the migration object self.assertEqual(self.instance.node, self.migration.source_node) self.assertEqual(dest_node.hypervisor_hostname, self.migration.dest_node) self.assertEqual(self.task.destination, self.migration.dest_compute) if new_mode: m_alloc.assert_called_once_with(self.context, self.instance, self.migration) else: m_alloc.assert_not_called() def test_execute_with_destination_old_school(self): self.test_execute_with_destination(new_mode=False) def test_execute_without_destination(self): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_find_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch.object(self.migration, 'save'), mock.patch('nova.conductor.tasks.migrate.' 'replace_allocation_with_migration'), mock.patch('nova.conductor.tasks.live_migrate.' 
'should_do_migration_allocation'), ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc, mock_sda): mock_find.return_value = ("found_host", "found_node") mock_mig.return_value = "bob" mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock()) mock_sda.return_value = True self.assertEqual("bob", self.task.execute()) mock_check.assert_called_once_with(self.instance_host) mock_find.assert_called_once_with() mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) self.assertEqual('found_host', self.migration.dest_compute) self.assertEqual('found_node', self.migration.dest_node) self.assertEqual(self.instance.node, self.migration.source_node) self.assertTrue(mock_alloc.called) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = True self.task._check_host_is_up("host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = False self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_instance_host_is_up_fails_if_not_found(self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_host_is_up, "host") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') def test_check_requested_destination(self, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" self.assertEqual((hypervisor_details, hypervisor_details), self.task._check_requested_destination()) self.assertEqual("migrate_data", self.task.migrate_data) mock_get_host.assert_called_once_with(self.context, self.destination) mock_is_up.assert_called_once_with("service") self.assertEqual([ mock.call(self.destination), mock.call(self.instance_host), mock.call(self.destination) ], mock_get_info.call_args_list) mock_check.assert_called_once_with(self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit) def 
test_check_requested_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" self.assertRaises(exception.UnableToMigrateToSelf, self.task._check_requested_destination) @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_requested_destination_fails_when_destination_is_up( self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_requested_destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') def test_check_requested_destination_fails_with_not_enough_memory( self, mock_get_first, mock_is_up): mock_get_first.return_value = (objects.ComputeNode( free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9, )) # free_ram is bigger than instance.ram (512) but the allocation # ratio reduces the total available RAM to 410MB # (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_get_first.assert_called_once_with(self.context, self.destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_diff( self, mock_get_info, mock_check, mock_is_up): mock_get_info.side_effect = [ objects.ComputeNode(hypervisor_type='b'), objects.ComputeNode(hypervisor_type='a') ] self.assertRaises(exception.InvalidHypervisorType, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_too_old( self, mock_get_info, mock_check, mock_is_up): host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} mock_get_info.side_effect = [ objects.ComputeNode(**host1), objects.ComputeNode(**host2) ] self.assertRaises(exception.DestinationHypervisorTooOld, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') @mock.patch.object( objects.HostMapping, 'get_by_host', return_value=objects.HostMapping(cell_mapping=objects.CellMapping( uuid=uuids.different))) def test_check_requested_destination_fails_different_cells( self, mock_get_host_mapping, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, 
free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" ex = self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) self.assertIn('across cells', six.text_type(ex)) def test_find_destination_works(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(objects.RequestSpec, 'reset_forced_destinations') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.fake_spec.reset_forced_destinations() self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual(("host1", "node1"), self.task._find_destination()) # Make sure the request_spec was updated to include the cell # mapping. self.assertIsNotNone(self.fake_spec.requested_destination.cell) # Make sure the spec was updated to include the project_id. self.assertEqual(self.fake_spec.project_id, self.instance.project_id) def test_find_destination_works_with_no_request_spec(self): task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), request_spec=None) another_spec = objects.RequestSpec() self.instance.flavor = objects.Flavor() self.instance.numa_topology = None self.instance.pci_requests = None @mock.patch.object(task, '_call_livem_checks_on_host') @mock.patch.object(task, '_check_compatible_with_source_hypervisor') @mock.patch.object(task.scheduler_client, 'select_destinations') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(utils, 'get_image_from_system_metadata') def do_test(get_image, setup_ig, from_components, select_dest, check_compat, call_livem_checks): get_image.return_value = "image" from_components.return_value = another_spec select_dest.return_value = [[fake_selection1]] self.assertEqual(("host1", "node1"), task._find_destination()) get_image.assert_called_once_with(self.instance.system_metadata) setup_ig.assert_called_once_with(self.context, another_spec) select_dest.assert_called_once_with(self.context, another_spec, [self.instance.uuid], return_objects=True, return_alternates=False) # Make sure the request_spec was updated to include the cell # mapping. 
self.assertIsNotNone(another_spec.requested_destination.cell) check_compat.assert_called_once_with("host1") call_livem_checks.assert_called_once_with("host1") do_test() def test_find_destination_no_image_works(self): self.instance['image_ref'] = '' self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual(("host1", "node1"), self.task._find_destination()) def _test_find_destination_retry_hypervisor_raises(self, error): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(error) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection2]]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() with mock.patch.object(self.task, '_remove_host_allocations') as remove_allocs: self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. 
remove_allocs.assert_called_once_with('host1', 'node1') def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) def test_find_destination_retry_with_invalid_livem_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.Invalid) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection2]]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() with mock.patch.object(self.task, '_remove_host_allocations') as remove_allocs: self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. remove_allocs.assert_called_once_with('host1', 'node1') def test_find_destination_retry_with_failed_migration_pre_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.MigrationPreCheckError("reason")) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection2]]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() with mock.patch.object(self.task, '_remove_host_allocations') as remove_allocs: self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. 
remove_allocs.assert_called_once_with('host1', 'node1') def test_find_destination_retry_exceeds_max(self): self.flags(migrate_max_retries=0) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndReturn([[fake_selection1]]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(exception.DestinationHypervisorTooOld) self.mox.ReplayAll() with test.nested( mock.patch.object(self.task.migration, 'save'), mock.patch.object( self.task, '_remove_host_allocations')) as (save_mock, remove_allocs): self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) save_mock.assert_called_once_with() # Should have removed allocations for the first host. remove_allocs.assert_called_once_with('host1', 'node1') def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False).AndRaise(exception.NoValidHost(reason="")) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") @mock.patch("nova.scheduler.utils.setup_instance_group") @mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError( self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.scheduler_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {}) @mock.patch.object( objects.InstanceMapping, 'get_by_instance_uuid', side_effect=exception.InstanceMappingNotFound(uuid=uuids.instance)) def test_get_source_cell_mapping_not_found(self, mock_get): """Negative test where InstanceMappingNotFound is raised and converted to MigrationPreCheckError. 
""" self.assertRaises(exception.MigrationPreCheckError, self.task._get_source_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.instance.uuid) @mock.patch.object( objects.HostMapping, 'get_by_host', side_effect=exception.HostMappingNotFound(name='destination')) def test_get_destination_cell_mapping_not_found(self, mock_get): """Negative test where HostMappingNotFound is raised and converted to MigrationPreCheckError. """ self.assertRaises(exception.MigrationPreCheckError, self.task._get_destination_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.destination) @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename', side_effect=exception.ComputeHostNotFound(host='host')) def test_remove_host_allocations_compute_host_not_found(self, get_cn): """Tests that failing to find a ComputeNode will not blow up the _remove_host_allocations method. """ with mock.patch.object( self.task.scheduler_client.reportclient, 'remove_provider_from_instance_allocation') as remove_provider: self.task._remove_host_allocations('host', 'node') remove_provider.assert_not_called()
class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = "context" self.instance_host = "host" self.instance_uuid = uuids.instance self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() def _generate_task(self): self.task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), self.fake_spec) def test_execute_with_destination(self): with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_check_requested_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), ) as (mock_check_up, mock_check_dest, mock_mig): mock_mig.return_value = "bob" self.assertEqual("bob", self.task.execute()) mock_check_up.assert_called_once_with(self.instance_host) mock_check_dest.assert_called_once_with() mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None) def test_execute_without_destination(self): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_find_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch.object(self.migration, 'save')) as (mock_check, mock_find, mock_mig, mock_save): mock_find.return_value = "found_host" mock_mig.return_value = "bob" self.assertEqual("bob", self.task.execute()) mock_check.assert_called_once_with(self.instance_host) mock_find.assert_called_once_with() mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest="found_host", block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) self.assertEqual('found_host', self.migration.dest_compute) def test_check_instance_is_active_passes_when_paused(self): self.task.instance['power_state'] = power_state.PAUSED self.task._check_instance_is_active() def test_check_instance_is_active_fails_when_shutdown(self): self.task.instance['power_state'] = power_state.SHUTDOWN self.assertRaises(exception.InstanceInvalidState, self.task._check_instance_is_active) @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up(self, mock_is_up, mock_get): mock_get.return_value = "service" mock_is_up.return_value = True self.task._check_host_is_up("host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(servicegroup.API, 'service_is_up') def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up, 
mock_get): mock_get.return_value = "service" mock_is_up.return_value = False self.assertRaises(exception.ComputeServiceUnavailable, self.task._check_host_is_up, "host") mock_get.assert_called_once_with(self.context, "host") mock_is_up.assert_called_once_with("service") @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_instance_host_is_up_fails_if_not_found(self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_host_is_up, "host") @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @mock.patch.object(compute_rpcapi.ComputeAPI, 'check_can_live_migrate_destination') def test_check_requested_destination(self, mock_check, mock_is_up, mock_get_info, mock_get_host): mock_get_host.return_value = "service" mock_is_up.return_value = True hypervisor_details = objects.ComputeNode(hypervisor_type="a", hypervisor_version=6.1, free_ram_mb=513, memory_mb=512, ram_allocation_ratio=1.0) mock_get_info.return_value = hypervisor_details mock_check.return_value = "migrate_data" self.task._check_requested_destination() self.assertEqual("migrate_data", self.task.migrate_data) mock_get_host.assert_called_once_with(self.context, self.destination) mock_is_up.assert_called_once_with("service") self.assertEqual([ mock.call(self.destination), mock.call(self.instance_host), mock.call(self.destination) ], mock_get_info.call_args_list) mock_check.assert_called_once_with(self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit) def test_check_requested_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" self.assertRaises(exception.UnableToMigrateToSelf, self.task._check_requested_destination) @mock.patch.object(objects.Service, 'get_by_compute_host', side_effect=exception.ComputeHostNotFound(host='host')) def test_check_requested_destination_fails_when_destination_is_up( self, mock): self.assertRaises(exception.ComputeHostNotFound, self.task._check_requested_destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') def test_check_requested_destination_fails_with_not_enough_memory( self, mock_get_first, mock_is_up): mock_get_first.return_value = (objects.ComputeNode( free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9, )) # free_ram is bigger than instance.ram (512) but the allocation # ratio reduces the total available RAM to 410MB # (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_get_first.assert_called_once_with(self.context, self.destination) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_diff( self, mock_get_info, mock_check, mock_is_up): mock_get_info.side_effect = [ objects.ComputeNode(hypervisor_type='b'), objects.ComputeNode(hypervisor_type='a') ] self.assertRaises(exception.InvalidHypervisorType, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) 
mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') def test_check_requested_destination_fails_with_hypervisor_too_old( self, mock_get_info, mock_check, mock_is_up): host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} mock_get_info.side_effect = [ objects.ComputeNode(**host1), objects.ComputeNode(**host2) ] self.assertRaises(exception.DestinationHypervisorTooOld, self.task._check_requested_destination) mock_is_up.assert_called_once_with(self.destination) mock_check.assert_called_once_with() self.assertEqual( [mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) def test_find_destination_works(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(objects.RequestSpec, 'reset_forced_destinations') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.fake_spec.reset_forced_destinations() self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def test_find_destination_works_with_no_request_spec(self): task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), scheduler_client.SchedulerClient(), request_spec=None) another_spec = objects.RequestSpec() self.instance.flavor = objects.Flavor() self.instance.numa_topology = None self.instance.pci_requests = None @mock.patch.object(task, '_call_livem_checks_on_host') @mock.patch.object(task, '_check_compatible_with_source_hypervisor') @mock.patch.object(task.scheduler_client, 'select_destinations') @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(utils, 'get_image_from_system_metadata') def do_test(get_image, setup_ig, from_components, select_dest, check_compat, call_livem_checks): get_image.return_value = "image" from_components.return_value = another_spec select_dest.return_value = [{'host': 'host1'}] self.assertEqual("host1", task._find_destination()) get_image.assert_called_once_with(self.instance.system_metadata) fake_props = {'instance_properties': {'uuid': self.instance_uuid}} setup_ig.assert_called_once_with( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) select_dest.assert_called_once_with(self.context, another_spec, [self.instance.uuid]) check_compat.assert_called_once_with("host1") 
call_livem_checks.assert_called_once_with("host1") do_test() def test_find_destination_no_image_works(self): self.instance['image_ref'] = '' self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1") self.mox.ReplayAll() self.assertEqual("host1", self.task._find_destination()) def _test_find_destination_retry_hypervisor_raises(self, error): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(error) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host2' }]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_old_hypervisor(self): self._test_find_destination_retry_hypervisor_raises( exception.DestinationHypervisorTooOld) def test_find_destination_retry_with_invalid_hypervisor_type(self): self._test_find_destination_retry_hypervisor_raises( exception.InvalidHypervisorType) def test_find_destination_retry_with_invalid_livem_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.Invalid) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host2' }]) 
self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_with_failed_migration_pre_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.MigrationPreCheckError("reason")) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host2' }]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination()) def test_find_destination_retry_exceeds_max(self): self.flags(migrate_max_retries=0) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1' }]) self.task._check_compatible_with_source_hypervisor("host1")\ .AndRaise(exception.DestinationHypervisorTooOld) self.mox.ReplayAll() with mock.patch.object(self.task.migration, 'save') as save_mock: self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination) self.assertEqual('failed', self.task.migration.status) save_mock.assert_called_once_with() def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") fake_props = {'instance_properties': {'uuid': self.instance_uuid}} scheduler_utils.setup_instance_group( self.context, fake_props, {'ignore_hosts': [self.instance_host]}) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndRaise(exception.NoValidHost(reason="")) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination) @mock.patch("nova.utils.get_image_from_system_metadata") @mock.patch("nova.scheduler.utils.build_request_spec") 
@mock.patch("nova.scheduler.utils.setup_instance_group") @mock.patch("nova.objects.RequestSpec.from_primitives") def test_find_destination_with_remoteError( self, m_from_primitives, m_setup_instance_group, m_build_request_spec, m_get_image_from_system_metadata): m_get_image_from_system_metadata.return_value = {'properties': {}} m_build_request_spec.return_value = {} fake_spec = objects.RequestSpec() m_from_primitives.return_value = fake_spec with mock.patch.object(self.task.scheduler_client, 'select_destinations') as m_select_destinations: error = messaging.RemoteError() m_select_destinations.side_effect = error self.assertRaises(exception.MigrationSchedulerRPCError, self.task._find_destination) def test_call_livem_checks_on_host(self): with mock.patch.object(self.task.compute_rpcapi, 'check_can_live_migrate_destination', side_effect=messaging.MessagingTimeout): self.assertRaises(exception.MigrationPreCheckError, self.task._call_livem_checks_on_host, {})
def fake_compute_node_get(context, compute_id):
    for hyper in TEST_HYPERS_OBJ:
        if hyper.id == int(compute_id):
            return hyper
    raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_get(context, compute_id):
    for hyper in TEST_HYPERS:
        if hyper['id'] == compute_id:
            return hyper
    raise exception.ComputeHostNotFound(host=compute_id)
def test_migrate_live_compute_service_not_found(self):
    self._test_migrate_live_failed_with_exception(
        exception.ComputeHostNotFound(host='host'))
class LiveMigrationTaskTestCase(test.NoDBTestCase): def setUp(self): super(LiveMigrationTaskTestCase, self).setUp() self.context = nova_context.get_admin_context() self.instance_host = "host" self.instance_uuid = uuids.instance self.instance_image = "image_ref" db_instance = fake_instance.fake_db_instance( host=self.instance_host, uuid=self.instance_uuid, power_state=power_state.RUNNING, vm_state=vm_states.ACTIVE, memory_mb=512, image_ref=self.instance_image) self.instance = objects.Instance._from_db_object( self.context, objects.Instance(), db_instance) self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.instance.numa_topology = None self.instance.pci_requests = None self.instance.resources = None self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" self.migration = objects.Migration() self.fake_spec = objects.RequestSpec() self._generate_task() _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv') self.heal_reqspec_is_bfv_mock = _p.start() self.addCleanup(_p.stop) _p = mock.patch('nova.objects.RequestSpec.ensure_network_information') self.ensure_network_information_mock = _p.start() self.addCleanup(_p.stop) _p = mock.patch( 'nova.network.neutron.API.' 'get_requested_resource_for_instance', return_value=([], objects.RequestLevelParams())) self.mock_get_res_req = _p.start() self.addCleanup(_p.stop) def _generate_task(self): self.task = live_migrate.LiveMigrationTask( self.context, self.instance, self.destination, self.block_migration, self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(), servicegroup.API(), query.SchedulerQueryClient(), report.SchedulerReportClient(), self.fake_spec) @mock.patch('nova.availability_zones.get_host_availability_zone', return_value='fake-az') def test_execute_with_destination(self, mock_get_az): dest_node = objects.ComputeNode(hypervisor_hostname='dest_node') with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_check_requested_destination'), mock.patch.object(scheduler_utils, 'claim_resources_on_destination'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch('nova.conductor.tasks.migrate.' 
'replace_allocation_with_migration'), mock.patch.object(self.task, '_check_destination_is_not_source'), mock.patch.object(self.task, '_check_destination_has_enough_memory'), mock.patch.object(self.task, '_check_compatible_with_source_hypervisor', return_value=(mock.sentinel.source_node, dest_node)), ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig, m_alloc, m_check_diff, m_check_enough_mem, m_check_compatible): mock_mig.return_value = "bob" m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs) self.assertEqual("bob", self.task.execute()) mock_check_up.assert_has_calls( [mock.call(self.instance_host), mock.call(self.destination)]) mock_check_dest.assert_called_once_with() m_check_diff.assert_called_once() m_check_enough_mem.assert_called_once() m_check_compatible.assert_called_once() allocs = mock.sentinel.allocs mock_claim.assert_called_once_with(self.context, self.task.report_client, self.instance, mock.sentinel.source_node, dest_node, source_allocations=allocs, consumer_generation=None) mock_mig.assert_called_once_with( self.context, host=self.instance_host, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=None) self.assertTrue(mock_save.called) mock_get_az.assert_called_once_with(self.context, self.destination) self.assertEqual('fake-az', self.instance.availability_zone) # make sure the source/dest fields were set on the migration object self.assertEqual(self.instance.node, self.migration.source_node) self.assertEqual(dest_node.hypervisor_hostname, self.migration.dest_node) self.assertEqual(self.task.destination, self.migration.dest_compute) m_alloc.assert_called_once_with(self.context, self.instance, self.migration) # When the task is executed with a destination it means the host is # being forced and we don't call the scheduler, so we don't need to # heal the request spec. self.heal_reqspec_is_bfv_mock.assert_not_called() # When the task is executed with a destination it means the host is # being forced and we don't call the scheduler, so we don't need to # modify the request spec self.ensure_network_information_mock.assert_not_called() @mock.patch('nova.availability_zones.get_host_availability_zone', return_value='nova') def test_execute_without_destination(self, mock_get_az): self.destination = None self._generate_task() self.assertIsNone(self.task.destination) with test.nested( mock.patch.object(self.task, '_check_host_is_up'), mock.patch.object(self.task, '_find_destination'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch.object(self.migration, 'save'), mock.patch('nova.conductor.tasks.migrate.' 
                       'replace_allocation_with_migration'),
        ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc):
            mock_find.return_value = ("found_host", "found_node", None)
            mock_mig.return_value = "bob"
            mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock())

            self.assertEqual("bob", self.task.execute())
            mock_check.assert_called_once_with(self.instance_host)
            mock_find.assert_called_once_with()
            mock_mig.assert_called_once_with(
                self.context,
                host=self.instance_host,
                instance=self.instance,
                dest="found_host",
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=None)
            self.assertTrue(mock_save.called)
            mock_get_az.assert_called_once_with(self.context, 'found_host')
            self.assertEqual('found_host', self.migration.dest_compute)
            self.assertEqual('found_node', self.migration.dest_node)
            self.assertEqual(self.instance.node, self.migration.source_node)
            self.assertTrue(mock_alloc.called)

    def test_check_instance_is_active_passes_when_paused(self):
        self.task.instance['power_state'] = power_state.PAUSED
        self.task._check_instance_is_active()

    def test_check_instance_is_active_fails_when_shutdown(self):
        self.task.instance['power_state'] = power_state.SHUTDOWN
        self.assertRaises(exception.InstanceInvalidState,
                          self.task._check_instance_is_active)

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    def test_check_instance_has_no_numa_passes_no_numa(self, mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        self.task.instance.numa_topology = None
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task._check_instance_has_no_numa()

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    def test_check_instance_has_no_numa_passes_non_kvm(self, mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
            ])
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='xen')
        self.task._check_instance_has_no_numa()

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service, 'get_minimum_version',
                       return_value=39)
    def test_check_instance_has_no_numa_passes_workaround(
            self, mock_get_min_ver, mock_get):
        self.flags(enable_numa_live_migration=True, group='workarounds')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
            ])
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task._check_instance_has_no_numa()
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service, 'get_minimum_version',
                       return_value=39)
    def test_check_instance_has_no_numa_fails(self, mock_get_min_ver,
                                              mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
            ])
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_instance_has_no_numa)
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
    @mock.patch.object(objects.Service,
                       'get_minimum_version', return_value=40)
    def test_check_instance_has_no_numa_new_svc_passes(self,
                                                       mock_get_min_ver,
                                                       mock_get):
        self.flags(enable_numa_live_migration=False, group='workarounds')
        mock_get.return_value = objects.ComputeNode(
            uuid=uuids.cn1, hypervisor_type='qemu')
        self.task.instance.numa_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
            ])
        self.task._check_instance_has_no_numa()
        mock_get_min_ver.assert_called_once_with(self.context,
                                                 'nova-compute')

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up(self, mock_is_up, mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = True

        self.task._check_host_is_up("host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up,
                                                       mock_get):
        mock_get.return_value = "service"
        mock_is_up.return_value = False

        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.task._check_host_is_up, "host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")

    @mock.patch.object(objects.Service, 'get_by_compute_host',
                       side_effect=exception.ComputeHostNotFound(host='host'))
    def test_check_instance_host_is_up_fails_if_not_found(self, mock):
        self.assertRaises(exception.ComputeHostNotFound,
                          self.task._check_host_is_up, "host")

    def test_check_destination_fails_with_same_dest(self):
        self.task.destination = "same"
        self.task.source = "same"
        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.task._check_destination_is_not_source)

    @mock.patch.object(objects.ComputeNode,
                       'get_first_node_by_host_for_old_compat')
    def test_check_destination_fails_with_not_enough_memory(
            self, mock_get_first):
        mock_get_first.return_value = objects.ComputeNode(
            free_ram_mb=513, memory_mb=1024, ram_allocation_ratio=0.9)
        # free_ram is bigger than instance.ram (512) but the allocation
        # ratio reduces the total available RAM to 410MB
        # (1024 * 0.9 - (1024 - 513))
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_destination_has_enough_memory)
        mock_get_first.assert_called_once_with(self.context,
                                               self.destination)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_diff(self,
                                                         mock_get_info):
        mock_get_info.side_effect = [
            objects.ComputeNode(hypervisor_type='b'),
            objects.ComputeNode(hypervisor_type='a')
        ]

        self.assertRaises(exception.InvalidHypervisorType,
                          self.task._check_compatible_with_source_hypervisor,
                          self.destination)
        self.assertEqual(
            [mock.call(self.instance_host), mock.call(self.destination)],
            mock_get_info.call_args_list)

    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_too_old(
            self, mock_get_info):
        host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
        host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
        mock_get_info.side_effect = [
            objects.ComputeNode(**host1),
            objects.ComputeNode(**host2)
        ]

        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.task._check_compatible_with_source_hypervisor,
                          self.destination)
        self.assertEqual(
            [mock.call(self.instance_host), mock.call(self.destination)],
            mock_get_info.call_args_list)
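
    # NOTE: the two compatibility tests above only pin down the behaviour of
    # _check_compatible_with_source_hypervisor() indirectly.  A rough sketch
    # of the comparison they assume (local names here are illustrative, not
    # necessarily the task's own code) is:
    #
    #     source_info = self._get_compute_info(self.source)
    #     dest_info = self._get_compute_info(destination)
    #     if source_info.hypervisor_type != dest_info.hypervisor_type:
    #         raise exception.InvalidHypervisorType()
    #     if source_info.hypervisor_version > dest_info.hypervisor_version:
    #         raise exception.DestinationHypervisorTooOld()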
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    def test_check_requested_destination(self, mock_check):
        mock_check.return_value = "migrate_data"
        self.task.limits = fake_limits1

        with test.nested(
                mock.patch.object(self.task.network_api,
                                  'supports_port_binding_extension',
                                  return_value=False),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            self.assertIsNone(self.task._check_requested_destination())
        self.assertEqual("migrate_data", self.task.migrate_data)
        mock_check.assert_called_once_with(self.context, self.instance,
                                           self.destination,
                                           self.block_migration,
                                           self.disk_over_commit,
                                           self.task.migration, fake_limits1)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    @mock.patch.object(
        objects.HostMapping, 'get_by_host',
        return_value=objects.HostMapping(
            cell_mapping=objects.CellMapping(uuid=uuids.different)))
    def test_check_requested_destination_fails_different_cells(
            self, mock_get_host_mapping, mock_check, mock_is_up,
            mock_get_info, mock_get_host):
        mock_get_host.return_value = "service"
        mock_is_up.return_value = True
        hypervisor_details = objects.ComputeNode(
            hypervisor_type="a",
            hypervisor_version=6.1,
            free_ram_mb=513,
            memory_mb=512,
            ram_allocation_ratio=1.0)
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"

        with test.nested(
                mock.patch.object(self.task.network_api,
                                  'supports_port_binding_extension',
                                  return_value=False),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            ex = self.assertRaises(exception.MigrationPreCheckError,
                                   self.task._check_requested_destination)
        self.assertIn('across cells', str(ex))

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_works(self, mock_setup, mock_reset, mock_select,
                                    mock_check, mock_call):
        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())

        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)
        # Make sure the spec was updated to include the project_id.
        self.assertEqual(self.fake_spec.project_id, self.instance.project_id)

        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_reset.assert_called_once_with()
        self.ensure_network_information_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, self.fake_spec, self.instance)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_no_image_works(self, mock_setup, mock_select,
                                             mock_check, mock_call):
        self.instance['image_ref'] = ''

        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())

        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def _test_find_destination_retry_hypervisor_raises(
            self, error, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        mock_check.side_effect = [error, None]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())

        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)
        ])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_called_once_with('host2', {})

    def test_find_destination_retry_with_old_hypervisor(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.DestinationHypervisorTooOld)

    def test_find_destination_retry_with_invalid_hypervisor_type(self):
        self._test_find_destination_retry_hypervisor_raises(
            exception.InvalidHypervisorType)

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_invalid_livem_checks(
            self, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [exception.Invalid(), None]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())

        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)
        ])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_has_calls(
            [mock.call('host1', {}), mock.call('host2', {})])

    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_failed_migration_pre_checks(
            self, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [
            exception.MigrationPreCheckError('reason'), None
        ]

        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())

        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)
        ])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_has_calls(
            [mock.call('host1', {}), mock.call('host2', {})])

    @mock.patch.object(objects.Migration, 'save')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor',
                       side_effect=exception.DestinationHypervisorTooOld())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_exceeds_max(self, mock_setup, mock_select,
                                                mock_check, mock_remove,
                                                mock_save):
        self.flags(migrate_max_retries=0)

        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)

        self.assertEqual('failed', self.task.migration.status)
        mock_save.assert_called_once_with()
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)
        mock_check.assert_called_once_with('host1')

    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=exception.NoValidHost(reason=""))
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_when_runs_out_of_hosts(self, mock_setup,
                                                     mock_select):
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
                                            [self.instance.uuid],
                                            return_objects=True,
                                            return_alternates=False)

    @mock.patch("nova.utils.get_image_from_system_metadata")
    @mock.patch("nova.scheduler.utils.build_request_spec")
    @mock.patch("nova.scheduler.utils.setup_instance_group")
    @mock.patch("nova.objects.RequestSpec.from_primitives")
    def test_find_destination_with_remoteError(
            self, m_from_primitives, m_setup_instance_group,
            m_build_request_spec, m_get_image_from_system_metadata):
        m_get_image_from_system_metadata.return_value = {'properties': {}}
        m_build_request_spec.return_value = {}
        fake_spec = objects.RequestSpec()
        m_from_primitives.return_value = fake_spec
        with mock.patch.object(
                self.task.query_client,
                'select_destinations') as m_select_destinations:
            error = messaging.RemoteError()
            m_select_destinations.side_effect = error
            self.assertRaises(exception.MigrationSchedulerRPCError,
                              self.task._find_destination)

    def test_call_livem_checks_on_host(self):
        with test.nested(
                mock.patch.object(self.task.compute_rpcapi,
                                  'check_can_live_migrate_destination',
                                  side_effect=messaging.MessagingTimeout),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            self.assertRaises(exception.MigrationPreCheckError,
                              self.task._call_livem_checks_on_host, {}, {})

    @mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_merges_profiles(
            self, mock_bind_ports,
            mock_get_binding_profile_alloc):
        """Assert that if both the migration_data and the provider mapping
        contain binding profile related information then such information is
        merged in the resulting profile.
        """
        self.task.migrate_data = objects.LibvirtLiveMigrateData(vifs=[
            objects.VIFMigrateData(
                port_id=uuids.port1,
                profile_json=jsonutils.dumps({'some-key': 'value'}))
        ])
        provider_mappings = {uuids.port1: [uuids.dest_bw_rp]}
        mock_get_binding_profile_alloc.return_value = uuids.dest_bw_rp

        self.task._bind_ports_on_destination('dest-host', provider_mappings)

        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={
                uuids.port1: {
                    'allocation': uuids.dest_bw_rp,
                    'some-key': 'value'
                }
            })
        mock_get_binding_profile_alloc.assert_called_once_with(
            self.context, uuids.port1, provider_mappings)

    @mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_merges_profiles_extended_res_req(
            self, mock_bind_ports, mock_get_binding_profile_alloc):
        """Assert that if both the migration_data and the provider mapping
        contain binding profile related information and the port has an
        extended resource request then such information is merged in the
        resulting profile.
        """
        self.task.migrate_data = objects.LibvirtLiveMigrateData(vifs=[
            objects.VIFMigrateData(
                port_id=uuids.port1,
                profile_json=jsonutils.dumps({'some-key': 'value'}))
        ])
        provider_mappings = {
            uuids.bw_group: [uuids.dest_bw_rp],
            uuids.pps_group: [uuids.dest_pps_rp],
            uuids.accel_group: [uuids.cyborg_rp],
        }
        mock_get_binding_profile_alloc.return_value = {
            uuids.bw_group: uuids.dest_bw_rp,
            uuids.pps_group: uuids.dest_pps_rp,
        }

        self.task._bind_ports_on_destination('dest-host', provider_mappings)

        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={
                uuids.port1: {
                    'allocation': {
                        uuids.bw_group: uuids.dest_bw_rp,
                        uuids.pps_group: uuids.dest_pps_rp,
                    },
                    'some-key': 'value'
                }
            })
        mock_get_binding_profile_alloc.assert_called_once_with(
            self.context, uuids.port1, provider_mappings)

    @mock.patch('nova.network.neutron.API.get_binding_profile_allocation',
                new=mock.Mock(return_value=None))
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_migration_data(self, mock_bind_ports):
        """Assert that if only the migration_data contains binding profile
        related information then that is sent to neutron.
        """
        self.task.migrate_data = objects.LibvirtLiveMigrateData(vifs=[
            objects.VIFMigrateData(
                port_id=uuids.port1,
                profile_json=jsonutils.dumps({'some-key': 'value'}))
        ])
        provider_mappings = {}

        self.task._bind_ports_on_destination('dest-host', provider_mappings)

        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={uuids.port1: {'some-key': 'value'}})

    @mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_provider_mapping(
            self, mock_bind_ports, mock_get_binding_profile_alloc):
        """Assert that if only the provider mapping contains binding profile
        related information then that is sent to neutron.
""" self.task.migrate_data = objects.LibvirtLiveMigrateData( vifs=[objects.VIFMigrateData(port_id=uuids.port1)]) provider_mappings = {uuids.port1: [uuids.dest_bw_rp]} mock_get_binding_profile_alloc.return_value = uuids.dest_bw_rp self.task._bind_ports_on_destination('dest-host', provider_mappings) mock_bind_ports.assert_called_once_with( context=self.context, instance=self.instance, host='dest-host', vnic_types=None, port_profiles={uuids.port1: { 'allocation': uuids.dest_bw_rp }}) mock_get_binding_profile_alloc.assert_called_once_with( self.context, uuids.port1, provider_mappings) @mock.patch('nova.compute.utils.' 'update_pci_request_spec_with_allocated_interface_name') @mock.patch('nova.scheduler.utils.fill_provider_mapping') @mock.patch.object(live_migrate.LiveMigrationTask, '_call_livem_checks_on_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_check_compatible_with_source_hypervisor') @mock.patch.object(query.SchedulerQueryClient, 'select_destinations', return_value=[[fake_selection1]]) @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations') @mock.patch.object(scheduler_utils, 'setup_instance_group') def test_find_destination_with_resource_request(self, mock_setup, mock_reset, mock_select, mock_check, mock_call, mock_fill_provider_mapping, mock_update_pci_req): resource_req = [objects.RequestGroup(requester_id=uuids.port_id)] self.mock_get_res_req.return_value = (resource_req, objects.RequestLevelParams()) self.instance.pci_requests = objects.InstancePCIRequests(requests=[]) self.assertEqual(("host1", "node1", fake_limits1), self.task._find_destination()) # Make sure the request_spec was updated to include the cell # mapping. self.assertIsNotNone(self.fake_spec.requested_destination.cell) # Make sure the spec was updated to include the project_id. self.assertEqual(self.fake_spec.project_id, self.instance.project_id) # Make sure that requested_resources are added to the request spec self.assertEqual(resource_req, self.task.request_spec.requested_resources) mock_setup.assert_called_once_with(self.context, self.fake_spec) mock_reset.assert_called_once_with() self.ensure_network_information_mock.assert_called_once_with( self.instance) self.heal_reqspec_is_bfv_mock.assert_called_once_with( self.context, self.fake_spec, self.instance) mock_select.assert_called_once_with(self.context, self.fake_spec, [self.instance.uuid], return_objects=True, return_alternates=False) mock_check.assert_called_once_with('host1') mock_call.assert_called_once_with('host1', {uuids.port_id: []}) mock_fill_provider_mapping.assert_called_once_with( self.task.request_spec, fake_selection1) mock_update_pci_req.assert_called_once_with(self.context, self.task.report_client, [], {uuids.port_id: []}) @mock.patch.object( objects.InstanceMapping, 'get_by_instance_uuid', side_effect=exception.InstanceMappingNotFound(uuid=uuids.instance)) def test_get_source_cell_mapping_not_found(self, mock_get): """Negative test where InstanceMappingNotFound is raised and converted to MigrationPreCheckError. """ self.assertRaises(exception.MigrationPreCheckError, self.task._get_source_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.instance.uuid) @mock.patch.object( objects.HostMapping, 'get_by_host', side_effect=exception.HostMappingNotFound(name='destination')) def test_get_destination_cell_mapping_not_found(self, mock_get): """Negative test where HostMappingNotFound is raised and converted to MigrationPreCheckError. 
""" self.assertRaises(exception.MigrationPreCheckError, self.task._get_destination_cell_mapping) mock_get.assert_called_once_with(self.task.context, self.task.destination) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'remove_provider_tree_from_instance_allocation') def test_remove_host_allocations(self, remove_provider): self.task._remove_host_allocations(uuids.cn) remove_provider.assert_called_once_with(self.task.context, self.task.instance.uuid, uuids.cn) def test_check_can_migrate_pci(self): """Tests that _check_can_migrate_pci() allows live-migration if instance does not contain non-network related PCI requests and raises MigrationPreCheckError otherwise """ @mock.patch.object(self.task.network_api, 'supports_port_binding_extension') @mock.patch.object(live_migrate, 'supports_vif_related_pci_allocations') def _test(instance_pci_reqs, supp_binding_ext_retval, supp_vif_related_pci_alloc_retval, mock_supp_vif_related_pci_alloc, mock_supp_port_binding_ext): mock_supp_vif_related_pci_alloc.return_value = \ supp_vif_related_pci_alloc_retval mock_supp_port_binding_ext.return_value = \ supp_binding_ext_retval self.task.instance.pci_requests = instance_pci_reqs self.task._check_can_migrate_pci("Src", "Dst") # in case we managed to get away without rasing, check mocks if instance_pci_reqs: mock_supp_port_binding_ext.assert_called_once_with( self.context) self.assertTrue(mock_supp_vif_related_pci_alloc.called) # instance has no PCI requests _test(None, False, False) # No support in Neutron and Computes _test(None, True, False) # No support in Computes _test(None, False, True) # No support in Neutron _test(None, True, True) # Support in both Neutron and Computes # instance contains network related PCI requests (alias_name=None) pci_requests = objects.InstancePCIRequests( requests=[objects.InstancePCIRequest(alias_name=None)]) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, False, False) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, True, False) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, False, True) _test(pci_requests, True, True) # instance contains Non network related PCI requests (alias_name!=None) pci_requests.requests.append( objects.InstancePCIRequest(alias_name="non-network-related-pci")) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, False, False) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, True, False) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, False, True) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, True, True) def test_check_can_migrate_specific_resources(self): """Test _check_can_migrate_specific_resources allows live migration with vpmem. """ @mock.patch.object(live_migrate, 'supports_vpmem_live_migration') def _test(resources, supp_lm_vpmem_retval, mock_support_lm_vpmem): self.instance.resources = resources mock_support_lm_vpmem.return_value = supp_lm_vpmem_retval self.task._check_can_migrate_specific_resources() vpmem_0 = objects.LibvirtVPMEMDevice(label='4GB', name='ns_0', devpath='/dev/dax0.0', size=4292870144, align=2097152) resource_0 = objects.Resource( provider_uuid=uuids.rp, resource_class="CUSTOM_PMEM_NAMESPACE_4GB", identifier='ns_0', metadata=vpmem_0) resources = objects.ResourceList(objects=[resource_0]) _test(None, False) _test(None, True) _test(resources, True) self.assertRaises(exception.MigrationPreCheckError, _test, resources, False)