def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)

    self.mox.ReplayAll()
    self.assertRaises(exception.MaxRetriesExceeded,
                      self.task._find_destination)
def test_find_destination_no_image_works(self):
    self.instance['image_ref'] = ''

    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    scheduler_utils.build_request_spec(
        self.context, {'properties': {'hw_disk_bus': 'scsi'}},
        mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")

    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def _find_destination(self):
    # TODO(johngarbutt) this retry loop should be shared
    attempted_hosts = [self.source]
    image = None
    if self.instance.image_ref:
        image = compute_utils.get_image_metadata(self.context,
                                                 self.image_api,
                                                 self.instance.image_ref,
                                                 self.instance)
    request_spec = scheduler_utils.build_request_spec(self.context,
                                                      image,
                                                      [self.instance])

    host = None
    while host is None:
        self._check_not_over_max_retries(attempted_hosts)
        filter_properties = {'ignore_hosts': attempted_hosts}
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_properties)
        host = self.scheduler_client.select_destinations(
            self.context, request_spec, filter_properties)[0]['host']
        try:
            self._check_compatible_with_source_hypervisor(host)
            self._call_livem_checks_on_host(host)
        except exception.Invalid as e:
            LOG.debug("Skipping host: %(host)s because: %(e)s",
                      {"host": host, "e": e})
            attempted_hosts.append(host)
            host = None
    return host
def _test_find_destination_retry_hypervisor_raises(self, error):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(error)

    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")

    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)

    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
    scheduler_utils.setup_instance_group(
        self.context, fake_props, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)

    self.mox.ReplayAll()

    with mock.patch.object(self.task.migration, 'save') as save_mock:
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        save_mock.assert_called_once_with()
def test_find_destination_retry_with_failed_migration_pre_checks(self):
    self.flags(migrate_max_retries=1)

    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
    scheduler_utils.setup_instance_group(
        self.context, fake_props, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.MigrationPreCheckError("reason"))

    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")

    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def _execute(self):
    image = self.request_spec.get('image')
    self.quotas = objects.Quotas.from_reservations(self.context,
                                                   self.reservations,
                                                   instance=self.instance)
    scheduler_utils.setup_instance_group(self.context, self.request_spec,
                                         self.filter_properties)
    scheduler_utils.populate_retry(self.filter_properties,
                                   self.instance.uuid)

    # TODO(sbauza): Hydrate here the object until we modify the
    # scheduler.utils methods to directly use the RequestSpec object
    spec_obj = objects.RequestSpec.from_primitives(
        self.context, self.request_spec, self.filter_properties)

    hosts = self.scheduler_client.select_destinations(
        self.context, spec_obj)
    host_state = hosts[0]

    scheduler_utils.populate_filter_properties(self.filter_properties,
                                               host_state)
    # context is not serializable
    self.filter_properties.pop('context', None)

    (host, node) = (host_state['host'], host_state['nodename'])
    self.compute_rpcapi.prep_resize(
        self.context, image, self.instance, self.flavor, host,
        self.reservations, request_spec=self.request_spec,
        filter_properties=self.filter_properties, node=node,
        clean_shutdown=self.clean_shutdown)
def test_find_destination_retry_with_failed_migration_pre_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, "get_image_from_system_metadata") self.mox.StubOutWithMock(scheduler_utils, "build_request_spec") self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group") self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations") self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor") self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host") utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image") scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({}) scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [{"host": "host1"}] ) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1").AndRaise(exception.MigrationPreCheckError("reason")) scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host, "host1"]}) self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn( [{"host": "host2"}] ) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() self.assertEqual("host2", self.task._find_destination())
def test_find_destination_works(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    fake_spec = objects.RequestSpec()
    objects.RequestSpec.from_primitives(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")

    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def _execute(self):
    self.quotas = objects.Quotas.from_reservations(self.context,
                                                   self.reservations,
                                                   instance=self.instance)
    # TODO(sbauza): Remove that once prep_resize() accepts a RequestSpec
    # object in the signature and all the scheduler.utils methods too
    legacy_spec = self.request_spec.to_legacy_request_spec_dict()
    legacy_props = self.request_spec.to_legacy_filter_properties_dict()
    scheduler_utils.setup_instance_group(self.context, legacy_spec,
                                         legacy_props)
    scheduler_utils.populate_retry(legacy_props,
                                   self.instance.uuid)

    # NOTE(sbauza): Force_hosts/nodes needs to be reset
    # if we want to make sure that the next destination
    # is not forced to be the original host
    self.request_spec.reset_forced_destinations()

    # NOTE(danms): Right now we only support migrate to the same
    # cell as the current instance, so request that the scheduler
    # limit thusly.
    instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
        self.context, self.instance.uuid)
    LOG.debug('Requesting cell %(cell)s while migrating',
              {'cell': instance_mapping.cell_mapping.identity},
              instance=self.instance)
    if ('requested_destination' in self.request_spec and
            self.request_spec.requested_destination):
        self.request_spec.requested_destination.cell = (
            instance_mapping.cell_mapping)
    else:
        self.request_spec.requested_destination = objects.Destination(
            cell=instance_mapping.cell_mapping)

    hosts = self.scheduler_client.select_destinations(
        self.context, self.request_spec)
    host_state = hosts[0]

    scheduler_utils.populate_filter_properties(legacy_props,
                                               host_state)
    # context is not serializable
    legacy_props.pop('context', None)

    (host, node) = (host_state['host'], host_state['nodename'])

    self.instance.availability_zone = (
        availability_zones.get_host_availability_zone(
            self.context, host))

    # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
    # oslo.messaging #1529084 to transform datetime values into strings.
    # tl;dr: datetimes in dicts are not accepted as correct values by the
    # rpc fake driver.
    legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

    self.compute_rpcapi.prep_resize(
        self.context, self.instance, legacy_spec['image'],
        self.flavor, host, self.reservations,
        request_spec=legacy_spec, filter_properties=legacy_props,
        node=node, clean_shutdown=self.clean_shutdown)
def _schedule_instances(self, context, request_spec, filter_properties):
    scheduler_utils.setup_instance_group(context, request_spec,
                                         filter_properties)
    # TODO(sbauza): Hydrate here the object until we modify the
    # scheduler.utils methods to directly use the RequestSpec object
    spec_obj = objects.RequestSpec.from_primitives(
        context, request_spec, filter_properties)
    hosts = self.scheduler_client.select_destinations(context, spec_obj)
    return hosts
def _execute(self): image = self.request_spec.get("image") self.quotas = objects.Quotas.from_reservations(self.context, self.reservations, instance=self.instance) scheduler_utils.setup_instance_group(self.context, self.request_spec, self.filter_properties) scheduler_utils.populate_retry(self.filter_properties, self.instance.uuid) hosts = self.scheduler_client.select_destinations(self.context, self.request_spec, self.filter_properties) host_state = hosts[0] scheduler_utils.populate_filter_properties(self.filter_properties, host_state) # context is not serializable self.filter_properties.pop("context", None) (host, node) = (host_state["host"], host_state["nodename"]) self.compute_rpcapi.prep_resize( self.context, image, self.instance, self.flavor, host, self.reservations, request_spec=self.request_spec, filter_properties=self.filter_properties, node=node, clean_shutdown=self.clean_shutdown, )
def _test_find_destination_retry_hypervisor_raises(self, error):
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    compute_utils.get_image_metadata(
        self.context, self.task.image_api, self.instance_image,
        self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(error)

    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")

    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_works(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec,
                             'reset_forced_destinations')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
    scheduler_utils.setup_instance_group(
        self.context, fake_props, {'ignore_hosts': [self.instance_host]})
    self.fake_spec.reset_forced_destinations()
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")

    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_with_invalid_livem_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.Invalid)

    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")

    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def _cold_migrate(self, context, instance, flavor, filter_properties,
                  reservations):
    image_ref = instance.image_ref
    image = compute_utils.get_image_metadata(
        context, self.image_api, image_ref, instance)

    request_spec = scheduler_utils.build_request_spec(
        context, image, [instance], instance_type=flavor)

    quotas = objects.Quotas.from_reservations(context,
                                              reservations,
                                              instance=instance)
    scheduler_utils.setup_instance_group(context, request_spec,
                                         filter_properties)
    try:
        scheduler_utils.populate_retry(filter_properties, instance['uuid'])
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
        host_state = hosts[0]
    except exception.NoValidHost as ex:
        vm_state = instance['vm_state']
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {'vm_state': vm_state, 'task_state': None}
        self._set_vm_state_and_notify(context, 'migrate_server',
                                      updates, ex, request_spec)
        quotas.rollback()

        # if the flavor IDs match, it's migrate; otherwise resize
        if flavor['id'] == instance['instance_type_id']:
            msg = _("No valid host found for cold migrate")
        else:
            msg = _("No valid host found for resize")
        raise exception.NoValidHost(reason=msg)

    try:
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        # context is not serializable
        filter_properties.pop('context', None)

        # TODO(timello): originally, instance_type in request_spec
        # on compute.api.resize does not have 'extra_specs', so we
        # remove it for now to keep tests backward compatibility.
        request_spec['instance_type'].pop('extra_specs', None)

        (host, node) = (host_state['host'], host_state['nodename'])
        self.compute_rpcapi.prep_resize(
            context, image, instance, flavor, host,
            reservations, request_spec=request_spec,
            filter_properties=filter_properties, node=node)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {'vm_state': instance['vm_state'],
                       'task_state': None}
            self._set_vm_state_and_notify(context, 'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()
def _cold_migrate(self, context, instance, flavor, filter_properties, reservations, clean_shutdown):
    image_ref = instance.image_ref
    image = compute_utils.get_image_metadata(context, self.image_api, image_ref, instance)

    request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor)

    quotas = objects.Quotas.from_reservations(context, reservations, instance=instance)
    try:
        scheduler_utils.setup_instance_group(context, request_spec, filter_properties)
        scheduler_utils.populate_retry(filter_properties, instance["uuid"])
        hosts = self.scheduler_client.select_destinations(context, request_spec, filter_properties)
        host_state = hosts[0]
    except exception.NoValidHost as ex:
        vm_state = instance.vm_state
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {"vm_state": vm_state, "task_state": None}
        self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
        quotas.rollback()

        # if the flavor IDs match, it's migrate; otherwise resize
        if flavor["id"] == instance["instance_type_id"]:
            msg = _("No valid host found for cold migrate")
        else:
            msg = _("No valid host found for resize")
        raise exception.NoValidHost(reason=msg)
    except exception.UnsupportedPolicyException as ex:
        with excutils.save_and_reraise_exception():
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {"vm_state": vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
            quotas.rollback()

    try:
        scheduler_utils.populate_filter_properties(filter_properties, host_state)
        # context is not serializable
        filter_properties.pop("context", None)

        (host, node) = (host_state["host"], host_state["nodename"])
        self.compute_rpcapi.prep_resize(
            context,
            image,
            instance,
            flavor,
            host,
            reservations,
            request_spec=request_spec,
            filter_properties=filter_properties,
            node=node,
            clean_shutdown=clean_shutdown,
        )
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {"vm_state": instance.vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
            quotas.rollback()
def _schedule_instances(self, context, image, filter_properties,
                        *instances):
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    scheduler_utils.setup_instance_group(context, request_spec,
                                         filter_properties)
    hosts = self.scheduler_client.select_destinations(context,
                                                      request_spec,
                                                      filter_properties)
    return hosts
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping=None,
                    legacy_bdm=True):
    # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
    # 2.0 of the RPC API.
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    scheduler_utils.setup_instance_group(context, request_spec,
                                         filter_properties)
    # TODO(danms): Remove this in version 2.0 of the RPC API
    if (requested_networks and
            not isinstance(requested_networks,
                           objects.NetworkRequestList)):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in requested_networks])

    try:
        # check retry policy. Rather ugly use of instances[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties,
                                       instances[0].uuid)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
    except Exception as exc:
        for instance in instances:
            scheduler_driver.handle_schedule_error(
                context, exc, instance.uuid, request_spec)
        return

    for (instance, host) in itertools.izip(instances, hosts):
        try:
            instance.refresh()
        except (exception.InstanceNotFound,
                exception.InstanceInfoCacheNotFound):
            LOG.debug('Instance deleted during build', instance=instance)
            continue
        local_filter_props = copy.deepcopy(filter_properties)
        scheduler_utils.populate_filter_properties(local_filter_props,
                                                   host)
        # The block_device_mapping passed from the api doesn't contain
        # instance specific information
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        self.compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec,
            filter_properties=local_filter_props,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=bdms, node=host['nodename'],
            limits=host['limits'])
def _create_minimal_request_spec(ctxt, instance):
    request_spec = objects.RequestSpec.from_components(
        ctxt, instance.uuid, instance.image_meta, instance.flavor,
        instance.numa_topology, instance.pci_requests,
        {}, None, instance.availability_zone,
        project_id=instance.project_id,
        user_id=instance.user_id
    )
    scheduler_utils.setup_instance_group(ctxt, request_spec)
    request_spec.create()
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                     injected_files, new_pass, orig_sys_metadata,
                     bdms, recreate, on_shared_storage,
                     preserve_ephemeral=False, host=None):
    with compute_utils.EventReporter(context, 'rebuild_server',
                                     instance.uuid):
        if not host:
            # NOTE(lcostantino): Retrieve scheduler filters for the
            # instance when the feature is available
            filter_properties = {'ignore_hosts': [instance.host]}
            request_spec = scheduler_utils.build_request_spec(
                context, image_ref, [instance])
            try:
                scheduler_utils.setup_instance_group(context, request_spec,
                                                     filter_properties)
                hosts = self.scheduler_client.select_destinations(
                    context, request_spec, filter_properties)
                host = hosts.pop(0)['host']
            except exception.NoValidHost as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context, instance.uuid, 'rebuild_server',
                        {'vm_state': instance.vm_state,
                         'task_state': None}, ex, request_spec)
                    LOG.warning(_LW("No valid host found for rebuild"),
                                instance=instance)
            except exception.UnsupportedPolicyException as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context, instance.uuid, 'rebuild_server',
                        {'vm_state': instance.vm_state,
                         'task_state': None}, ex, request_spec)
                    LOG.warning(_LW("Server with unsupported policy "
                                    "cannot be rebuilt"),
                                instance=instance)

        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, "rebuild.scheduled")

        self.compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host)
def test_setup_instance_group_with_no_group(self, mock_ggd):
    mock_ggd.return_value = None
    spec = objects.RequestSpec(instance_uuid=uuids.instance)
    spec.instance_group = objects.InstanceGroup(hosts=['hostC'])

    scheduler_utils.setup_instance_group(self.context, spec)

    mock_ggd.assert_called_once_with(self.context, uuids.instance,
                                     ['hostC'])
    # Make sure the field isn't touched by the caller.
    self.assertFalse(spec.instance_group.obj_attr_is_set('policies'))
    self.assertEqual(['hostC'], spec.instance_group.hosts)
def _create_minimal_request_spec(context, instance):
    image = instance.image_meta
    # This is an old instance. Let's try to populate a RequestSpec
    # object using the existing information we have previously saved.
    request_spec = objects.RequestSpec.from_components(
        context, instance.uuid, image,
        instance.flavor, instance.numa_topology, instance.pci_requests,
        {}, None, instance.availability_zone
    )
    scheduler_utils.setup_instance_group(context, request_spec)
    request_spec.create()
def test_setup_instance_group_with_no_group(self, mock_ggd):
    mock_ggd.return_value = None
    spec = {'instance_properties': {'uuid': 'fake-uuid'}}
    filter_props = {'group_hosts': ['hostC']}

    scheduler_utils.setup_instance_group(self.context, spec, filter_props)

    mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
                                     ['hostC'])
    self.assertNotIn('group_updated', filter_props)
    self.assertNotIn('group_policies', filter_props)
    self.assertEqual(['hostC'], filter_props['group_hosts'])
def rebuild_instance(
    self,
    context,
    instance,
    orig_image_ref,
    image_ref,
    injected_files,
    new_pass,
    orig_sys_metadata,
    bdms,
    recreate,
    on_shared_storage,
    preserve_ephemeral=False,
    host=None,
):
    with compute_utils.EventReporter(context, "rebuild_server", instance.uuid):
        if not host:
            # NOTE(lcostantino): Retrieve scheduler filters for the
            # instance when the feature is available
            filter_properties = {"ignore_hosts": [instance.host]}
            request_spec = scheduler_utils.build_request_spec(context, image_ref, [instance])
            scheduler_utils.setup_instance_group(context, request_spec, filter_properties)
            try:
                hosts = self.scheduler_client.select_destinations(context, request_spec, filter_properties)
                host = hosts.pop(0)["host"]
            except exception.NoValidHost as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context,
                        instance.uuid,
                        "rebuild_server",
                        {"vm_state": instance.vm_state, "task_state": None},
                        ex,
                        request_spec,
                    )
                    LOG.warning(_LW("No valid host found for rebuild"), instance=instance)

        self.compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host,
        )
def _find_destination(self):
    # TODO(johngarbutt) this retry loop should be shared
    attempted_hosts = [self.source]
    image = utils.get_image_from_system_metadata(
        self.instance.system_metadata)
    filter_properties = {'ignore_hosts': attempted_hosts}
    # TODO(sbauza): Remove that once setup_instance_group() accepts a
    # RequestSpec object
    request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
    scheduler_utils.setup_instance_group(self.context, request_spec,
                                         filter_properties)
    if not self.request_spec:
        # NOTE(sbauza): We were unable to find an original RequestSpec
        # object - probably because the instance is old.
        # We need to mock that the old way
        request_spec = objects.RequestSpec.from_components(
            self.context, self.instance.uuid, image,
            self.instance.flavor, self.instance.numa_topology,
            self.instance.pci_requests,
            filter_properties, None, self.instance.availability_zone
        )
    else:
        request_spec = self.request_spec
        # NOTE(sbauza): Force_hosts/nodes needs to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        request_spec.reset_forced_destinations()

    host = None
    while host is None:
        self._check_not_over_max_retries(attempted_hosts)
        request_spec.ignore_hosts = attempted_hosts
        try:
            host = self.scheduler_client.select_destinations(
                self.context, request_spec,
                [self.instance.uuid])[0]['host']
        except messaging.RemoteError as ex:
            # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
            # scheduling algorithm is R-R, we can let other scheduler try.
            # Note(ShaoHe Feng) There are types of RemoteError, such as
            # NoSuchMethod, UnsupportedVersion, we can distinguish it by
            # ex.exc_type.
            raise exception.MigrationSchedulerRPCError(
                reason=six.text_type(ex))
        try:
            self._check_compatible_with_source_hypervisor(host)
            self._call_livem_checks_on_host(host)
        except (exception.Invalid, exception.MigrationPreCheckError) as e:
            LOG.debug("Skipping host: %(host)s because: %(e)s",
                      {"host": host, "e": e})
            attempted_hosts.append(host)
            host = None
    return host
def _find_destination(self):
    # TODO(johngarbutt) this retry loop should be shared
    attempted_hosts = [self.source]
    image = utils.get_image_from_system_metadata(
        self.instance.system_metadata)
    filter_properties = {'ignore_hosts': attempted_hosts}
    # TODO(sbauza): Remove that once setup_instance_group() accepts a
    # RequestSpec object
    request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
    scheduler_utils.setup_instance_group(self.context, request_spec,
                                         filter_properties)
    if not self.request_spec:
        # NOTE(sbauza): We were unable to find an original RequestSpec
        # object - probably because the instance is old.
        # We need to mock that the old way
        request_spec = objects.RequestSpec.from_components(
            self.context, self.instance.uuid, image,
            self.instance.flavor, self.instance.numa_topology,
            self.instance.pci_requests,
            filter_properties, None, self.instance.availability_zone
        )
    else:
        request_spec = self.request_spec
        # NOTE(sbauza): Force_hosts/nodes needs to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        request_spec.reset_forced_destinations()

    host = None
    while host is None:
        self._check_not_over_max_retries(attempted_hosts)
        request_spec.ignore_hosts = attempted_hosts
        try:
            host = self.scheduler_client.select_destinations(
                self.context, request_spec)[0]['host']
        except messaging.RemoteError as ex:
            # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
            # scheduling algorithm is R-R, we can let other scheduler try.
            # Note(ShaoHe Feng) There are types of RemoteError, such as
            # NoSuchMethod, UnsupportedVersion, we can distinguish it by
            # ex.exc_type.
            raise exception.MigrationSchedulerRPCError(
                reason=six.text_type(ex))
        try:
            self._check_compatible_with_source_hypervisor(host)
            self._call_livem_checks_on_host(host)
        except (exception.Invalid, exception.MigrationPreCheckError) as e:
            LOG.debug("Skipping host: %(host)s because: %(e)s",
                      {"host": host, "e": e})
            attempted_hosts.append(host)
            host = None
    return host
def _create_minimal_request_spec(context, instance):
    image = instance.image_meta
    # This is an old instance. Let's try to populate a RequestSpec
    # object using the existing information we have previously saved.
    request_spec = objects.RequestSpec.from_components(
        context, instance.uuid, image,
        instance.flavor, instance.numa_topology, instance.pci_requests,
        {}, None, instance.availability_zone,
        project_id=instance.project_id
    )
    scheduler_utils.setup_instance_group(context, request_spec)
    request_spec.create()
def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, "get_image_from_system_metadata") self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group") self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations") utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image") fake_props = {"instance_properties": {"uuid": self.instance_uuid}} scheduler_utils.setup_instance_group(self.context, fake_props, {"ignore_hosts": [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndRaise( exception.NoValidHost(reason="") ) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
    mock_ggd.return_value = (set(['hostA', 'hostB']), ['policy'])
    spec = {'instance_uuids': ['fake-uuid']}
    filter_props = {'group_hosts': ['hostC']}

    scheduler_utils.setup_instance_group(self.context, spec, filter_props)

    mock_ggd.assert_called_once_with(self.context, ['fake-uuid'],
                                     ['hostC'])
    expected_filter_props = {'group_updated': True,
                             'group_hosts': set(['hostA', 'hostB']),
                             'group_policies': ['policy']}
    self.assertEqual(expected_filter_props, filter_props)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                     injected_files, new_pass, orig_sys_metadata,
                     bdms, recreate, on_shared_storage,
                     preserve_ephemeral=False, host=None):
    with compute_utils.EventReporter(context, 'rebuild_server',
                                     instance.uuid):
        if not host:
            # NOTE(lcostantino): Retrieve scheduler filters for the
            # instance when the feature is available
            filter_properties = {'ignore_hosts': [instance.host]}
            request_spec = scheduler_utils.build_request_spec(
                context, image_ref, [instance])
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            try:
                hosts = self.scheduler_client.select_destinations(
                    context, request_spec, filter_properties)
                host = hosts.pop(0)['host']
            except exception.NoValidHost as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context, 'rebuild_server', {
                            'vm_state': instance.vm_state,
                            'task_state': None
                        }, ex, request_spec)
                    LOG.warning(_LW("No valid host found for rebuild"),
                                instance=instance)

        self.compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host)
def _get_request_spec_for_select_destinations(self, attempted_hosts=None): """Builds a RequestSpec that can be passed to select_destinations Used when calling the scheduler to pick a destination host for live migrating the instance. :param attempted_hosts: List of host names to ignore in the scheduler. This is generally at least seeded with the source host. :returns: nova.objects.RequestSpec object """ if not self.request_spec: # NOTE(sbauza): We were unable to find an original RequestSpec # object - probably because the instance is old. # We need to mock that the old way image = utils.get_image_from_system_metadata( self.instance.system_metadata) filter_properties = {'ignore_hosts': attempted_hosts} request_spec = objects.RequestSpec.from_components( self.context, self.instance.uuid, image, self.instance.flavor, self.instance.numa_topology, self.instance.pci_requests, filter_properties, None, self.instance.availability_zone ) else: request_spec = self.request_spec # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() scheduler_utils.setup_instance_group(self.context, request_spec) # We currently only support live migrating to hosts in the same # cell that the instance lives in, so we need to tell the scheduler # to limit the applicable hosts based on cell. cell_mapping = self._get_source_cell_mapping() LOG.debug('Requesting cell %(cell)s while live migrating', {'cell': cell_mapping.identity}, instance=self.instance) if ('requested_destination' in request_spec and request_spec.requested_destination): request_spec.requested_destination.cell = cell_mapping else: request_spec.requested_destination = objects.Destination( cell=cell_mapping) request_spec.ensure_project_and_user_id(self.instance) request_spec.ensure_network_metadata(self.instance) compute_utils.heal_reqspec_is_bfv( self.context, request_spec, self.instance) return request_spec
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                     injected_files, new_pass, orig_sys_metadata,
                     bdms, recreate, on_shared_storage,
                     preserve_ephemeral=False, host=None):
    with compute_utils.EventReporter(context, 'rebuild_server',
                                     instance.uuid):
        if not host:
            # NOTE(): Retrieve scheduler filters for the
            # instance when the feature is available
            filter_properties = {'ignore_hosts': [instance.host]}
            extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
                context, instance['uuid'])
            if not extra.scheduler_hints:
                sch_hints = {}
            else:
                sch_hints = jsonutils.loads(extra.scheduler_hints)
            filter_properties['scheduler_hints'] = sch_hints
            request_spec = scheduler_utils.build_request_spec(
                context, image_ref, [instance])
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            try:
                hosts = self.scheduler_client.select_destinations(
                    context, request_spec, filter_properties)
                host = hosts.pop(0)['host']
            except exception.NoValidHost as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify(
                        context, 'rebuild_server',
                        {'vm_state': instance.vm_state,
                         'task_state': None}, ex, request_spec)
                    LOG.warning(_("No valid host found for rebuild"),
                                instance=instance)

        self.compute_rpcapi.rebuild_instance(
            context,
            instance=instance,
            new_pass=new_pass,
            injected_files=injected_files,
            image_ref=image_ref,
            orig_image_ref=orig_image_ref,
            orig_sys_metadata=orig_sys_metadata,
            bdms=bdms,
            recreate=recreate,
            on_shared_storage=on_shared_storage,
            preserve_ephemeral=preserve_ephemeral,
            host=host)
def _create_minimal_request_spec(ctxt, instance):
    request_spec = objects.RequestSpec.from_components(
        ctxt, instance.uuid, instance.image_meta, instance.flavor,
        instance.numa_topology, instance.pci_requests,
        {}, None, instance.availability_zone,
        project_id=instance.project_id,
        user_id=instance.user_id)
    scheduler_utils.setup_instance_group(ctxt, request_spec)
    request_spec.create()
def test_find_destination_when_runs_out_of_hosts(self): self.mox.StubOutWithMock(utils, "get_image_from_system_metadata") self.mox.StubOutWithMock(scheduler_utils, "build_request_spec") self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group") self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations") utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image") scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({}) scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host]}) self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndRaise( exception.NoValidHost(reason="") ) self.mox.ReplayAll() self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_retry_with_failed_migration_pre_checks(self):
    self.flags(migrate_max_retries=1)
    self.instance.numa_topology = None
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    self.mox.StubOutWithMock(self.instance, 'is_volume_backed')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.instance.is_volume_backed().AndReturn(False)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1', 'nodename': 'node1',
              'limits': 'fake-limits'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host(
        "host1", limits='fake-limits').AndRaise(
            exception.MigrationPreCheckError("reason"))

    self.instance.is_volume_backed().AndReturn(False)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host2', 'nodename': 'node2',
              'limits': 'fake-limits'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2", limits='fake-limits')

    self.mox.ReplayAll()
    with mock.patch.object(self.task,
                           '_remove_host_allocations') as remove_allocs:
        self.assertEqual(("host2", 'fake-limits'),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        remove_allocs.assert_called_once_with('host1', 'node1',
                                              self.fake_spec)
def test_find_destination_when_runs_out_of_hosts(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec,
        [self.instance.uuid]).AndRaise(exception.NoValidHost(reason=""))

    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost, self.task._find_destination)
def _execute(self):
    image = self.request_spec.image
    self.quotas = objects.Quotas.from_reservations(self.context,
                                                   self.reservations,
                                                   instance=self.instance)
    # TODO(sbauza): Remove that once prep_resize() accepts a RequestSpec
    # object in the signature and all the scheduler.utils methods too
    legacy_spec = self.request_spec.to_legacy_request_spec_dict()
    legacy_props = self.request_spec.to_legacy_filter_properties_dict()
    scheduler_utils.setup_instance_group(self.context, legacy_spec,
                                         legacy_props)
    scheduler_utils.populate_retry(legacy_props,
                                   self.instance.uuid)

    # TODO(sbauza): Remove that RequestSpec rehydratation once
    # scheduler.utils methods use directly the NovaObject.
    self.request_spec = objects.RequestSpec.from_components(
        self.context, self.instance.uuid, image,
        self.flavor, self.instance.numa_topology,
        self.instance.pci_requests, legacy_props, None,
        self.instance.availability_zone)

    # NOTE(sbauza): Force_hosts/nodes needs to be reset
    # if we want to make sure that the next destination
    # is not forced to be the original host
    self.request_spec.reset_forced_destinations()

    hosts = self.scheduler_client.select_destinations(
        self.context, self.request_spec)
    host_state = hosts[0]

    scheduler_utils.populate_filter_properties(legacy_props,
                                               host_state)
    # context is not serializable
    legacy_props.pop('context', None)

    (host, node) = (host_state['host'], host_state['nodename'])

    # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
    # oslo.messaging #1529084 to transform datetime values into strings.
    # tl;dr: datetimes in dicts are not accepted as correct values by the
    # rpc fake driver.
    legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

    self.compute_rpcapi.prep_live_resize(
        self.context, self.instance, legacy_spec['image'],
        self.flavor, host, self.reservations,
        request_spec=legacy_spec, filter_properties=legacy_props,
        node=node, clean_shutdown=self.clean_shutdown)
def test_setup_instance_group_in_request_spec(self, mock_ggd):
    mock_ggd.return_value = scheduler_utils.GroupDetails(
        hosts=set(['hostA', 'hostB']), policies=['policy'],
        members=['instance1'])
    spec = objects.RequestSpec(instance_uuid=uuids.instance)
    spec.instance_group = objects.InstanceGroup(hosts=['hostC'])

    scheduler_utils.setup_instance_group(self.context, spec)

    mock_ggd.assert_called_once_with(self.context, uuids.instance,
                                     ['hostC'])
    # Given it returns a list from a set, make sure it's sorted.
    self.assertEqual(['hostA', 'hostB'],
                     sorted(spec.instance_group.hosts))
    self.assertEqual(['policy'], spec.instance_group.policies)
    self.assertEqual(['instance1'], spec.instance_group.members)
def _create_minimal_request_spec(context, instance):
    image = instance.image_meta
    # TODO(sbauza): Modify that once setup_instance_group() accepts a
    # RequestSpec object
    request_spec = {'instance_properties': {'uuid': instance.uuid}}
    filter_properties = {}
    scheduler_utils.setup_instance_group(context, request_spec,
                                         filter_properties)
    # This is an old instance. Let's try to populate a RequestSpec
    # object using the existing information we have previously saved.
    request_spec = objects.RequestSpec.from_components(
        context, instance.uuid, image,
        instance.flavor, instance.numa_topology, instance.pci_requests,
        filter_properties, None, instance.availability_zone)
    request_spec.create()
def _get_request_spec_for_select_destinations(self, attempted_hosts=None): """Builds a RequestSpec that can be passed to select_destinations Used when calling the scheduler to pick a destination host for live migrating the instance. :param attempted_hosts: List of host names to ignore in the scheduler. This is generally at least seeded with the source host. :returns: nova.objects.RequestSpec object """ request_spec = self.request_spec # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() port_res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( self.context, self.instance.uuid)) # NOTE(gibi): When cyborg or other module wants to handle # similar non-nova resources then here we have to collect # all the external resource requests in a single list and # add them to the RequestSpec. request_spec.requested_resources = port_res_req request_spec.request_level_params = req_lvl_params scheduler_utils.setup_instance_group(self.context, request_spec) # We currently only support live migrating to hosts in the same # cell that the instance lives in, so we need to tell the scheduler # to limit the applicable hosts based on cell. cell_mapping = self._get_source_cell_mapping() LOG.debug('Requesting cell %(cell)s while live migrating', {'cell': cell_mapping.identity}, instance=self.instance) if ('requested_destination' in request_spec and request_spec.requested_destination): request_spec.requested_destination.cell = cell_mapping else: request_spec.requested_destination = objects.Destination( cell=cell_mapping) request_spec.ensure_project_and_user_id(self.instance) request_spec.ensure_network_information(self.instance) compute_utils.heal_reqspec_is_bfv(self.context, request_spec, self.instance) return request_spec
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
    mock_ggd.return_value = scheduler_utils.GroupDetails(
        hosts=set(['hostA', 'hostB']), policies=['policy'],
        members=['instance1'])
    spec = {'instance_properties': {'uuid': 'fake-uuid'}}
    filter_props = {'group_hosts': ['hostC']}

    scheduler_utils.setup_instance_group(self.context, spec, filter_props)

    mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
                                     ['hostC'])
    expected_filter_props = {'group_updated': True,
                             'group_hosts': set(['hostA', 'hostB']),
                             'group_policies': ['policy'],
                             'group_members': ['instance1']}
    self.assertEqual(expected_filter_props, filter_props)
def test_find_destination_retry_with_failed_migration_pre_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    fake_spec = objects.RequestSpec()
    objects.RequestSpec.from_primitives(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.MigrationPreCheckError("reason"))

    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
    fake_spec = objects.RequestSpec()
    objects.RequestSpec.from_primitives(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, fake_spec).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")

    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_when_runs_out_of_hosts(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(),
        mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_no_image_works(self):
    self.instance['image_ref'] = ''

    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")

    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_with_invalid_livem_checks(self): self.flags(migrate_max_retries=1) self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata') self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group') self.mox.StubOutWithMock(self.task.scheduler_client, 'select_destinations') self.mox.StubOutWithMock(self.task, '_check_compatible_with_source_hypervisor') self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host') utils.get_image_from_system_metadata( self.instance.system_metadata).AndReturn("image") scheduler_utils.setup_instance_group(self.context, self.fake_spec) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host1', 'nodename': 'node1' }]) self.task._check_compatible_with_source_hypervisor("host1") self.task._call_livem_checks_on_host("host1")\ .AndRaise(exception.Invalid) self.task.scheduler_client.select_destinations( self.context, self.fake_spec, [self.instance.uuid]).AndReturn([{ 'host': 'host2', 'nodename': 'node2' }]) self.task._check_compatible_with_source_hypervisor("host2") self.task._call_livem_checks_on_host("host2") self.mox.ReplayAll() with mock.patch.object(self.task, '_remove_host_allocations') as remove_allocs: self.assertEqual(("host2", "node2"), self.task._find_destination()) # Should have removed allocations for the first host. remove_allocs.assert_called_once_with('host1', 'node1')
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.instance.numa_topology = None

    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.instance, 'is_volume_backed')

    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.instance.is_volume_backed().AndReturn(False)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1', 'nodename': 'node1',
              'limits': 'fake-limits'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)

    self.mox.ReplayAll()

    with test.nested(
        mock.patch.object(self.task.migration, 'save'),
        mock.patch.object(self.task, '_remove_host_allocations')
    ) as (save_mock, remove_allocs):
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        save_mock.assert_called_once_with()
        # Should have removed allocations for the first host.
        remove_allocs.assert_called_once_with('host1', 'node1',
                                              self.fake_spec)
def _get_request_spec_for_select_destinations(self, attempted_hosts=None): """Builds a RequestSpec that can be passed to select_destinations Used when calling the scheduler to pick a destination host for live migrating the instance. :param attempted_hosts: List of host names to ignore in the scheduler. This is generally at least seeded with the source host. :returns: nova.objects.RequestSpec object """ request_spec = self.request_spec # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() # TODO(gibi): We need to make sure that the requested_resources field # is re calculated based on neutron ports. scheduler_utils.setup_instance_group(self.context, request_spec) # We currently only support live migrating to hosts in the same # cell that the instance lives in, so we need to tell the scheduler # to limit the applicable hosts based on cell. cell_mapping = self._get_source_cell_mapping() LOG.debug('Requesting cell %(cell)s while live migrating', {'cell': cell_mapping.identity}, instance=self.instance) if ('requested_destination' in request_spec and request_spec.requested_destination): request_spec.requested_destination.cell = cell_mapping else: request_spec.requested_destination = objects.Destination( cell=cell_mapping) request_spec.ensure_project_and_user_id(self.instance) request_spec.ensure_network_metadata(self.instance) compute_utils.heal_reqspec_is_bfv(self.context, request_spec, self.instance) return request_spec
def test_find_destination_retry_with_invalid_livem_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    compute_utils.get_image_metadata(self.context,
                                     self.task.image_api,
                                     self.instance_image,
                                     self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.Invalid)
    scheduler_utils.setup_instance_group(
        self.context, {},
        {'ignore_hosts': [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_works(self):
    self.instance.numa_topology = None
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec,
                             'reset_forced_destinations')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    self.mox.StubOutWithMock(self.instance, 'is_volume_backed')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.fake_spec.reset_forced_destinations()
    self.instance.is_volume_backed().AndReturn(False)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1', 'nodename': 'node1',
              'limits': 'fake-limits'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1", limits='fake-limits')
    self.mox.ReplayAll()
    self.assertEqual(("host1", 'fake-limits'),
                     self.task._find_destination())
    # Make sure the request_spec was updated to include the cell
    # mapping.
    self.assertIsNotNone(self.fake_spec.requested_destination.cell)
    # Make sure the spec was updated to include the project_id.
    self.assertEqual(self.fake_spec.project_id, self.instance.project_id)
def test_find_destination_no_image_works(self):
    self.instance['image_ref'] = ''
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    scheduler_utils.build_request_spec(self.context, None,
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_works(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
    scheduler_utils.setup_instance_group(
        self.context, fake_props, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.setup_instance_group(self.context, self.fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)
    self.mox.ReplayAll()
    with mock.patch.object(self.task.migration, 'save') as save_mock:
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        save_mock.assert_called_once_with()
def _test_find_destination_retry_hypervisor_raises(self, error):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(error)
    scheduler_utils.setup_instance_group(
        self.context, {},
        {'ignore_hosts': [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def _group_details_in_filter_properties(self, group, func='get_by_uuid',
                                        hint=None, policy=None):
    group_hint = hint
    group_hosts = ['hostB']

    with contextlib.nested(
            mock.patch.object(objects.InstanceGroup, func,
                              return_value=group),
            mock.patch.object(objects.InstanceGroup, 'get_hosts',
                              return_value=['hostA']),
    ) as (get_group, get_hosts):
        scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
        scheduler_utils._SUPPORTS_AFFINITY = None
        group_info = scheduler_utils.setup_instance_group(
            self.context, group_hint, group_hosts)
        self.assertEqual(
            (set(['hostA', 'hostB']), [policy]), group_info)
def build_instances(self, context, instances, image, filter_properties,
        admin_password, injected_files, requested_networks,
        security_groups, block_device_mapping=None, legacy_bdm=True):
    # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in
    #                 version 2.0 of the RPC API.
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    # NOTE(sbauza): filter_properties['hints'] can be None
    hints = filter_properties.get('scheduler_hints', {}) or {}
    group_hint = hints.get('group')
    group_hosts = filter_properties.get('group_hosts')
    group_info = scheduler_utils.setup_instance_group(
        context, group_hint, group_hosts)
    if isinstance(group_info, tuple):
        filter_properties['group_updated'] = True
        (filter_properties['group_hosts'],
         filter_properties['group_policies']) = group_info
    # TODO(danms): Remove this in version 2.0 of the RPC API
    if (requested_networks and
            not isinstance(requested_networks,
                           objects.NetworkRequestList)):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in requested_networks])

    try:
        # check retry policy. Rather ugly use of instances[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties,
                                       instances[0].uuid)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
    except Exception as exc:
        for instance in instances:
            scheduler_driver.handle_schedule_error(
                context, exc, instance.uuid, request_spec)
        return

    for (instance, host) in itertools.izip(instances, hosts):
        try:
            instance.refresh()
        except (exception.InstanceNotFound,
                exception.InstanceInfoCacheNotFound):
            LOG.debug('Instance deleted during build',
                      instance=instance)
            continue

        local_filter_props = copy.deepcopy(filter_properties)
        scheduler_utils.populate_filter_properties(local_filter_props,
                                                   host)
        # The block_device_mapping passed from the api doesn't contain
        # instance specific information
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        self.compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec,
            filter_properties=local_filter_props,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=bdms, node=host['nodename'],
            limits=host['limits'])
def build_instances(self, context, instances, image, filter_properties,
        admin_password, injected_files, requested_networks,
        security_groups, block_device_mapping=None, legacy_bdm=True):
    # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in
    #                 version 2.0 of the RPC API.
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    # TODO(danms): Remove this in version 2.0 of the RPC API
    if (requested_networks and
            not isinstance(requested_networks,
                           objects.NetworkRequestList)):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in requested_networks])
    # TODO(melwitt): Remove this in version 2.0 of the RPC API
    flavor = filter_properties.get('instance_type')
    if flavor and not isinstance(flavor, objects.Flavor):
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        flavor = objects.Flavor.get_by_id(context, flavor['id'])
        filter_properties = dict(filter_properties, instance_type=flavor)

    try:
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # check retry policy. Rather ugly use of instances[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties,
                                       instances[0].uuid)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
    except Exception as exc:
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        for instance in instances:
            self._set_vm_state_and_notify(
                context, instance.uuid, 'build_instances', updates,
                exc, request_spec)
        return

    for (instance, host) in itertools.izip(instances, hosts):
        try:
            instance.refresh()
        except (exception.InstanceNotFound,
                exception.InstanceInfoCacheNotFound):
            LOG.debug('Instance deleted during build',
                      instance=instance)
            continue

        local_filter_props = copy.deepcopy(filter_properties)
        scheduler_utils.populate_filter_properties(local_filter_props,
                                                   host)
        # The block_device_mapping passed from the api doesn't contain
        # instance specific information
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        self.compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec,
            filter_properties=local_filter_props,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=bdms, node=host['nodename'],
            limits=host['limits'])