def test_find_destination_retry_with_failed_migration_pre_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             "select_destinations")
    self.mox.StubOutWithMock(self.task,
                             "_check_compatible_with_source_hypervisor")
    self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {"ignore_hosts": [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host1"}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1").AndRaise(
        exception.MigrationPreCheckError("reason"))
    scheduler_utils.setup_instance_group(
        self.context, {}, {"ignore_hosts": [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host2"}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    fake_spec = objects.RequestSpec()
    objects.RequestSpec.from_primitives(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)
    self.mox.ReplayAll()
    with mock.patch.object(self.task.migration, 'save') as save_mock:
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        save_mock.assert_called_once_with()
def test_find_destination_works(self):
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    compute_utils.get_image_metadata(self.context,
                                     self.task.image_api,
                                     self.instance_image,
                                     self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_with_invalid_livem_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.Invalid)
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_invalid_livem_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(compute_utils, "get_image_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             "select_destinations")
    self.mox.StubOutWithMock(self.task,
                             "_check_compatible_with_source_hypervisor")
    self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")
    compute_utils.get_image_metadata(
        self.context, self.task.image_api, self.instance_image,
        self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host1"}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1").AndRaise(
        exception.Invalid)
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host2"}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_no_image_works(self):
    self.instance['image_ref'] = ''
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    scheduler_utils.build_request_spec(
        self.context, {'properties': {'hw_disk_bus': 'scsi'}},
        mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(exception.DestinationHypervisorTooOld)
    self.mox.ReplayAll()
    self.assertRaises(exception.MaxRetriesExceeded,
                      self.task._find_destination)
def _test_find_destination_retry_hypervisor_raises(self, error):
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    compute_utils.get_image_metadata(self.context,
                                     self.task.image_api,
                                     self.instance_image,
                                     self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")\
        .AndRaise(error)
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_invalid_livem_checks(self):
    self.flags(migrate_max_retries=1)
    self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(self.task.scheduler_rpcapi,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    compute_utils.get_image_metadata(self.context,
                                     self.task.image_service,
                                     self.instance_image,
                                     self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_rpcapi.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")\
        .AndRaise(exception.Invalid)
    self.task.scheduler_rpcapi.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{'host': 'host2'}])
    self.task._check_compatible_with_source_hypervisor("host2")
    self.task._call_livem_checks_on_host("host2")
    self.mox.ReplayAll()
    self.assertEqual("host2", self.task._find_destination())
def test_find_destination_works(self):
    self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
    self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
    self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
    self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             'select_destinations')
    self.mox.StubOutWithMock(self.task,
                             '_check_compatible_with_source_hypervisor')
    self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {'ignore_hosts': [self.instance_host]})
    fake_spec = objects.RequestSpec()
    objects.RequestSpec.from_primitives(
        self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
    self.task.scheduler_client.select_destinations(
        self.context, fake_spec).AndReturn(
            [{'host': 'host1'}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_find_destination_when_runs_out_of_hosts(self):
    self.mox.StubOutWithMock(compute_utils, "get_image_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             "select_destinations")
    compute_utils.get_image_metadata(
        self.context, self.task.image_api, self.instance_image,
        self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
            exception.NoValidHost(reason=""))
    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_when_runs_out_of_hosts(self):
    self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             "select_destinations")
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {"ignore_hosts": [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
            exception.NoValidHost(reason=""))
    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_find_destination_no_image_works(self):
    self.instance["image_ref"] = ""
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(self.task.scheduler_rpcapi, "select_hosts")
    self.mox.StubOutWithMock(self.task,
                             "_check_compatible_with_source_hypervisor")
    self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")
    scheduler_utils.build_request_spec(self.context, None,
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_rpcapi.select_hosts(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(["host1"])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def _cold_migrate(self, context, instance, flavor, filter_properties,
                  reservations, clean_shutdown):
    image = utils.get_image_from_system_metadata(
        instance.system_metadata)
    request_spec = scheduler_utils.build_request_spec(
        context, image, [instance], instance_type=flavor)
    task = self._build_cold_migrate_task(context, instance, flavor,
                                         filter_properties, request_spec,
                                         reservations, clean_shutdown)
    try:
        task.execute()
    except exception.NoValidHost as ex:
        vm_state = instance.vm_state
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {"vm_state": vm_state, "task_state": None}
        self._set_vm_state_and_notify(context, instance.uuid,
                                      "migrate_server", updates, ex,
                                      request_spec)
        # if the flavor IDs match, it's migrate; otherwise resize
        if flavor.id == instance.instance_type_id:
            msg = _("No valid host found for cold migrate")
        else:
            msg = _("No valid host found for resize")
        raise exception.NoValidHost(reason=msg)
    except exception.UnsupportedPolicyException as ex:
        with excutils.save_and_reraise_exception():
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {"vm_state": vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          "migrate_server", updates, ex,
                                          request_spec)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {"vm_state": instance.vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          "migrate_server", updates, ex,
                                          request_spec)
def test_find_destination_retry_exceeds_max(self):
    self.flags(migrate_max_retries=0)
    self.mox.StubOutWithMock(compute_utils, "get_image_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(self.task.scheduler_rpcapi, "select_hosts")
    self.mox.StubOutWithMock(self.task,
                             "_check_compatible_with_source_hypervisor")
    compute_utils.get_image_metadata(
        self.context, self.task.image_service, self.instance_image,
        self.instance).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    self.task.scheduler_rpcapi.select_hosts(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(["host1"])
    self.task._check_compatible_with_source_hypervisor("host1").AndRaise(
        exception.DestinationHypervisorTooOld)
    self.mox.ReplayAll()
    self.assertRaises(exception.NoValidHost, self.task._find_destination)
def _schedule_instances(self, context, image, filter_properties,
                        *instances):
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    hosts = self.scheduler_client.select_destinations(context,
                                                      request_spec,
                                                      filter_properties)
    return hosts
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping,
                    legacy_bdm=True):
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    # NOTE(alaski): For compatibility until a new scheduler method is used.
    request_spec.update({"block_device_mapping": block_device_mapping,
                         "security_group": security_groups})
    self.scheduler_rpcapi.run_instance(
        context, request_spec=request_spec, admin_password=admin_password,
        injected_files=injected_files,
        requested_networks=requested_networks, is_first_time=True,
        filter_properties=filter_properties,
        legacy_bdm_in_spec=legacy_bdm)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None):
    if live and not rebuild and not flavor:
        destination = scheduler_hint.get("host")
        self.scheduler_rpcapi.live_migration(context, block_migration,
                                             disk_over_commit, instance,
                                             destination)
    elif not live and not rebuild and flavor:
        image_ref = instance.get('image_ref')
        if image_ref:
            image = self._get_image(context, image_ref)
        else:
            image = {}
        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance])
        # NOTE(timello): originally, instance_type in request_spec
        # on compute.api.resize does not have 'extra_specs', so we
        # remove it for now to keep tests backward compatibility.
        request_spec['instance_type'].pop('extra_specs')
        self.scheduler_rpcapi.prep_resize(
            context, instance=instance, instance_type=flavor,
            image=image, request_spec=request_spec,
            filter_properties=scheduler_hint['filter_properties'],
            reservations=reservations)
    else:
        raise NotImplementedError()
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping,
                    legacy_bdm=True):
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    LOG.debug('build_instances called in the conductor manager')
    # NOTE(alaski): For compatibility until a new scheduler method is used.
    request_spec.update({'block_device_mapping': block_device_mapping,
                         'security_group': security_groups})
    self.scheduler_rpcapi.run_instance(
        context, request_spec=request_spec, admin_password=admin_password,
        injected_files=injected_files,
        requested_networks=requested_networks, is_first_time=True,
        filter_properties=filter_properties,
        legacy_bdm_in_spec=legacy_bdm)
    # Check instance state after scheduling; an ERROR vm_state at this
    # point means scheduling failed for that instance.
    for instance in instances:
        state = instance['vm_state']
        uuid = instance['uuid']
        LOG.debug('instance %s has state %s', uuid, state)
        if state == vm_states.ERROR:
            LOG.error('Scheduling failed for instance %s', uuid)
def test_stat_consumption_from_instance_with_pci_exception(self):
    fake_requests = [{'request_id': 'fake_request1', 'count': 3,
                      'spec': [{'vendor_id': '8086'}]}]
    fake_requests_obj = objects.InstancePCIRequests(
        requests=[objects.InstancePCIRequest(**r) for r in fake_requests],
        instance_uuid='fake-uuid')
    instance = objects.Instance(root_gb=0, ephemeral_gb=0, memory_mb=512,
                                vcpus=1, project_id='12345',
                                vm_state=vm_states.BUILDING,
                                task_state=task_states.SCHEDULING,
                                os_type='Linux', uuid='fake-uuid',
                                pci_requests=fake_requests_obj, id=1243)
    req_spec = sched_utils.build_request_spec(
        None, None, [instance],
        objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=1024, vcpus=1))
    host = host_manager.HostState("fakehost", "fakenode")
    self.assertIsNone(host.updated)
    fake_updated = mock.sentinel.fake_updated
    host.updated = fake_updated
    host.pci_stats = pci_stats.PciDeviceStats()
    with mock.patch.object(host.pci_stats, 'apply_requests',
                           side_effect=exception.PciDeviceRequestFailed):
        host.consume_from_instance(req_spec['instance_properties'])
    self.assertEqual(fake_updated, host.updated)
def _find_destination(self):
    # TODO(johngarbutt) this retry loop should be shared
    attempted_hosts = [self.source]
    image = None
    if self.instance.image_ref:
        image = compute_utils.get_image_metadata(self.context,
                                                 self.image_api,
                                                 self.instance.image_ref,
                                                 self.instance)
    request_spec = scheduler_utils.build_request_spec(self.context, image,
                                                      [self.instance])

    host = None
    while host is None:
        self._check_not_over_max_retries(attempted_hosts)
        filter_properties = {'ignore_hosts': attempted_hosts}
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_properties)
        host = self.scheduler_client.select_destinations(
            self.context, request_spec, filter_properties)[0]['host']
        try:
            self._check_compatible_with_source_hypervisor(host)
            self._call_livem_checks_on_host(host)
        except exception.Invalid as e:
            LOG.debug("Skipping host: %(host)s because: %(e)s",
                      {"host": host, "e": e})
            attempted_hosts.append(host)
            host = None
    return host
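# NOTE: _check_not_over_max_retries is exercised by the tests above but its
# body is not included in this section. The sketch below is illustrative
# only, inferred from those tests (self.flags(migrate_max_retries=...), the
# MaxRetriesExceeded assertion, and the migration.status/'failed'/save()
# checks); the exact message wording and the -1 "unlimited" convention are
# assumptions. Older variants in this section expect NoValidHost instead of
# MaxRetriesExceeded.
def _check_not_over_max_retries(self, attempted_hosts):
    if CONF.migrate_max_retries == -1:
        return
    retries = len(attempted_hosts) - 1
    if retries > CONF.migrate_max_retries:
        if self.migration:
            self.migration.status = 'failed'
            self.migration.save()
        msg = (_('Exceeded max scheduling retries %(max_retries)d for '
                 'instance %(instance_uuid)s during live migration')
               % {'max_retries': retries,
                  'instance_uuid': self.instance.uuid})
        raise exception.MaxRetriesExceeded(reason=msg)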
def _cold_migrate(self, context, instance, flavor, filter_properties, reservations): image_ref = instance.image_ref image = compute_utils.get_image_metadata( context, self.image_api, image_ref, instance) request_spec = scheduler_utils.build_request_spec( context, image, [instance], instance_type=flavor) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) scheduler_utils.setup_instance_group(context, request_spec, filter_properties) try: scheduler_utils.populate_retry(filter_properties, instance['uuid']) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) host_state = hosts[0] except exception.NoValidHost as ex: vm_state = instance['vm_state'] if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, 'migrate_server', updates, ex, request_spec) quotas.rollback() # if the flavor IDs match, it's migrate; otherwise resize if flavor['id'] == instance['instance_type_id']: msg = _("No valid host found for cold migrate") else: msg = _("No valid host found for resize") raise exception.NoValidHost(reason=msg) try: scheduler_utils.populate_filter_properties(filter_properties, host_state) # context is not serializable filter_properties.pop('context', None) # TODO(timello): originally, instance_type in request_spec # on compute.api.resize does not have 'extra_specs', so we # remove it for now to keep tests backward compatibility. request_spec['instance_type'].pop('extra_specs', None) (host, node) = (host_state['host'], host_state['nodename']) self.compute_rpcapi.prep_resize( context, image, instance, flavor, host, reservations, request_spec=request_spec, filter_properties=filter_properties, node=node) except Exception as ex: with excutils.save_and_reraise_exception(): updates = {'vm_state': instance['vm_state'], 'task_state': None} self._set_vm_state_and_notify(context, 'migrate_server', updates, ex, request_spec) quotas.rollback()
def unshelve_instance(self, context, instance): sys_meta = instance.system_metadata def safe_image_show(ctx, image_id): if image_id: return self.image_api.get(ctx, image_id, show_deleted=False) else: raise exception.ImageNotFound(image_id="") if instance.vm_state == vm_states.SHELVED: instance.task_state = task_states.POWERING_ON instance.save(expected_task_state=task_states.UNSHELVING) self.compute_rpcapi.start_instance(context, instance) elif instance.vm_state == vm_states.SHELVED_OFFLOADED: image = None image_id = sys_meta.get("shelved_image_id") # No need to check for image if image_id is None as # "shelved_image_id" key is not set for volume backed # instance during the shelve process if image_id: with compute_utils.EventReporter(context, "get_image_info", instance.uuid): try: image = safe_image_show(context, image_id) except exception.ImageNotFound: instance.vm_state = vm_states.ERROR instance.save() reason = _("Unshelve attempted but the image %s " "cannot be found.") % image_id LOG.error(reason, instance=instance) raise exception.UnshelveException(instance_id=instance.uuid, reason=reason) try: with compute_utils.EventReporter(context, "schedule_instances", instance.uuid): filter_properties = {} scheduler_utils.populate_retry(filter_properties, instance.uuid) request_spec = scheduler_utils.build_request_spec(context, image, [instance]) hosts = self._schedule_instances(context, request_spec, filter_properties) host_state = hosts[0] scheduler_utils.populate_filter_properties(filter_properties, host_state) (host, node) = (host_state["host"], host_state["nodename"]) self.compute_rpcapi.unshelve_instance( context, instance, host, image=image, filter_properties=filter_properties, node=node ) except (exception.NoValidHost, exception.UnsupportedPolicyException): instance.task_state = None instance.save() LOG.warning(_LW("No valid host found for unshelve instance"), instance=instance) return except Exception: with excutils.save_and_reraise_exception(): instance.task_state = None instance.save() LOG.error(_LE("Unshelve attempted but an error " "has occurred"), instance=instance) else: LOG.error(_LE("Unshelve attempted but vm_state not SHELVED or " "SHELVED_OFFLOADED"), instance=instance) instance.vm_state = vm_states.ERROR instance.save() return
def _schedule_instances(self, context, image, filter_properties,
                        *instances):
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    # dict(host='', nodename='', limits='')
    hosts = self.scheduler_rpcapi.select_destinations(context,
                                                      request_spec,
                                                      filter_properties)
    return hosts
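# NOTE: as the inline comment above indicates, each element of the returned
# hosts list is a dict of the form {'host': ..., 'nodename': ..., 'limits': ...}.
# Callers in this section consume it as hosts[0]['host'],
# hosts[0]['nodename'] and hosts[0]['limits'] (or pop(0) for rebuild); this
# comment is orientation only, not part of the original code.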
def _cold_migrate(self, context, instance, flavor, filter_properties, reservations, clean_shutdown): image_ref = instance.image_ref image = compute_utils.get_image_metadata(context, self.image_api, image_ref, instance) request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) try: scheduler_utils.setup_instance_group(context, request_spec, filter_properties) scheduler_utils.populate_retry(filter_properties, instance["uuid"]) hosts = self.scheduler_client.select_destinations(context, request_spec, filter_properties) host_state = hosts[0] except exception.NoValidHost as ex: vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {"vm_state": vm_state, "task_state": None} self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec) quotas.rollback() # if the flavor IDs match, it's migrate; otherwise resize if flavor["id"] == instance["instance_type_id"]: msg = _("No valid host found for cold migrate") else: msg = _("No valid host found for resize") raise exception.NoValidHost(reason=msg) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {"vm_state": vm_state, "task_state": None} self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec) quotas.rollback() try: scheduler_utils.populate_filter_properties(filter_properties, host_state) # context is not serializable filter_properties.pop("context", None) (host, node) = (host_state["host"], host_state["nodename"]) self.compute_rpcapi.prep_resize( context, image, instance, flavor, host, reservations, request_spec=request_spec, filter_properties=filter_properties, node=node, clean_shutdown=clean_shutdown, ) except Exception as ex: with excutils.save_and_reraise_exception(): updates = {"vm_state": instance.vm_state, "task_state": None} self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec) quotas.rollback()
def test_build_request_spec_with_object(self, extract_flavor):
    instance_type = {'flavorid': 'fake-id'}
    instance = fake_instance.fake_instance_obj(self.context)
    extract_flavor.return_value = instance_type

    request_spec = scheduler_utils.build_request_spec(self.context, None,
                                                      [instance])
    self.assertIsInstance(request_spec['instance_properties'], dict)
def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping=None, legacy_bdm=True): # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version # 2.0 of the RPC API. request_spec = scheduler_utils.build_request_spec(context, image, instances) scheduler_utils.setup_instance_group(context, request_spec, filter_properties) # TODO(danms): Remove this in version 2.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in requested_networks]) try: # check retry policy. Rather ugly use of instances[0]... # but if we've exceeded max retries... then we really only # have a single instance. scheduler_utils.populate_retry(filter_properties, instances[0].uuid) hosts = self.scheduler_client.select_destinations(context, request_spec, filter_properties) except Exception as exc: for instance in instances: scheduler_driver.handle_schedule_error(context, exc, instance.uuid, request_spec) return for (instance, host) in itertools.izip(instances, hosts): try: instance.refresh() except (exception.InstanceNotFound, exception.InstanceInfoCacheNotFound): LOG.debug('Instance deleted during build', instance=instance) continue local_filter_props = copy.deepcopy(filter_properties) scheduler_utils.populate_filter_properties(local_filter_props, host) # The block_device_mapping passed from the api doesn't contain # instance specific information bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self.compute_rpcapi.build_and_run_instance(context, instance=instance, host=host['host'], image=image, request_spec=request_spec, filter_properties=local_filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host['nodename'], limits=host['limits'])
def test_find_destination_works(self):
    self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
    self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
    self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
    self.mox.StubOutWithMock(self.task.scheduler_client,
                             "select_destinations")
    self.mox.StubOutWithMock(self.task,
                             "_check_compatible_with_source_hypervisor")
    self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")
    utils.get_image_from_system_metadata(
        self.instance.system_metadata).AndReturn("image")
    scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                       mox.IgnoreArg()).AndReturn({})
    scheduler_utils.setup_instance_group(
        self.context, {}, {"ignore_hosts": [self.instance_host]})
    self.task.scheduler_client.select_destinations(self.context,
        mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host1"}])
    self.task._check_compatible_with_source_hypervisor("host1")
    self.task._call_livem_checks_on_host("host1")
    self.mox.ReplayAll()
    self.assertEqual("host1", self.task._find_destination())
def test_build_request_spec_without_image(self):
    instance = {'uuid': uuids.instance}
    instance_type = objects.Flavor(**test_flavor.fake_flavor)

    with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
        mock_extract.return_value = instance_type
        request_spec = scheduler_utils.build_request_spec(None, [instance])
        mock_extract.assert_called_once_with({'uuid': uuids.instance})
    self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
    instance_type = objects.Flavor()
    instance = fake_instance.fake_instance_obj(self.context)

    with mock.patch.object(instance, 'get_flavor') as mock_get:
        mock_get.return_value = instance_type
        request_spec = scheduler_utils.build_request_spec(None, [instance])
        mock_get.assert_called_once_with()
    self.assertIsInstance(request_spec['instance_properties'], dict)
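# NOTE: the assertions above each touch a single key; for orientation, a
# request_spec dict returned by scheduler_utils.build_request_spec() looks
# roughly like the sketch below. The key set is inferred from the tests and
# callers in this section ('image', 'instance_properties', 'instance_type');
# 'num_instances' and the placeholder values are assumptions, not verified
# against any particular release.
example_request_spec = {
    'image': {},                # image metadata, or {} when there is no image
    'instance_properties': {},  # the instance flattened to a primitive dict
    'instance_type': {},        # the flavor the instances were built with
    'num_instances': 1,         # assumed: how many instances were passed in
}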
def unshelve_instance(self, context, instance): sys_meta = instance.system_metadata def safe_image_show(ctx, image_id): if image_id: return self.image_api.get(ctx, image_id, show_deleted=False) else: raise exception.ImageNotFound(image_id='') if instance.vm_state == vm_states.SHELVED: instance.task_state = task_states.POWERING_ON instance.save(expected_task_state=task_states.UNSHELVING) self.compute_rpcapi.start_instance(context, instance) elif instance.vm_state == vm_states.SHELVED_OFFLOADED: image = None image_id = sys_meta.get('shelved_image_id') # No need to check for image if image_id is None as # "shelved_image_id" key is not set for volume backed # instance during the shelve process if image_id: with compute_utils.EventReporter( context, 'get_image_info', instance.uuid): try: image = safe_image_show(context, image_id) except exception.ImageNotFound: instance.vm_state = vm_states.ERROR instance.save() reason = _('Unshelve attempted but the image %s ' 'cannot be found.') % image_id LOG.error(reason, instance=instance) raise exception.UnshelveException( instance_id=instance.uuid, reason=reason) try: with compute_utils.EventReporter(context, 'schedule_instances', instance.uuid): filter_properties = {} scheduler_utils.populate_retry(filter_properties, instance.uuid) request_spec = scheduler_utils.build_request_spec( context, image, [instance]) hosts = self._schedule_instances( context, request_spec, filter_properties) host_state = hosts[0] scheduler_utils.populate_filter_properties( filter_properties, host_state) (host, node) = (host_state['host'], host_state['nodename']) self.compute_rpcapi.unshelve_instance( context, instance, host, image=image, filter_properties=filter_properties, node=node) except (exception.NoValidHost, exception.UnsupportedPolicyException): instance.task_state = None instance.save() LOG.warning(_LW("No valid host found for unshelve instance"), instance=instance) return except Exception: with excutils.save_and_reraise_exception(): instance.task_state = None instance.save() LOG.error(_LE("Unshelve attempted but an error " "has occurred"), instance=instance) else: LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or ' 'SHELVED_OFFLOADED'), instance=instance) instance.vm_state = vm_states.ERROR instance.save() return
def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping=None, legacy_bdm=True): # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version # 2.0 of the RPC API. request_spec = scheduler_utils.build_request_spec( context, image, instances) # TODO(danms): Remove this in version 2.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList(objects=[ objects.NetworkRequest.from_tuple(t) for t in requested_networks ]) # TODO(melwitt): Remove this in version 2.0 of the RPC API flavor = filter_properties.get('instance_type') if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. flavor = objects.Flavor.get_by_id(context, flavor['id']) filter_properties = dict(filter_properties, instance_type=flavor) try: scheduler_utils.setup_instance_group(context, request_spec, filter_properties) # check retry policy. Rather ugly use of instances[0]... # but if we've exceeded max retries... then we really only # have a single instance. scheduler_utils.populate_retry(filter_properties, instances[0].uuid) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) except Exception as exc: updates = {'vm_state': vm_states.ERROR, 'task_state': None} for instance in instances: self._set_vm_state_and_notify(context, instance.uuid, 'build_instances', updates, exc, request_spec) return for (instance, host) in itertools.izip(instances, hosts): try: instance.refresh() except (exception.InstanceNotFound, exception.InstanceInfoCacheNotFound): LOG.debug('Instance deleted during build', instance=instance) continue local_filter_props = copy.deepcopy(filter_properties) scheduler_utils.populate_filter_properties(local_filter_props, host) # The block_device_mapping passed from the api doesn't contain # instance specific information bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self.compute_rpcapi.build_and_run_instance( context, instance=instance, host=host['host'], image=image, request_spec=request_spec, filter_properties=local_filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host['nodename'], limits=host['limits'])
def _cold_migrate(self, context, instance, flavor, filter_properties, reservations): image_ref = instance.image_ref image = compute_utils.get_image_metadata(context, self.image_api, image_ref, instance) request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) try: scheduler_utils.populate_retry(filter_properties, instance['uuid']) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) host_state = hosts[0] except exception.NoValidHost as ex: vm_state = instance['vm_state'] if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, 'migrate_server', updates, ex, request_spec) quotas.rollback() # if the flavor IDs match, it's migrate; otherwise resize if flavor['id'] == instance['instance_type_id']: msg = _("No valid host found for cold migrate") else: msg = _("No valid host found for resize") raise exception.NoValidHost(reason=msg) try: scheduler_utils.populate_filter_properties(filter_properties, host_state) # context is not serializable filter_properties.pop('context', None) # TODO(timello): originally, instance_type in request_spec # on compute.api.resize does not have 'extra_specs', so we # remove it for now to keep tests backward compatibility. request_spec['instance_type'].pop('extra_specs') (host, node) = (host_state['host'], host_state['nodename']) self.compute_rpcapi.prep_resize( context, image, instance, flavor, host, reservations, request_spec=request_spec, filter_properties=filter_properties, node=node) except Exception as ex: with excutils.save_and_reraise_exception(): updates = { 'vm_state': instance['vm_state'], 'task_state': None } self._set_vm_state_and_notify(context, 'migrate_server', updates, ex, request_spec) quotas.rollback()
def _cold_migrate(self, context, instance, flavor, filter_properties, reservations, clean_shutdown): image_ref = instance.image_ref image = compute_utils.get_image_metadata(context, self.image_api, image_ref, instance) request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) try: scheduler_utils.setup_instance_group(context, request_spec, filter_properties) scheduler_utils.populate_retry(filter_properties, instance['uuid']) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) host_state = hosts[0] except exception.NoValidHost as ex: vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) quotas.rollback() # if the flavor IDs match, it's migrate; otherwise resize if flavor['id'] == instance['instance_type_id']: msg = _("No valid host found for cold migrate") else: msg = _("No valid host found for resize") raise exception.NoValidHost(reason=msg) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) quotas.rollback() try: scheduler_utils.populate_filter_properties(filter_properties, host_state) # context is not serializable filter_properties.pop('context', None) (host, node) = (host_state['host'], host_state['nodename']) self.compute_rpcapi.prep_resize( context, image, instance, flavor, host, reservations, request_spec=request_spec, filter_properties=filter_properties, node=node, clean_shutdown=clean_shutdown) except Exception as ex: with excutils.save_and_reraise_exception(): updates = {'vm_state': instance.vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) quotas.rollback()
def build_instances(self, context, instances, image, filter_properties,
        admin_password, injected_files, requested_networks,
        security_groups, block_device_mapping=None, legacy_bdm=True):
    # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
    #                 2.0 of the RPC API.
    request_spec = scheduler_utils.build_request_spec(
        context, image, instances)
    # NOTE(sbauza): filter_properties['hints'] can be None
    hints = filter_properties.get('scheduler_hints', {}) or {}
    group_hint = hints.get('group')
    group_hosts = filter_properties.get('group_hosts')
    group_info = scheduler_utils.setup_instance_group(
        context, group_hint, group_hosts)
    if isinstance(group_info, tuple):
        filter_properties['group_updated'] = True
        (filter_properties['group_hosts'],
         filter_properties['group_policies']) = group_info
    # TODO(danms): Remove this in version 2.0 of the RPC API
    if (requested_networks and
            not isinstance(requested_networks,
                           objects.NetworkRequestList)):
        requested_networks = objects.NetworkRequestList(objects=[
            objects.NetworkRequest.from_tuple(t)
            for t in requested_networks])

    try:
        # check retry policy. Rather ugly use of instances[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        # (luzhq) Validate the retry policy: update the retry properties in
        # filter_properties and, if this is a rescheduled build, check
        # whether the current retry count exceeds the maximum number of
        # retries. Note that instances[0] is used here because on a retry
        # only a single instance is being rescheduled.
        scheduler_utils.populate_retry(filter_properties,
                                       instances[0].uuid)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
    except Exception as exc:
        for instance in instances:
            scheduler_driver.handle_schedule_error(context, exc,
                                                   instance.uuid,
                                                   request_spec)
        return

    for (instance, host) in itertools.izip(instances, hosts):
        try:
            instance.refresh()
        except (exception.InstanceNotFound,
                exception.InstanceInfoCacheNotFound):
            LOG.debug('Instance deleted during build', instance=instance)
            continue
        local_filter_props = copy.deepcopy(filter_properties)
        scheduler_utils.populate_filter_properties(local_filter_props,
                                                   host)
        # The block_device_mapping passed from the api doesn't contain
        # instance specific information
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        # (luzhq) self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec,
            filter_properties=local_filter_props,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=bdms, node=host['nodename'],
            limits=host['limits'])
def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral=False, host=None): with compute_utils.EventReporter(context, 'rebuild_server', instance.uuid): node = limits = None if not host: # NOTE(lcostantino): Retrieve scheduler filters for the # instance when the feature is available filter_properties = {'ignore_hosts': [instance.host]} request_spec = scheduler_utils.build_request_spec( context, image_ref, [instance]) try: scheduler_utils.setup_instance_group( context, request_spec, filter_properties) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) host_dict = hosts.pop(0) host, node, limits = (host_dict['host'], host_dict['nodename'], host_dict['limits']) except exception.NoValidHost as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', { 'vm_state': instance.vm_state, 'task_state': None }, ex, request_spec) LOG.warning(_LW("No valid host found for rebuild"), instance=instance) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', { 'vm_state': instance.vm_state, 'task_state': None }, ex, request_spec) LOG.warning(_LW("Server with unsupported policy " "cannot be rebuilt"), instance=instance) try: migration = objects.Migration.get_by_instance_and_status( context, instance.uuid, 'accepted') except exception.MigrationNotFoundByStatus: LOG.debug( "No migration record for the rebuild/evacuate " "request.", instance=instance) migration = None compute_utils.notify_about_instance_usage(self.notifier, context, instance, "rebuild.scheduled") self.compute_rpcapi.rebuild_instance( context, instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, preserve_ephemeral=preserve_ephemeral, migration=migration, host=host, node=node, limits=limits)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral=False, host=None, request_spec=None): with compute_utils.EventReporter(context, 'rebuild_server', instance.uuid): node = limits = None if not host: if not request_spec: # NOTE(sbauza): We were unable to find an original # RequestSpec object - probably because the instance is old # We need to mock that the old way filter_properties = {'ignore_hosts': [instance.host]} request_spec = scheduler_utils.build_request_spec( context, image_ref, [instance]) else: # NOTE(sbauza): Augment the RequestSpec object by excluding # the source host for avoiding the scheduler to pick it request_spec.ignore_hosts = request_spec.ignore_hosts or [] request_spec.ignore_hosts.append(instance.host) # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() # TODO(sbauza): Provide directly the RequestSpec object # when _schedule_instances() and _set_vm_state_and_notify() # accept it filter_properties = request_spec.\ to_legacy_filter_properties_dict() request_spec = request_spec.to_legacy_request_spec_dict() try: hosts = self._schedule_instances( context, request_spec, filter_properties) host_dict = hosts.pop(0) host, node, limits = (host_dict['host'], host_dict['nodename'], host_dict['limits']) except exception.NoValidHost as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify(context, instance.uuid, 'rebuild_server', {'vm_state': instance.vm_state, 'task_state': None}, ex, request_spec) LOG.warning(_LW("No valid host found for rebuild"), instance=instance) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify(context, instance.uuid, 'rebuild_server', {'vm_state': instance.vm_state, 'task_state': None}, ex, request_spec) LOG.warning(_LW("Server with unsupported policy " "cannot be rebuilt"), instance=instance) try: migration = objects.Migration.get_by_instance_and_status( context, instance.uuid, 'accepted') except exception.MigrationNotFoundByStatus: LOG.debug("No migration record for the rebuild/evacuate " "request.", instance=instance) migration = None compute_utils.notify_about_instance_usage( self.notifier, context, instance, "rebuild.scheduled") self.compute_rpcapi.rebuild_instance(context, instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, preserve_ephemeral=preserve_ephemeral, migration=migration, host=host, node=node, limits=limits)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral=False, host=None): with compute_utils.EventReporter(context, 'rebuild_server', instance.uuid): if not host: # NOTE(lcostantino): Retrieve scheduler filters for the # instance when the feature is available filter_properties = {'ignore_hosts': [instance.host]} request_spec = scheduler_utils.build_request_spec( context, image_ref, [instance]) try: scheduler_utils.setup_instance_group( context, request_spec, filter_properties) hosts = self.scheduler_client.select_destinations( context, request_spec, filter_properties) host = hosts.pop(0)['host'] except exception.NoValidHost as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', { 'vm_state': instance.vm_state, 'task_state': None }, ex, request_spec) LOG.warning(_LW("No valid host found for rebuild"), instance=instance) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', { 'vm_state': instance.vm_state, 'task_state': None }, ex, request_spec) LOG.warning(_LW("Server with unsupported policy " "cannot be rebuilt"), instance=instance) self.compute_rpcapi.rebuild_instance( context, instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, preserve_ephemeral=preserve_ephemeral, host=host)
def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping=None, legacy_bdm=True): # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version # 2.0 of the RPC API. # TODO(danms): Remove this in version 2.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList.from_tuples( requested_networks) # TODO(melwitt): Remove this in version 2.0 of the RPC API flavor = filter_properties.get('instance_type') if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. flavor = objects.Flavor.get_by_id(context, flavor['id']) filter_properties = dict(filter_properties, instance_type=flavor) request_spec = {} try: # check retry policy. Rather ugly use of instances[0]... # but if we've exceeded max retries... then we really only # have a single instance. request_spec = scheduler_utils.build_request_spec( context, image, instances) scheduler_utils.populate_retry( filter_properties, instances[0].uuid) hosts = self._schedule_instances( context, request_spec, filter_properties) except Exception as exc: updates = {'vm_state': vm_states.ERROR, 'task_state': None} for instance in instances: self._set_vm_state_and_notify( context, instance.uuid, 'build_instances', updates, exc, request_spec) try: # If the BuildRequest stays around then instance show/lists # will pull from it rather than the errored instance. self._destroy_build_request(context, instance) except exception.BuildRequestNotFound: pass self._cleanup_allocated_networks( context, instance, requested_networks) return for (instance, host) in six.moves.zip(instances, hosts): try: instance.refresh() except (exception.InstanceNotFound, exception.InstanceInfoCacheNotFound): LOG.debug('Instance deleted during build', instance=instance) continue local_filter_props = copy.deepcopy(filter_properties) scheduler_utils.populate_filter_properties(local_filter_props, host) # The block_device_mapping passed from the api doesn't contain # instance specific information bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # This is populated in scheduler_utils.populate_retry num_attempts = local_filter_props.get('retry', {}).get('num_attempts', 1) if num_attempts <= 1: # If this is a reschedule the instance is already mapped to # this cell and the BuildRequest is already deleted so ignore # the logic below. inst_mapping = self._populate_instance_mapping(context, instance, host) try: self._destroy_build_request(context, instance) except exception.BuildRequestNotFound: # This indicates an instance delete has been requested in # the API. Stop the build, cleanup the instance_mapping and # potentially the block_device_mappings # TODO(alaski): Handle block_device_mapping cleanup if inst_mapping: inst_mapping.destroy() return self.compute_rpcapi.build_and_run_instance(context, instance=instance, host=host['host'], image=image, request_spec=request_spec, filter_properties=local_filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host['nodename'], limits=host['limits'])
def unshelve_instance(self, context, instance, request_spec=None): sys_meta = instance.system_metadata def safe_image_show(ctx, image_id): if image_id: return self.image_api.get(ctx, image_id, show_deleted=False) else: raise exception.ImageNotFound(image_id='') if instance.vm_state == vm_states.SHELVED: instance.task_state = task_states.POWERING_ON instance.save(expected_task_state=task_states.UNSHELVING) self.compute_rpcapi.start_instance(context, instance) elif instance.vm_state == vm_states.SHELVED_OFFLOADED: image = None image_id = sys_meta.get('shelved_image_id') # No need to check for image if image_id is None as # "shelved_image_id" key is not set for volume backed # instance during the shelve process if image_id: with compute_utils.EventReporter( context, 'get_image_info', instance.uuid): try: image = safe_image_show(context, image_id) except exception.ImageNotFound: instance.vm_state = vm_states.ERROR instance.save() reason = _('Unshelve attempted but the image %s ' 'cannot be found.') % image_id LOG.error(reason, instance=instance) raise exception.UnshelveException( instance_id=instance.uuid, reason=reason) try: with compute_utils.EventReporter(context, 'schedule_instances', instance.uuid): if not request_spec: # NOTE(sbauza): We were unable to find an original # RequestSpec object - probably because the instance is # old. We need to mock that the old way filter_properties = {} request_spec = scheduler_utils.build_request_spec( context, image, [instance]) else: # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() # TODO(sbauza): Provide directly the RequestSpec object # when _schedule_instances(), # populate_filter_properties and populate_retry() # accept it filter_properties = request_spec.\ to_legacy_filter_properties_dict() request_spec = request_spec.\ to_legacy_request_spec_dict() scheduler_utils.populate_retry(filter_properties, instance.uuid) hosts = self._schedule_instances( context, request_spec, filter_properties) host_state = hosts[0] scheduler_utils.populate_filter_properties( filter_properties, host_state) (host, node) = (host_state['host'], host_state['nodename']) self.compute_rpcapi.unshelve_instance( context, instance, host, image=image, filter_properties=filter_properties, node=node) except (exception.NoValidHost, exception.UnsupportedPolicyException): instance.task_state = None instance.save() LOG.warning(_LW("No valid host found for unshelve instance"), instance=instance) return except Exception: with excutils.save_and_reraise_exception(): instance.task_state = None instance.save() LOG.error(_LE("Unshelve attempted but an error " "has occurred"), instance=instance) else: LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or ' 'SHELVED_OFFLOADED'), instance=instance) instance.vm_state = vm_states.ERROR instance.save() return