Example #1
    def _test_find_destination_retry_hypervisor_raises(self, error):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid]).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(error)

        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid]).AndReturn(
                        [{'host': 'host2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #2
    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
        scheduler_utils.setup_instance_group(
            self.context, fake_props, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        with mock.patch.object(self.task.migration, 'save') as save_mock:
            self.assertRaises(exception.MaxRetriesExceeded,
                              self.task._find_destination)
            self.assertEqual('failed', self.task.migration.status)
            save_mock.assert_called_once_with()
Example #3
    def test_find_destination_retry_with_failed_migration_pre_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
        scheduler_utils.setup_instance_group(
            self.context, fake_props, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
                .AndRaise(exception.MigrationPreCheckError("reason"))

        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec).AndReturn(
                        [{'host': 'host2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #4
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        fake_spec = objects.RequestSpec()
        objects.RequestSpec.from_primitives(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, fake_spec).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Example #5
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec,
                                 'reset_forced_destinations')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
        scheduler_utils.setup_instance_group(
            self.context, fake_props, {'ignore_hosts': [self.instance_host]})
        self.fake_spec.reset_forced_destinations()
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Example #6
    def test_find_destination_retry_with_failed_migration_pre_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor")
        self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")

        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host1"}]
        )
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1").AndRaise(exception.MigrationPreCheckError("reason"))

        scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host, "host1"]})
        self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host2"}]
        )
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #7
    def test_find_destination_retry_with_invalid_livem_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context,
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
                .AndRaise(exception.Invalid)

        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
        self.task.scheduler_client.select_destinations(self.context,
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
                        [{'host': 'host2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #8
    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context,
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
Example #9
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        fake_props = {"instance_properties": {"uuid": self.instance_uuid}}
        scheduler_utils.setup_instance_group(self.context, fake_props, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndRaise(
            exception.NoValidHost(reason="")
        )

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #10
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid]).AndRaise(
                        exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #11
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid]).AndRaise(
                        exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #12
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
            exception.NoValidHost(reason="")
        )

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #13
    def _test_find_destination_retry_livem_checks_fail(self, error):
        self.flags(migrate_max_retries=1)
        self.instance.numa_topology = None
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
        self.mox.StubOutWithMock(self.instance, 'is_volume_backed')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.instance.is_volume_backed().AndReturn(False)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1',
                  'limits': 'fake-limits'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host(
            "host1", limits='fake-limits').AndRaise(error)

        self.instance.is_volume_backed().AndReturn(False)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host2', 'nodename': 'node2',
                  'limits': 'fake-limits'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2", limits='fake-limits')

        self.mox.ReplayAll()
        with mock.patch.object(self.task,
                               '_remove_host_allocations') as remove_allocs:
            self.assertEqual(("host2", 'fake-limits'),
                             self.task._find_destination())
        # Should have removed allocations for the first host.
        remove_allocs.assert_called_once_with('host1', 'node1', self.fake_spec)
Example #14
    def drop_resize_claim(self, context, instance, instance_type=None,
                          image_meta=None, prefix='new_'):
        """Remove usage for an incoming/outgoing migration."""
        if instance['uuid'] in self.tracked_migrations:
            migration, itype = self.tracked_migrations.pop(instance['uuid'])

            if not instance_type:
                ctxt = context.elevated()
                instance_type = self._get_instance_type(ctxt, instance, prefix)

            if image_meta is None:
                image_meta = utils.get_image_from_system_metadata(
                        instance['system_metadata'])

            if instance_type['id'] == itype['id']:
                sys_metadata = instance['system_metadata']
                if instance['task_state'] == task_states.RESIZE_REVERTING:
                    numa_topology = sys_metadata.get('new_numa_topo')
                else:
                    numa_topology = sys_metadata.get('old_numa_topo')
                if numa_topology:
                    numa_topology = jsonutils.loads(numa_topology)
                usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
                if self.pci_tracker:
                    self.pci_tracker.update_pci_for_migration(context,
                                                              instance,
                                                              sign=-1)
                self._update_usage(context, self.compute_node, usage, sign=-1)

                ctxt = context.elevated()
                self._update(ctxt, self.compute_node)
Example #15
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        request_spec = scheduler_utils.build_request_spec(self.context, image,
                                                          [self.instance])

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            filter_properties = {'ignore_hosts': attempted_hosts}
            scheduler_utils.setup_instance_group(self.context, request_spec,
                                                 filter_properties)
            host = self.scheduler_client.select_destinations(self.context,
                            request_spec, filter_properties)[0]['host']
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except exception.Invalid as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                host = None
        return host
Example #16
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        request_spec = scheduler_utils.build_request_spec(
            self.context, image, [self.instance])

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            filter_properties = {'ignore_hosts': attempted_hosts}
            scheduler_utils.setup_instance_group(self.context, request_spec,
                                                 filter_properties)
            host = self.scheduler_client.select_destinations(
                self.context, request_spec, filter_properties)[0]['host']
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except exception.Invalid as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s", {
                    "host": host,
                    "e": e
                })
                attempted_hosts.append(host)
                host = None
        return host
Example #17
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        request_spec = scheduler_utils.build_request_spec(
            self.context, image, [self.instance])

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            filter_properties = {'ignore_hosts': attempted_hosts}
            scheduler_utils.setup_instance_group(self.context, request_spec,
                                                 filter_properties)
            # TODO(sbauza): Hydrate here the object until we modify the
            # scheduler.utils methods to directly use the RequestSpec object
            spec_obj = objects.RequestSpec.from_primitives(
                self.context, request_spec, filter_properties)
            host = self.scheduler_client.select_destinations(
                self.context, spec_obj)[0]['host']
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s", {
                    "host": host,
                    "e": e
                })
                attempted_hosts.append(host)
                host = None
        return host
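
Note: the retry loop in the _find_destination variants above delegates its exit condition to _check_not_over_max_retries, which the migrate_max_retries tests on this page exercise. The following is a rough sketch of what that helper is assumed to do, not the verbatim nova implementation; CONF, exception and the migration attribute stand for the usual nova module-level names.

    def _check_not_over_max_retries(self, attempted_hosts):
        # -1 is assumed to mean "retry forever"; otherwise compare the number
        # of hosts already tried (excluding the source) against the cap.
        if CONF.migrate_max_retries == -1:
            return
        retries = len(attempted_hosts) - 1
        if retries > CONF.migrate_max_retries:
            if self.migration:
                self.migration.status = 'failed'
                self.migration.save()
            reason = ("Exceeded max scheduling retries %d for instance %s "
                      "during live migration" % (retries, self.instance.uuid))
            raise exception.MaxRetriesExceeded(reason=reason)

This matches the retry tests above: with migrate_max_retries=0 the first failed host already exhausts the budget, raising MaxRetriesExceeded and marking the migration 'failed', while migrate_max_retries=1 still allows one more scheduling pass.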
Example #18
File: manager.py Project: ordbogen/nova
    def _cold_migrate(self, context, instance, flavor, filter_properties, reservations, clean_shutdown):
        image = utils.get_image_from_system_metadata(instance.system_metadata)

        request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor)
        task = self._build_cold_migrate_task(
            context, instance, flavor, filter_properties, request_spec, reservations, clean_shutdown
        )
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {"vm_state": vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {"vm_state": vm_state, "task_state": None}
                self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {"vm_state": instance.vm_state, "task_state": None}
                self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
Example #19
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    image_system_meta = {}
    # In case of boot from volume, image_id_or_uri may be None or ''
    if image_id_or_uri is not None and image_id_or_uri != '':
        # If the base image is still available, get its metadata
        try:
            image = image_api.get(context, image_id_or_uri)
        except (exception.ImageNotAuthorized,
                exception.ImageNotFound,
                exception.Invalid) as e:
            LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
                        {"image_id": image_id_or_uri, "error": e},
                        instance=instance)
        else:
            flavor = instance.get_flavor()
            image_system_meta = utils.get_system_metadata_from_image(image,
                                                                     flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
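
Note: a hypothetical call-site sketch for get_image_metadata above; context, image_api and instance are placeholders for a request context, nova's image API object and an Instance object, none of which are defined in this example.

# Merge the base image's metadata with the instance's system metadata.
image_meta = get_image_metadata(context, image_api,
                                instance.image_ref, instance)
min_ram = image_meta.get('min_ram')
custom_props = image_meta.get('properties', {})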
Example #20
    def drop_resize_claim(self,
                          context,
                          instance,
                          instance_type=None,
                          image_meta=None,
                          prefix='new_'):
        """Remove usage for an incoming/outgoing migration."""
        if instance['uuid'] in self.tracked_migrations:
            migration, itype = self.tracked_migrations.pop(instance['uuid'])

            if not instance_type:
                ctxt = context.elevated()
                instance_type = self._get_instance_type(ctxt, instance, prefix)

            if image_meta is None:
                image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])

            if instance_type['id'] == itype['id']:
                numa_topology = (
                    hardware.VirtNUMAInstanceTopology.get_constraints(
                        itype, image_meta))
                usage = self._get_usage_dict(itype,
                                             numa_topology=numa_topology)
                if self.pci_tracker:
                    self.pci_tracker.update_pci_for_migration(context,
                                                              instance,
                                                              sign=-1)
                self._update_usage(context, self.compute_node, usage, sign=-1)

                ctxt = context.elevated()
                self._update(ctxt, self.compute_node)
Example #21
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    image_system_meta = {}
    # In case of boot from volume, image_id_or_uri may be None or ''
    if image_id_or_uri is not None and image_id_or_uri != '':
        # If the base image is still available, get its metadata
        try:
            image = image_api.get(context, image_id_or_uri)
        except (exception.ImageNotAuthorized, exception.ImageNotFound,
                exception.Invalid) as e:
            LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"), {
                "image_id": image_id_or_uri,
                "error": e
            },
                        instance=instance)
        else:
            flavor = instance.get_flavor()
            image_system_meta = utils.get_system_metadata_from_image(
                image, flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Example #22
    def drop_resize_claim(self, context, instance, instance_type=None, image_meta=None, prefix="new_"):
        """Remove usage for an incoming/outgoing migration."""
        if instance["uuid"] in self.tracked_migrations:
            migration, itype = self.tracked_migrations.pop(instance["uuid"])

            if not instance_type:
                ctxt = context.elevated()
                instance_type = self._get_instance_type(ctxt, instance, prefix)

            if image_meta is None:
                image_meta = utils.get_image_from_system_metadata(instance["system_metadata"])

            if instance_type["id"] == itype["id"]:
                sys_metadata = instance["system_metadata"]
                if instance["task_state"] == task_states.RESIZE_REVERTING:
                    numa_topology = sys_metadata.get("new_numa_topo")
                else:
                    numa_topology = sys_metadata.get("old_numa_topo")
                if numa_topology:
                    numa_topology = jsonutils.loads(numa_topology)
                usage = self._get_usage_dict(itype, numa_topology=numa_topology)
                if self.pci_tracker:
                    self.pci_tracker.update_pci_for_migration(context, instance, sign=-1)
                self._update_usage(context, self.compute_node, usage, sign=-1)

                ctxt = context.elevated()
                self._update(ctxt, self.compute_node)
Example #23
    def drop_resize_claim(self, context, instance, instance_type=None,
                          image_meta=None, prefix='new_'):
        """Remove usage for an incoming/outgoing migration."""
        if instance['uuid'] in self.tracked_migrations:
            migration, itype = self.tracked_migrations.pop(instance['uuid'])

            if not instance_type:
                ctxt = context.elevated()
                instance_type = self._get_instance_type(ctxt, instance, prefix)

            if image_meta is None:
                image_meta = utils.get_image_from_system_metadata(
                        instance['system_metadata'])

            if instance_type['id'] == itype['id']:
                numa_topology = (
                        hardware.VirtNUMAInstanceTopology.get_constraints(
                            itype, image_meta))
                usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
                if self.pci_tracker:
                    self.pci_tracker.update_pci_for_migration(context,
                                                              instance,
                                                              sign=-1)
                self._update_usage(context, self.compute_node, usage, sign=-1)

                ctxt = context.elevated()
                self._update(ctxt, self.compute_node)
Example #24
    def test_find_destination_retry_with_failed_migration_pre_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        fake_spec = objects.RequestSpec()
        objects.RequestSpec.from_primitives(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, fake_spec).AndReturn([{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
                .AndRaise(exception.MigrationPreCheckError("reason"))

        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
        fake_spec = objects.RequestSpec()
        objects.RequestSpec.from_primitives(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, fake_spec).AndReturn([{'host': 'host2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #25
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration['instance_uuid']
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = (migration['dest_compute'] == self.host and
                    migration['dest_node'] == self.nodename)
        outbound = (migration['source_compute'] == self.host and
                    migration['source_node'] == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration['new_instance_type_id'])
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration['old_instance_type_id'])

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration['new_instance_type_id'])

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration['old_instance_type_id'])

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])

        if itype:
            numa_topology = (
                    hardware.VirtNUMAInstanceTopology.get_constraints(
                        itype, image_meta))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources['pci_stats'] = jsonutils.dumps(
                        self.pci_tracker.stats)
            else:
                resources['pci_stats'] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
Example #26
    def _update_usage_from_migration(self, context, instance, image_meta, resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration["instance_uuid"]
        LOG.audit(_("Updating from migration %s") % uuid)

        incoming = migration["dest_compute"] == self.host and migration["dest_node"] == self.nodename
        outbound = migration["source_compute"] == self.host and migration["source_node"] == self.nodename
        same_node = incoming and outbound
        instance = objects.Instance.get_by_uuid(context, uuid, expected_attrs=["system_metadata"])
        record = self.tracked_instances.get(uuid, None)
        itype = None
        numa_topology = None
        core_bind = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if instance["instance_type_id"] == migration["old_instance_type_id"]:
                itype = self._get_instance_type(context, instance, "new_", migration["new_instance_type_id"])
                numa_topology = instance["system_metadata"].get("new_numa_topo")
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, "old_", migration["old_instance_type_id"])
                numa_topology = instance["system_metadata"].get("old_numa_topo")

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, "new_", migration["new_instance_type_id"])
            numa_topology = instance["system_metadata"].get("new_numa_topo")

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, "old_", migration["old_instance_type_id"])
            numa_topology = instance["system_metadata"].get("old_numa_topo")

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(instance["system_metadata"])

        if itype:
            host_topology = resources.get("numa_topology")
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            if numa_topology:
                numa_topology = jsonutils.loads(numa_topology)
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)

            if self.pci_tracker:
                if same_node or not outbound:
                    self.pci_tracker.update_pci_for_migration(context, instance)

            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources["pci_stats"] = jsonutils.dumps(self.pci_tracker.stats)
            else:
                resources["pci_stats"] = jsonutils.dumps([])
            self.tracked_migrations[uuid] = (migration, itype)
Example #27
    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor")

        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        fake_props = {"instance_properties": {"uuid": self.instance_uuid}}
        scheduler_utils.setup_instance_group(self.context, fake_props, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn([{"host": "host1"}])
        self.task._check_compatible_with_source_hypervisor("host1").AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        with mock.patch.object(self.task.migration, "save") as save_mock:
            self.assertRaises(exception.MaxRetriesExceeded, self.task._find_destination)
            self.assertEqual("failed", self.task.migration.status)
            save_mock.assert_called_once_with()
Example #28
File: manager.py Project: alvarolopez/nova
    def _cold_migrate(self, context, instance, flavor, filter_properties, reservations, clean_shutdown):
        image = utils.get_image_from_system_metadata(instance.system_metadata)

        request_spec = scheduler_utils.build_request_spec(context, image, [instance], instance_type=flavor)

        quotas = objects.Quotas.from_reservations(context, reservations, instance=instance)
        try:
            scheduler_utils.setup_instance_group(context, request_spec, filter_properties)
            scheduler_utils.populate_retry(filter_properties, instance.uuid)
            hosts = self.scheduler_client.select_destinations(context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {"vm_state": vm_state, "task_state": None}
            self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
            quotas.rollback()

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor["id"] == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {"vm_state": vm_state, "task_state": None}
                self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
                quotas.rollback()

        try:
            scheduler_utils.populate_filter_properties(filter_properties, host_state)
            # context is not serializable
            filter_properties.pop("context", None)

            (host, node) = (host_state["host"], host_state["nodename"])
            self.compute_rpcapi.prep_resize(
                context,
                image,
                instance,
                flavor,
                host,
                reservations,
                request_spec=request_spec,
                filter_properties=filter_properties,
                node=node,
                clean_shutdown=clean_shutdown,
            )
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {"vm_state": instance.vm_state, "task_state": None}
                self._set_vm_state_and_notify(context, instance.uuid, "migrate_server", updates, ex, request_spec)
                quotas.rollback()
Example #29
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(objects.RequestSpec, "reset_forced_destinations")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor")
        self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")

        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        fake_props = {"instance_properties": {"uuid": self.instance_uuid}}
        scheduler_utils.setup_instance_group(self.context, fake_props, {"ignore_hosts": [self.instance_host]})
        self.fake_spec.reset_forced_destinations()
        self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn([{"host": "host1"}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Example #30
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(
            self.context, mox.IgnoreArg(),
            mox.IgnoreArg()).AndRaise(exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #31
File: test_utils.py Project: LeoDuo/nova
    def test_non_inheritable_image_properties(self):
        sys_meta = self.get_system_metadata()
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"

        CONF.non_inheritable_image_properties = ["foo1"]

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the foo1 key has not been inherited
        self.assertNotIn("foo1", image)
Example #32
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "build_request_spec")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor")
        self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")

        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(self.context, {}, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
            [{"host": "host1"}]
        )
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Example #33
    def test_non_inheritable_image_properties(self):
        sys_meta = self.get_system_metadata()
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"

        CONF.non_inheritable_image_properties = ["foo1"]

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the foo1 key has not been inherited
        self.assertNotIn("foo1", image)
Example #34
    def test_dont_inherit_empty_values(self):
        sys_meta = self.get_system_metadata()

        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            sys_meta[sys_key] = None

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(key, image)
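
Note: the utils tests above pin down how get_image_from_system_metadata filters system-metadata keys. The following is an illustrative, simplified sketch of that behaviour, not the actual nova.utils implementation; the constant names mirror the ones used in the tests.

SM_IMAGE_PROP_PREFIX = 'image_'
SM_INHERITABLE_KEYS = ('min_ram', 'min_disk', 'disk_format',
                       'container_format')

def image_from_sys_meta(sys_meta, non_inheritable=()):
    image = {'properties': {}}
    for key, value in sys_meta.items():
        # Empty values are never inherited, and only image_-prefixed keys count.
        if value is None or not key.startswith(SM_IMAGE_PROP_PREFIX):
            continue
        name = key[len(SM_IMAGE_PROP_PREFIX):]
        if name in SM_INHERITABLE_KEYS:
            image[name] = value              # promoted to top-level image keys
        elif name not in non_inheritable:
            image['properties'][name] = value
    return image

Passing a property name in non_inheritable drops the corresponding image_-prefixed entry, which is the behaviour the assertNotIn checks above verify.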
Example #35
File: manager.py Project: JerryDog/nova
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown, request_spec):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)

        # NOTE(sbauza): If a reschedule occurs when prep_resize(), then
        # it only provides filter_properties legacy dict back to the
        # conductor with no RequestSpec part of the payload.
        if not request_spec:
            request_spec = objects.RequestSpec.from_components(
                context, instance.uuid, image,
                instance.flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, None, instance.availability_zone)

        task = self._build_cold_migrate_task(context, instance, flavor,
                                             request_spec,
                                             reservations, clean_shutdown)
        # TODO(sbauza): Provide directly the RequestSpec object once
        # _set_vm_state_and_notify() accepts it
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
Example #36
    def test_find_destination_retry_with_invalid_livem_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, "get_image_from_system_metadata")
        self.mox.StubOutWithMock(scheduler_utils, "setup_instance_group")
        self.mox.StubOutWithMock(self.task.scheduler_client, "select_destinations")
        self.mox.StubOutWithMock(self.task, "_check_compatible_with_source_hypervisor")
        self.mox.StubOutWithMock(self.task, "_call_livem_checks_on_host")

        utils.get_image_from_system_metadata(self.instance.system_metadata).AndReturn("image")
        fake_props = {"instance_properties": {"uuid": self.instance_uuid}}
        scheduler_utils.setup_instance_group(self.context, fake_props, {"ignore_hosts": [self.instance_host]})
        self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn([{"host": "host1"}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1").AndRaise(exception.Invalid)

        self.task.scheduler_client.select_destinations(self.context, self.fake_spec).AndReturn([{"host": "host2"}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #37
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown, request_spec):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)

        # NOTE(sbauza): If a reschedule occurs when prep_resize(), then
        # it only provides filter_properties legacy dict back to the
        # conductor with no RequestSpec part of the payload.
        if not request_spec:
            request_spec = objects.RequestSpec.from_components(
                context, instance.uuid, image,
                instance.flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, None, instance.availability_zone)

        task = self._build_cold_migrate_task(context, instance, flavor,
                                             request_spec,
                                             reservations, clean_shutdown)
        # TODO(sbauza): Provide directly the RequestSpec object once
        # _set_vm_state_and_notify() accepts it
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
Example #38
File: test_utils.py Project: gweuieon/nova
    def test_dont_inherit_empty_values(self):
        sys_meta = self.get_system_metadata()

        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            sys_meta[sys_key] = None

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(key, image)
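To make the rule exercised by this test concrete, here is a minimal, self-contained sketch of the conversion. The prefix and key set mirror utils.SM_IMAGE_PROP_PREFIX ('image_') and a typical utils.SM_INHERITABLE_KEYS, but the function body itself is an illustrative assumption, not the real implementation:

SM_IMAGE_PROP_PREFIX = 'image_'
SM_INHERITABLE_KEYS = ('min_ram', 'min_disk', 'disk_format', 'container_format')


def image_from_system_metadata(sys_meta):
    # Illustrative only: empty (None) values are skipped, inheritable keys
    # become top-level image attributes, everything else becomes a property.
    image = {'properties': {}}
    for key, value in sys_meta.items():
        if value is None or not key.startswith(SM_IMAGE_PROP_PREFIX):
            continue
        short = key[len(SM_IMAGE_PROP_PREFIX):]
        if short in SM_INHERITABLE_KEYS:
            image[short] = value
        else:
            image['properties'][short] = value
    return image


# Empty values never make it into the result, matching the test above:
assert 'min_ram' not in image_from_system_metadata({'image_min_ram': None})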
Example #39
    def test_find_destination_retry_with_failed_migration_pre_checks(self):
        self.flags(migrate_max_retries=1)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")\
                .AndRaise(exception.MigrationPreCheckError("reason"))

        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host2', 'nodename': 'node2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        with mock.patch.object(self.task,
                               '_remove_host_allocations') as remove_allocs:
            self.assertEqual(("host2", "node2"), self.task._find_destination())
        # Should have removed allocations for the first host.
        remove_allocs.assert_called_once_with('host1', 'node1')
Example #40
    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.instance.numa_topology = None
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.instance, 'is_volume_backed')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.instance.is_volume_backed().AndReturn(False)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1',
                  'limits': 'fake-limits'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        with test.nested(
                mock.patch.object(self.task.migration, 'save'),
                mock.patch.object(
                    self.task,
                    '_remove_host_allocations')) as (save_mock, remove_allocs):
            self.assertRaises(exception.MaxRetriesExceeded,
                              self.task._find_destination)
            self.assertEqual('failed', self.task.migration.status)
            save_mock.assert_called_once_with()
            # Should have removed allocations for the first host.
            remove_allocs.assert_called_once_with('host1', 'node1',
                                                  self.fake_spec)
Example #41
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        # TODO(sbauza): Remove that once setup_instance_group() accepts a
        # RequestSpec object
        request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                                 filter_properties)
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock that the old way
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image,
                self.instance.flavor, self.instance.numa_topology,
                self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone
            )
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                host = self.scheduler_client.select_destinations(self.context,
                        request_spec, [self.instance.uuid])[0]['host']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
                # scheduling algorithm is R-R, we can let other scheduler try.
                # Note(ShaoHe Feng) There are types of RemoteError, such as
                # NoSuchMethod, UnsupportedVersion, we can distinguish it by
                # ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                host = None
        return host
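The retry loop above relies on _check_not_over_max_retries(), which is not included in this snippet. The following is a hedged sketch of behaviour consistent with the retry tests shown earlier (the migration record is marked 'failed' and saved before MaxRetriesExceeded is raised); the exact config handling and message are assumptions, and CONF/exception are taken to be the usual nova.conf and nova.exception module globals.

    def _check_not_over_max_retries(self, attempted_hosts):
        # Hedged sketch, not necessarily the canonical implementation.
        # Assumption: migrate_max_retries == -1 means "retry forever".
        if CONF.migrate_max_retries == -1:
            return

        # attempted_hosts is seeded with the source host, so subtract it.
        retries = len(attempted_hosts) - 1
        if retries > CONF.migrate_max_retries:
            if self.migration:
                # Mirror the tests shown earlier: flag the migration as
                # failed and persist it before raising.
                self.migration.status = 'failed'
                self.migration.save()
            msg = ('Exceeded max scheduling retries %d for instance %s '
                   'during live migration' % (retries, self.instance.uuid))
            raise exception.MaxRetriesExceeded(reason=msg)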
Example #42
    def _find_destination(self):
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        # TODO(sbauza): Remove that once setup_instance_group() accepts a
        # RequestSpec object
        request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                                 filter_properties)
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock that the old way
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image,
                self.instance.flavor, self.instance.numa_topology,
                self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone
            )
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            try:
                host = self.scheduler_client.select_destinations(self.context,
                                request_spec)[0]['host']
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
                # scheduling algorithm is R-R, we can let other scheduler try.
                # Note(ShaoHe Feng) There are types of RemoteError, such as
                # NoSuchMethod, UnsupportedVersion, we can distinguish it by
                # ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                host = None
        return host
Example #43
    def test_find_destination_works(self):
        self.instance.numa_topology = None
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec,
                                 'reset_forced_destinations')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
        self.mox.StubOutWithMock(self.instance, 'is_volume_backed')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.fake_spec.reset_forced_destinations()
        self.instance.is_volume_backed().AndReturn(False)
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
                [{'host': 'host1', 'nodename': 'node1',
                  'limits': 'fake-limits'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1", limits='fake-limits')

        self.mox.ReplayAll()
        self.assertEqual(("host1", 'fake-limits'),
                         self.task._find_destination())

        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)
        # Make sure the spec was updated to include the project_id.
        self.assertEqual(self.fake_spec.project_id, self.instance.project_id)
Example #44
    def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
        """Builds a RequestSpec that can be passed to select_destinations

        Used when calling the scheduler to pick a destination host for live
        migrating the instance.

        :param attempted_hosts: List of host names to ignore in the scheduler.
            This is generally at least seeded with the source host.
        :returns: nova.objects.RequestSpec object
        """
        if not self.request_spec:
            # NOTE(sbauza): We were unable to find an original RequestSpec
            # object - probably because the instance is old.
            # We need to mock that the old way
            image = utils.get_image_from_system_metadata(
                self.instance.system_metadata)
            filter_properties = {'ignore_hosts': attempted_hosts}
            request_spec = objects.RequestSpec.from_components(
                self.context, self.instance.uuid, image,
                self.instance.flavor, self.instance.numa_topology,
                self.instance.pci_requests,
                filter_properties, None, self.instance.availability_zone
            )
        else:
            request_spec = self.request_spec
            # NOTE(sbauza): Force_hosts/nodes needs to be reset
            # if we want to make sure that the next destination
            # is not forced to be the original host
            request_spec.reset_forced_destinations()
        scheduler_utils.setup_instance_group(self.context, request_spec)

        # We currently only support live migrating to hosts in the same
        # cell that the instance lives in, so we need to tell the scheduler
        # to limit the applicable hosts based on cell.
        cell_mapping = self._get_source_cell_mapping()
        LOG.debug('Requesting cell %(cell)s while live migrating',
                  {'cell': cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in request_spec and
                request_spec.requested_destination):
            request_spec.requested_destination.cell = cell_mapping
        else:
            request_spec.requested_destination = objects.Destination(
                cell=cell_mapping)

        request_spec.ensure_project_and_user_id(self.instance)
        request_spec.ensure_network_metadata(self.instance)
        compute_utils.heal_reqspec_is_bfv(
            self.context, request_spec, self.instance)

        return request_spec
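For orientation, here is a hedged, composite sketch of how a caller might consume the spec built by this helper. It is inferred from the retry tests above, not the canonical Nova code, and any helper names beyond those already shown are assumptions.

    def _find_destination(self):
        # Composite sketch: retry scheduling while ignoring hosts that failed
        # the hypervisor compatibility or live-migration pre-checks.
        attempted_hosts = [self.source]
        request_spec = self._get_request_spec_for_select_destinations(
            attempted_hosts)
        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            request_spec.ignore_hosts = attempted_hosts
            selection = self.scheduler_client.select_destinations(
                self.context, request_spec, [self.instance.uuid])[0]
            host, node = selection['host'], selection['nodename']
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(
                    host, limits=selection.get('limits'))
            except (exception.Invalid,
                    exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host %(host)s: %(e)s",
                          {'host': host, 'e': e})
                # Free any placement allocation made for the rejected host.
                self._remove_host_allocations(host, node)
                attempted_hosts.append(host)
                host = None
        return host, node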
Example #45
    def test_find_destination_works(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
        scheduler_utils.setup_instance_group(
            self.context, fake_props, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(
            self.context, self.fake_spec).AndReturn([{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")
        self.task._call_livem_checks_on_host("host1")

        self.mox.ReplayAll()
        self.assertEqual("host1", self.task._find_destination())
Example #46
    def from_instance(cls, instance):
        """Create instance from instance system metadata

        :param instance: Instance object

        Creates a new object instance, initializing from the
        system metadata "image_*" properties associated with
        instance

        :returns: an ImageMeta instance
        """
        sysmeta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(sysmeta)
        return cls.from_dict(image_meta)
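A brief, hedged usage sketch; the class is referred to as ImageMeta here and the instance variable is an illustrative assumption:

    # Hypothetical call site: recover the boot image's metadata purely from
    # the instance record, without contacting the image service.
    image_meta = ImageMeta.from_instance(instance)
    # Fields such as image_meta.disk_format come from the inherited
    # "image_disk_format" system-metadata key, when it was persisted.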
Example #47
File: image_meta.py  Project: scholery/nova
    def from_instance(cls, instance):
        """Create instance from instance system metadata

        :param instance: Instance object

        Creates a new object instance, initializing from the
        system metadata "image_*" properties associated with
        instance

        :returns: an ImageMeta instance
        """
        sysmeta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(sysmeta)
        return cls.from_dict(image_meta)
Example #48
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     resources, migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s") % uuid)

        incoming = migration.dest_compute == self.host and migration.dest_node == self.nodename
        outbound = migration.source_compute == self.host and migration.source_node == self.nodename
        same_node = incoming and outbound

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if instance["instance_type_id"] == migration.old_instance_type_id:
                itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, "new_", migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, "old_", migration.old_instance_type_id)

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(instance["system_metadata"])

        if itype:
            host_topology = resources.get("numa_topology")
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = hardware.numa_fit_instance_to_host(host_topology, numa_topology)
            usage = self._get_usage_dict(itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(context, resources, usage)
            if self.pci_tracker:
                resources["pci_device_pools"] = self.pci_tracker.stats
            else:
                resources["pci_device_pools"] = []
            self.tracked_migrations[uuid] = (migration, itype)
Example #49
    def test_find_destination_when_runs_out_of_hosts(self):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        fake_spec = objects.RequestSpec()
        objects.RequestSpec.from_primitives(
            self.context,
            mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                fake_spec).AndRaise(
                        exception.NoValidHost(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
Example #50
    def test_find_destination_retry_exceeds_max(self):
        self.flags(migrate_max_retries=0)
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                '_check_compatible_with_source_hypervisor')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.setup_instance_group(self.context, self.fake_spec)
        self.task.scheduler_client.select_destinations(self.context,
                self.fake_spec, [self.instance.uuid]).AndReturn(
                        [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(exception.DestinationHypervisorTooOld)

        self.mox.ReplayAll()
        with mock.patch.object(self.task.migration, 'save') as save_mock:
            self.assertRaises(exception.MaxRetriesExceeded,
                              self.task._find_destination)
            self.assertEqual('failed', self.task.migration.status)
            save_mock.assert_called_once_with()
Example #51
    def _test_find_destination_retry_hypervisor_raises(self, error):
        self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.task.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.task,
                                 '_check_compatible_with_source_hypervisor')
        self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')

        utils.get_image_from_system_metadata(
            self.instance.system_metadata).AndReturn("image")
        scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
                                           mox.IgnoreArg()).AndReturn({})
        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host]})
        self.task.scheduler_client.select_destinations(
            self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
                [{'host': 'host1'}])
        self.task._check_compatible_with_source_hypervisor("host1")\
                .AndRaise(error)

        scheduler_utils.setup_instance_group(
            self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
        self.task.scheduler_client.select_destinations(
            self.context, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
                [{'host': 'host2'}])
        self.task._check_compatible_with_source_hypervisor("host2")
        self.task._call_livem_checks_on_host("host2")

        self.mox.ReplayAll()
        self.assertEqual("host2", self.task._find_destination())
Example #52
    def test_image_from_system_metadata(self):
        sys_meta = self.get_system_metadata()
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
        sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that we inherit all the needed keys
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image[key], sys_meta.get(sys_key))

        # Verify that we inherit the rest of metadata as properties
        self.assertIn("properties", image)

        for key, value in image["properties"].iteritems():
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image["properties"][key], sys_meta[sys_key])
Example #53
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)

        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=flavor)
        task = self._build_cold_migrate_task(context, instance, flavor,
                                             filter_properties, request_spec,
                                             reservations, clean_shutdown)
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, request_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, request_spec)
Example #54
def get_image_metadata(context, image_service, image_id, instance):
    # If the base image is still available, get its metadata
    try:
        image = image_service.show(context, image_id)
    except Exception as e:
        LOG.warning(_("Can't access image %(image_id)s: %(error)s"),
                    {"image_id": image_id, "error": e}, instance=instance)
        image_system_meta = {}
    else:
        flavor = flavors.extract_flavor(instance)
        image_system_meta = utils.get_system_metadata_from_image(image, flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
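The helper above leans on utils.get_system_metadata_from_image for the opposite direction of the conversion. As a rough, hedged illustration only (the prefix and the inheritable-key set are assumptions, and the real helper also folds in flavor limits), the flattening step can be pictured like this:

def system_metadata_from_image(image, prefix='image_'):
    # Illustrative sketch: flatten an image dict back into "image_*" keys
    # suitable for storing in instance system metadata.
    sys_meta = {}
    for key in ('min_ram', 'min_disk', 'disk_format', 'container_format'):
        if image.get(key) is not None:
            sys_meta[prefix + key] = image[key]
    for key, value in image.get('properties', {}).items():
        sys_meta[prefix + key] = value
    return sys_meta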
Example #55
    def from_instance(cls, instance):
        """Create instance from instance system metadata

        :param instance: Instance object

        Creates a new object instance, initializing from the
        system metadata "image_*" properties associated with
        instance

        :returns: an ImageMeta instance
        """
        sysmeta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(sysmeta)

        # NOTE(lyarwood): Provide the id of the image in image_meta if it
        # wasn't persisted in the system_metadata of the instance previously.
        # This is only provided to allow users of image_meta to avoid the need
        # to pass around references to instance.image_ref alongside image_meta.
        if image_meta.get('id') is None and instance.image_ref:
            image_meta['id'] = instance.image_ref

        return cls.from_dict(image_meta)
Example #56
def get_image_metadata(context, image_api, image_id_or_uri, instance):
    # If the base image is still available, get its metadata
    try:
        image = image_api.get(context, image_id_or_uri)
    except (exception.ImageNotAuthorized, exception.ImageNotFound,
            exception.Invalid) as e:
        LOG.warning(_("Can't access image %(image_id)s: %(error)s"), {
            "image_id": image_id_or_uri,
            "error": e
        },
                    instance=instance)
        image_system_meta = {}
    else:
        flavor = flavors.extract_flavor(instance)
        image_system_meta = utils.get_system_metadata_from_image(image, flavor)

    # Get the system metadata from the instance
    system_meta = utils.instance_sys_meta(instance)

    # Merge the metadata from the instance with the image's, if any
    system_meta.update(image_system_meta)

    # Convert the system metadata to image metadata
    return utils.get_image_from_system_metadata(system_meta)
Example #57
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown, request_spec):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)

        # NOTE(sbauza): If a reschedule occurs during prep_resize(), only the
        # legacy filter_properties dict is provided back to the conductor,
        # with no RequestSpec in the payload.
        if not request_spec:
            # Make sure we hydrate a new RequestSpec object with the new flavor
            # and not the nested one from the instance
            request_spec = objects.RequestSpec.from_components(
                context, instance.uuid, image,
                flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, None, instance.availability_zone)
        else:
            # NOTE(sbauza): A resize means a new flavor, so we need to update
            # the original RequestSpec object to make sure the scheduler
            # verifies the right flavor and not the original one.
            request_spec.flavor = flavor

        task = self._build_cold_migrate_task(context, instance, flavor,
                                             request_spec,
                                             reservations, clean_shutdown)
        # TODO(sbauza): Provide directly the RequestSpec object once
        # _set_vm_state_and_notify() accepts it
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        # NOTE(sbauza): Persist the new flavor after a successful scheduler
        # call, but only if nothing went wrong.
        if request_spec.obj_what_changed():
            request_spec.save()
Example #58
    def _update_usage_from_migration(self, context, instance, image_meta,
                                     migration):
        """Update usage for a single migration.  The record may
        represent an incoming or outbound migration.
        """
        uuid = migration.instance_uuid
        LOG.info(_LI("Updating from migration %s") % uuid)

        incoming = (migration.dest_compute == self.host and
                    migration.dest_node == self.nodename)
        outbound = (migration.source_compute == self.host and
                    migration.source_node == self.nodename)
        same_node = (incoming and outbound)

        record = self.tracked_instances.get(uuid, None)
        itype = None

        if same_node:
            # same node resize. record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration.old_instance_type_id):
                itype = self._get_instance_type(context, instance, 'new_',
                        migration.new_instance_type_id)
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = self._get_instance_type(context, instance, 'old_',
                        migration.old_instance_type_id)

        elif incoming and not record:
            # instance has not yet migrated here:
            itype = self._get_instance_type(context, instance, 'new_',
                    migration.new_instance_type_id)

        elif outbound and not record:
            # instance migrated, but record usage for a possible revert:
            itype = self._get_instance_type(context, instance, 'old_',
                    migration.old_instance_type_id)

        if image_meta is None:
            image_meta = utils.get_image_from_system_metadata(
                    instance['system_metadata'])

        if itype:
            host_topology = self.compute_node.get('numa_topology')
            if host_topology:
                host_topology = objects.NUMATopology.obj_from_db_obj(
                        host_topology)
            numa_topology = hardware.numa_get_constraints(itype, image_meta)
            numa_topology = (
                    hardware.numa_fit_instance_to_host(
                        host_topology, numa_topology))
            usage = self._get_usage_dict(
                        itype, numa_topology=numa_topology)
            if self.pci_tracker:
                self.pci_tracker.update_pci_for_migration(context, instance)
            self._update_usage(usage)
            if self.pci_tracker:
                self.compute_node['pci_device_pools'] = self.pci_tracker.stats
            else:
                self.compute_node['pci_device_pools'] = []
            self.tracked_migrations[uuid] = (migration, itype)
Example #59
File: manager.py  Project: gweuieon/nova
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown):
        image = utils.get_image_from_system_metadata(instance.system_metadata)

        request_spec = scheduler_utils.build_request_spec(context,
                                                          image, [instance],
                                                          instance_type=flavor)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            scheduler_utils.setup_instance_group(context, request_spec,
                                                 filter_properties)
            scheduler_utils.populate_retry(filter_properties, instance.uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server', updates, ex,
                                          request_spec)
            quotas.rollback()

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor['id'] == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server', updates, ex,
                                              request_spec)
                quotas.rollback()

        try:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)

            (host, node) = (host_state['host'], host_state['nodename'])
            self.compute_rpcapi.prep_resize(
                context,
                image,
                instance,
                flavor,
                host,
                reservations,
                request_spec=request_spec,
                filter_properties=filter_properties,
                node=node,
                clean_shutdown=clean_shutdown)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server', updates, ex,
                                              request_spec)
                quotas.rollback()