Example #1
0
    def create_migration_context(self):
        """Build a MigrationContext for the migration tracked by this claim.

        The context links the instance's current (old) NUMA topology to the
        topology claimed on the destination (new) host.

        :returns: an objects.MigrationContext, or None (after logging a
            warning) when ``self.migration`` is not set, since the record
            requires a migration id.
        """
        if not self.migration:
            # LOG.warn is deprecated in the standard logging library (and
            # oslo.log); LOG.warning is the supported spelling.
            LOG.warning(_LW("Can't create a migration_context record without a "
                            "migration object specified."),
                        instance=self.instance)
            return

        mig_context = objects.MigrationContext(
            context=self.context, instance_uuid=self.instance.uuid,
            migration_id=self.migration.id,
            old_numa_topology=self.instance.numa_topology,
            new_numa_topology=self.claimed_numa_topology)
        return mig_context
    def test_prep_resize_at_dest(self, mock_get_az, mock_task_execute):
        """Tests setting up and executing PrepResizeAtDestTask.

        Verifies that the target cell instance gets the new flavor, AZ and
        old_vm_state set, and that a clone of the MigrationContext returned
        from the task is stored on the source instance with the source cell
        migration id.
        """
        # _setup_target_cell_db set the _target_cell_context and
        # _target_cell_instance variables so fake those out here
        self.task._target_cell_context = mock.sentinel.target_cell_context
        target_inst = objects.Instance(vm_state=vm_states.ACTIVE,
                                       system_metadata={})
        self.task._target_cell_instance = target_inst
        target_cell_migration = objects.Migration(
            # use unique ids for comparisons
            id=self.task.source_migration.id + 1)
        self.assertNotIn('migration_context', self.task.instance)
        mock_task_execute.return_value = objects.MigrationContext(
            migration_id=target_cell_migration.id)

        with test.nested(
                mock.patch.object(self.task,
                                  '_update_migration_from_dest_after_claim'),
                mock.patch.object(self.task.instance, 'save'),
                mock.patch.object(target_inst,
                                  'save')) as (_upd_mig, source_inst_save,
                                               target_inst_save):
            retval = self.task._prep_resize_at_dest(target_cell_migration)

        self.assertIs(retval, _upd_mig.return_value)
        mock_task_execute.assert_called_once_with()
        mock_get_az.assert_called_once_with(
            self.task.context, self.task.host_selection.service_host)
        self.assertIn('PrepResizeAtDestTask', self.task._completed_tasks)
        self.assertIsInstance(
            self.task._completed_tasks['PrepResizeAtDestTask'],
            cross_cell_migrate.PrepResizeAtDestTask)
        # The new_flavor should be set on the target cell instance along with
        # the AZ and old_vm_state.
        self.assertIs(target_inst.new_flavor, self.task.flavor)
        self.assertEqual(vm_states.ACTIVE,
                         target_inst.system_metadata['old_vm_state'])
        self.assertEqual(mock_get_az.return_value,
                         target_inst.availability_zone)
        # A clone of the MigrationContext returned from execute() should be
        # stored on the source instance with the internal context targeted
        # at the source cell context and the migration_id updated.
        # NOTE: this was assertIsNotNone('migration_context', ...), which is
        # always true (a non-empty string is not None; the second argument is
        # just the failure message) -- assertIn actually checks the field.
        self.assertIn('migration_context', self.task.instance)
        self.assertEqual(self.task.source_migration.id,
                         self.task.instance.migration_context.migration_id)
        source_inst_save.assert_called_once_with()
        _upd_mig.assert_called_once_with(target_cell_migration)
Example #3
0
#    under the License.

import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids

from nova import context
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_instance_numa
from nova.tests.unit.objects import test_objects


fake_instance_uuid = uuids.fake

# Canonical MigrationContext fixture shared by the tests in this module:
# pre/post-migration NUMA topology, PCI devices/requests and resources.
fake_migration_context_obj = objects.MigrationContext()
_fixture_attrs = {
    'instance_uuid': fake_instance_uuid,
    'migration_id': 42,
    'new_numa_topology':
        test_instance_numa.fake_obj_numa_topology.obj_clone(),
    'old_numa_topology': None,
    'new_pci_devices': objects.PciDeviceList(),
    'old_pci_devices': None,
    'new_pci_requests': objects.InstancePCIRequests(requests=[
        objects.InstancePCIRequest(count=123, spec=[])]),
    'old_pci_requests': None,
    'new_resources': objects.ResourceList(),
    'old_resources': None,
}
for _attr, _value in _fixture_attrs.items():
    setattr(fake_migration_context_obj, _attr, _value)

fake_db_context = {
Example #4
0
    def _move_claim(self,
                    context,
                    instance,
                    new_instance_type,
                    nodename,
                    move_type=None,
                    image_meta=None,
                    limits=None,
                    migration=None):
        """Indicate that resources are needed for a move to this host.

        Move can be either a migrate/resize, live-migrate or an
        evacuate/rebuild operation.

        :param context: security context
        :param instance: instance object to reserve resources for
        :param new_instance_type: new instance_type being resized to
        :param nodename: The Ironic nodename selected by the scheduler
        :param move_type: move type - can be one of 'migration', 'resize',
                          'live-migration', 'evacuate'; only used when a new
                          migration record is created here
        :param image_meta: instance image metadata
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs
        :param migration: A migration object if one was already created
                          elsewhere for this operation
        :returns: A Claim ticket representing the reserved resources. The
                  caller should either finalize the resource claim or free
                  the resources after the compute operation is finished.
        """
        image_meta = image_meta or {}
        # Either adopt the migration record created elsewhere for this
        # operation, or create a fresh one for this move.
        if migration:
            self._claim_existing_migration(migration, nodename)
        else:
            migration = self._create_migration(context, instance,
                                               new_instance_type, nodename,
                                               move_type)

        if self.disabled(nodename):
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            return claims.NopClaim(migration=migration)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(new_instance_type)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': new_instance_type.memory_mb,
                'overhead': overhead['memory_mb']
            })
        LOG.debug(
            "Disk overhead for %(flavor)d GB instance; %(overhead)d "
            "GB", {
                'flavor': instance.flavor.root_gb,
                'overhead': overhead.get('disk_gb', 0)
            })
        LOG.debug(
            "CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
            "vCPU(s)", {
                'flavor': instance.flavor.vcpus,
                'overhead': overhead.get('vcpus', 0)
            })

        cn = self.compute_nodes[nodename]

        # TODO(moshele): we are recreating the pci requests even if
        # there was no change on resize. This will cause allocating
        # the old/new pci device in the resize phase. In the future
        # we would like to optimise this.
        new_pci_requests = pci_request.get_pci_requests_from_flavor(
            new_instance_type)
        new_pci_requests.instance_uuid = instance.uuid
        # PCI requests come from two sources: instance flavor and
        # SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
        # On resize merge the SR-IOV ports pci_requests with the new
        # instance flavor pci_requests.
        if instance.pci_requests:
            for request in instance.pci_requests.requests:
                if request.alias_name is None:
                    new_pci_requests.requests.append(request)
        # NOTE(review): the MoveClaim constructor appears to consume
        # resources (claim.claimed_numa_topology is read below) rather than
        # only test the claim -- see the TODO(jaypipes) further down.
        claim = claims.MoveClaim(context,
                                 instance,
                                 nodename,
                                 new_instance_type,
                                 image_meta,
                                 self,
                                 cn,
                                 new_pci_requests,
                                 overhead=overhead,
                                 limits=limits)

        claim.migration = migration
        claimed_pci_devices_objs = []
        # pci_tracker may be unset (no PCI passthrough on this host);
        # in that case the claimed device list stays empty.
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            claimed_pci_devices_objs = self.pci_tracker.claim_instance(
                context, new_pci_requests, claim.claimed_numa_topology)
        claimed_pci_devices = objects.PciDeviceList(
            objects=claimed_pci_devices_objs)

        # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
        # constructor flow so the Claim constructor only tests whether
        # resources can be claimed, not consume the resources directly.
        # Record the old (current) and new (claimed) NUMA/PCI state on the
        # instance so the move can later be confirmed or reverted.
        mig_context = objects.MigrationContext(
            context=context,
            instance_uuid=instance.uuid,
            migration_id=migration.id,
            old_numa_topology=instance.numa_topology,
            new_numa_topology=claim.claimed_numa_topology,
            old_pci_devices=instance.pci_devices,
            new_pci_devices=claimed_pci_devices,
            old_pci_requests=instance.pci_requests,
            new_pci_requests=new_pci_requests)
        instance.migration_context = mig_context
        # Persist the migration context before accounting for the usage so
        # the claim survives a service restart.
        instance.save()

        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance, migration,
                                          nodename)
        elevated = context.elevated()
        self._update(elevated, cn)

        return claim