Example #1
 def _create_fake_instance(self):
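     # Bare Instance with an empty PciDeviceList so the attribute is
     # pre-set and the test never lazy-loads PCI devices from the database.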
     self.inst = objects.Instance()
     self.inst.uuid = uuids.instance
     self.inst.pci_devices = objects.PciDeviceList()
Example #2
 def _fake_obj_load_attr(foo, attrname):
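     # Stand-in for Instance.obj_load_attr(): record that a lazy load was
     # requested and hand back an empty PciDeviceList instead of going to
     # the database.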
     if attrname == 'pci_devices':
         self.load_attr_called = True
         foo.pci_devices = objects.PciDeviceList()
Example #3
 def test_pci_device_not_equivalent_with_not_pci_device(self):
     pci_device1 = pci_device.PciDevice.create(None, dev_dict)
     self.assertNotEqual(pci_device1, None)
     self.assertNotEqual(pci_device1, 'foo')
     self.assertNotEqual(pci_device1, 1)
     self.assertNotEqual(pci_device1, objects.PciDeviceList())
Example #4
 def test_create_pci_device_list(self):
     ctxt = context.get_admin_context()
     devobj = pci_device.PciDevice.create(ctxt, dev_dict)
     pci_device_list = objects.PciDeviceList(context=ctxt, objects=[devobj])
     self.assertEqual(1, len(pci_device_list))
     self.assertIsInstance(pci_device_list[0], pci_device.PciDevice)
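For orientation, the resulting list object behaves like a normal Python sequence of PciDevice entries; the sketch below is a hypothetical helper (not part of the example above) that only relies on the list behaviour the test already exercises:

def describe_pci_devices(pci_device_list):
    # Hypothetical helper: PciDeviceList supports len(), indexing and
    # iteration, and every entry is a PciDevice object.
    print("devices in list: %d" % len(pci_device_list))
    for dev in pci_device_list:
        print(dev.address, dev.vendor_id, dev.product_id)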
Example #5
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        # NOTE(danms): We can be called with a dict instead of a
        # SQLAlchemy object, so we have to be careful here
        if hasattr(db_inst, '__dict__'):
            have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
        else:
            have_extra = 'extra' in db_inst and db_inst['extra']

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            if have_extra:
                instance._load_numa_topology(
                    db_inst['extra'].get('numa_topology'))
            else:
                instance.numa_topology = None
        if 'pci_requests' in expected_attrs:
            if have_extra:
                instance._load_pci_requests(
                    db_inst['extra'].get('pci_requests'))
            else:
                instance.pci_requests = None
        if 'vcpu_model' in expected_attrs:
            if have_extra:
                instance._load_vcpu_model(db_inst['extra'].get('vcpu_model'))
            else:
                instance.vcpu_model = None
        if 'ec2_ids' in expected_attrs:
            instance._load_ec2_ids()
        if 'migration_context' in expected_attrs:
            if have_extra:
                instance._load_migration_context(
                    db_inst['extra'].get('migration_context'))
            else:
                instance.migration_context = None
        if 'info_cache' in expected_attrs:
            if db_inst.get('info_cache') is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        if any(x in expected_attrs
               for x in ('flavor', 'old_flavor', 'new_flavor')):
            if have_extra and db_inst['extra'].get('flavor'):
                instance._flavor_from_db(db_inst['extra']['flavor'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst.get('security_groups', []))
            instance['security_groups'] = sec_groups

        if 'tags' in expected_attrs:
            tags = base.obj_make_list(context, objects.TagList(context),
                                      objects.Tag, db_inst['tags'])
            instance['tags'] = tags

        instance.obj_reset_changes()
        return instance
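The base.obj_make_list helper used above for pci_devices, security_groups and tags hydrates each database row into the given object class and collects the results on the list object. A simplified, hypothetical sketch of that pattern for PCI devices (not the real implementation) might look like:

def make_pci_device_list(context, db_rows):
    # Hypothetical helper mirroring what base.obj_make_list does for
    # PciDeviceList: convert each DB row and attach it to the list object.
    dev_list = objects.PciDeviceList(context)
    dev_list.objects = []
    for db_row in db_rows:
        dev = objects.PciDevice._from_db_object(
            context, objects.PciDevice(), db_row)
        dev_list.objects.append(dev)
    dev_list.obj_reset_changes()
    return dev_list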
Example #6
    def _move_claim(self,
                    context,
                    instance,
                    new_instance_type,
                    nodename,
                    move_type=None,
                    image_meta=None,
                    limits=None,
                    migration=None):
        """Indicate that resources are needed for a move to this host.

        Move can be either a migrate/resize, live-migrate or an
        evacuate/rebuild operation.

        :param context: security context
        :param instance: instance object to reserve resources for
        :param new_instance_type: new instance_type being resized to
        :param nodename: The Ironic nodename selected by the scheduler
        :param move_type: move type - can be one of 'migration', 'resize',
                          'live-migration', 'evacuate'
        :param image_meta: instance image metadata
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs
        :param migration: A migration object if one was already created
                          elsewhere for this operation
        :returns: A Claim ticket representing the reserved resources. This
                  should be turned into a finalized resource claim, or the
                  resources should be freed, after the compute operation
                  is finished.
        """
        image_meta = image_meta or {}
        if migration:
            self._claim_existing_migration(migration, nodename)
        else:
            migration = self._create_migration(context, instance,
                                               new_instance_type, nodename,
                                               move_type)

        if self.disabled(nodename):
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            return claims.NopClaim(migration=migration)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(new_instance_type)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': new_instance_type.memory_mb,
                'overhead': overhead['memory_mb']
            })
        LOG.debug(
            "Disk overhead for %(flavor)d GB instance; %(overhead)d "
            "GB", {
                'flavor': instance.flavor.root_gb,
                'overhead': overhead.get('disk_gb', 0)
            })
        LOG.debug(
            "CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
            "vCPU(s)", {
                'flavor': instance.flavor.vcpus,
                'overhead': overhead.get('vcpus', 0)
            })

        cn = self.compute_nodes[nodename]

        # TODO(moshele): we are recreating the pci requests even if
        # there was no change on resize. This will cause allocating
        # the old/new pci device in the resize phase. In the future
        # we would like to optimise this.
        new_pci_requests = pci_request.get_pci_requests_from_flavor(
            new_instance_type)
        new_pci_requests.instance_uuid = instance.uuid
        # PCI requests come from two sources: instance flavor and
        # SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
        # On resize merge the SR-IOV ports pci_requests with the new
        # instance flavor pci_requests.
        if instance.pci_requests:
            for request in instance.pci_requests.requests:
                if request.alias_name is None:
                    new_pci_requests.requests.append(request)
        claim = claims.MoveClaim(context,
                                 instance,
                                 nodename,
                                 new_instance_type,
                                 image_meta,
                                 self,
                                 cn,
                                 new_pci_requests,
                                 overhead=overhead,
                                 limits=limits)

        claim.migration = migration
        claimed_pci_devices_objs = []
        if self.pci_tracker:
            # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
            # in _update_usage_from_instance().
            claimed_pci_devices_objs = self.pci_tracker.claim_instance(
                context, new_pci_requests, claim.claimed_numa_topology)
        claimed_pci_devices = objects.PciDeviceList(
            objects=claimed_pci_devices_objs)

        # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
        # constructor flow so the Claim constructor only tests whether
        # resources can be claimed, not consume the resources directly.
        mig_context = objects.MigrationContext(
            context=context,
            instance_uuid=instance.uuid,
            migration_id=migration.id,
            old_numa_topology=instance.numa_topology,
            new_numa_topology=claim.claimed_numa_topology,
            old_pci_devices=instance.pci_devices,
            new_pci_devices=claimed_pci_devices,
            old_pci_requests=instance.pci_requests,
            new_pci_requests=new_pci_requests)
        instance.migration_context = mig_context
        instance.save()

        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance, migration,
                                          nodename)
        elevated = context.elevated()
        self._update(elevated, cn)

        return claim
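As the comment above explains, flavor-derived PCI requests are regenerated on resize, while SR-IOV port requests (the ones with no alias_name) are carried over from the instance. A standalone sketch of just that merge step, with hypothetical flavor_requests and instance_requests arguments standing in for the objects used above:

def merge_pci_requests(flavor_requests, instance_requests):
    # Hypothetical helper: start from the requests derived from the new
    # flavor, then carry over SR-IOV port requests, which are the only
    # ones without an alias_name.
    merged = list(flavor_requests)
    for request in instance_requests:
        if request.alias_name is None:
            merged.append(request)
    return merged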
Example #7
 def _create_fake_instance(self):
     self.inst = objects.Instance()
     self.inst.uuid = 'fake-inst-uuid'
     self.inst.pci_devices = objects.PciDeviceList()
     self.inst.vm_state = vm_states.ACTIVE
     self.inst.task_state = None
Example #8
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology()
        if 'pci_requests' in expected_attrs:
            instance._load_pci_requests()

        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance.obj_reset_changes()
        return instance
Example #9
from oslo_utils.fixture import uuidsentinel as uuids

from nova import exception
from nova import objects
from nova.tests.unit.objects import test_instance_numa
from nova.tests.unit.objects import test_objects

fake_instance_uuid = uuids.fake

fake_migration_context_obj = objects.MigrationContext()
fake_migration_context_obj.instance_uuid = fake_instance_uuid
fake_migration_context_obj.migration_id = 42
fake_migration_context_obj.new_numa_topology = (
    test_instance_numa.fake_obj_numa_topology.obj_clone())
fake_migration_context_obj.old_numa_topology = None
fake_migration_context_obj.new_pci_devices = objects.PciDeviceList()
fake_migration_context_obj.old_pci_devices = None
fake_migration_context_obj.new_pci_requests = (objects.InstancePCIRequests(
    requests=[objects.InstancePCIRequest(count=123, spec=[])]))
fake_migration_context_obj.old_pci_requests = None

fake_db_context = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'instance_uuid':