Example #1
class FixedIPList(obj_base.ObjectListBase, obj_base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_network()
    # Version 1.2: FixedIP <= version 1.2
    # Version 1.3: FixedIP <= version 1.3
    # Version 1.4: FixedIP <= version 1.4
    # Version 1.5: FixedIP <= version 1.5, added expected attrs to gets
    # Version 1.6: FixedIP <= version 1.6
    # Version 1.7: FixedIP <= version 1.7
    # Version 1.8: FixedIP <= version 1.8
    # Version 1.9: FixedIP <= version 1.9
    # Version 1.10: FixedIP <= version 1.10
    # Version 1.11: FixedIP <= version 1.11
    # Version 1.12: FixedIP <= version 1.12
    # Version 1.13: FixedIP <= version 1.13
    VERSION = '1.13'

    fields = {
        'objects': fields.ListOfObjectsField('FixedIP'),
    }
    obj_relationships = {
        'objects': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.2'),
                    ('1.3', '1.3'), ('1.4', '1.4'), ('1.5', '1.5'),
                    ('1.6', '1.6'), ('1.7', '1.7'), ('1.8', '1.8'),
                    ('1.9', '1.9'), ('1.10', '1.10'), ('1.11', '1.11'),
                    ('1.12', '1.12'), ('1.13', '1.13')],
    }

    @obj_base.remotable_classmethod
    def get_all(cls, context):
        db_fixedips = db.fixed_ip_get_all(context)
        return obj_base.obj_make_list(context, cls(context), objects.FixedIP,
                                      db_fixedips)

    @obj_base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        expected_attrs = ['network', 'virtual_interface', 'floating_ips']
        db_fixedips = db.fixed_ip_get_by_instance(context, instance_uuid)
        return obj_base.obj_make_list(context,
                                      cls(context),
                                      objects.FixedIP,
                                      db_fixedips,
                                      expected_attrs=expected_attrs)

    @obj_base.remotable_classmethod
    def get_by_host(cls, context, host):
        db_fixedips = db.fixed_ip_get_by_host(context, host)
        return obj_base.obj_make_list(context, cls(context), objects.FixedIP,
                                      db_fixedips)

    @obj_base.remotable_classmethod
    def get_by_virtual_interface_id(cls, context, vif_id):
        expected_attrs = ['network', 'floating_ips']
        db_fixedips = db.fixed_ips_by_virtual_interface(context, vif_id)
        return obj_base.obj_make_list(context,
                                      cls(context),
                                      objects.FixedIP,
                                      db_fixedips,
                                      expected_attrs=expected_attrs)

    @obj_base.remotable_classmethod
    def get_by_network(cls, context, network, host=None):
        ipinfo = db.network_get_associated_fixed_ips(context,
                                                     network['id'],
                                                     host=host)
        if not ipinfo:
            return []

        fips = cls(context=context, objects=[])

        for info in ipinfo:
            inst = objects.Instance(context=context,
                                    uuid=info['instance_uuid'],
                                    hostname=info['instance_hostname'],
                                    created_at=info['instance_created'],
                                    updated_at=info['instance_updated'])
            vif = objects.VirtualInterface(context=context,
                                           id=info['vif_id'],
                                           address=info['vif_address'])
            fip = objects.FixedIP(context=context,
                                  address=info['address'],
                                  instance_uuid=info['instance_uuid'],
                                  network_id=info['network_id'],
                                  virtual_interface_id=info['vif_id'],
                                  allocated=info['allocated'],
                                  leased=info['leased'],
                                  default_route=info['default_route'],
                                  instance=inst,
                                  virtual_interface=vif)
            fips.objects.append(fip)
        fips.obj_reset_changes()
        return fips

    @obj_base.remotable_classmethod
    def bulk_create(cls, context, fixed_ips):
        ips = []
        for fixedip in fixed_ips:
            ip = obj_base.obj_to_primitive(fixedip)
            if 'id' in ip:
                raise exception.ObjectActionError(action='create',
                                                  reason='already created')
            ips.append(ip)
        db.fixed_ip_bulk_create(context, ips)
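
The list class above follows the standard Nova versioned-object pattern: a remotable classmethod fetches the DB rows and obj_make_list() hydrates them into FixedIP objects. Below is a minimal usage sketch, not taken from the source; it assumes the usual nova.context and nova.objects entry points, and the instance UUID is made up.

from nova import context as nova_context
from nova import objects

objects.register_all()
ctxt = nova_context.get_admin_context()

# Hypothetical instance UUID, for illustration only.
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
    ctxt, '11111111-2222-3333-4444-555555555555')
for fip in fixed_ips:
    # 'network' and 'floating_ips' were requested as expected_attrs above,
    # so they are already hydrated on each FixedIP.
    print(fip.address, fip.instance_uuid)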
Example #2
class ServiceList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    #              Service <= version 1.2
    # Version 1.1: Service version 1.3
    # Version 1.2: Service version 1.4
    # Version 1.3: Service version 1.5
    # Version 1.4: Service version 1.6
    # Version 1.5: Service version 1.7
    # Version 1.6: Service version 1.8
    # Version 1.7: Service version 1.9
    # Version 1.8: Service version 1.10
    # Version 1.9: Added get_by_binary() and Service version 1.11
    # Version 1.10: Service version 1.12
    # Version 1.11: Service version 1.13
    # Version 1.12: Service version 1.14
    # Version 1.13: Service version 1.15
    # Version 1.14: Service version 1.16
    # Version 1.15: Service version 1.17
    # Version 1.16: Service version 1.18
    # Version 1.17: Service version 1.19
    # Version 1.18: Added include_disabled parameter to get_by_binary()
    # Version 1.19: Added get_all_computes_by_hv_type()
    VERSION = '1.19'

    fields = {
        'objects': fields.ListOfObjectsField('Service'),
    }

    @base.remotable_classmethod
    def get_by_topic(cls, context, topic):
        db_services = db.service_get_all_by_topic(context, topic)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    # NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
    # will be removed so both enabled and disabled hosts are returned
    @base.remotable_classmethod
    def get_by_binary(cls, context, binary, include_disabled=False):
        db_services = db.service_get_all_by_binary(
            context, binary, include_disabled=include_disabled)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    @base.remotable_classmethod
    def get_by_host(cls, context, host):
        db_services = db.service_get_all_by_host(context, host)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    @base.remotable_classmethod
    def get_all(cls, context, disabled=None, set_zones=False):
        db_services = db.service_get_all(context, disabled=disabled)
        if set_zones:
            db_services = availability_zones.set_availability_zones(
                context, db_services)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)

    @base.remotable_classmethod
    def get_all_computes_by_hv_type(cls, context, hv_type):
        db_services = db.service_get_all_computes_by_hv_type(
            context, hv_type, include_disabled=False)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  db_services)
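
A short usage sketch for get_by_binary(), again an assumption rather than code from the source; the binary name and context setup are illustrative, and include_disabled reflects the NOTE above about its planned removal in v2.0.

from nova import context as nova_context
from nova import objects

ctxt = nova_context.get_admin_context()
services = objects.ServiceList.get_by_binary(
    ctxt, 'nova-compute', include_disabled=True)
for svc in services:
    # 'host' and 'disabled' are fields of the Service object.
    print(svc.host, svc.disabled)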
Example #3
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    # Version 1.12: HVSpec version 1.1
    # Version 1.13: Changed service_id field to be nullable
    # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
    # Version 1.15: Added uuid
    # Version 1.16: Added disk_allocation_ratio
    VERSION = '1.16'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(read_only=True),
        'service_id': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        # TODO(melwitt): cpu_info is non-nullable in the schema, but we must
        # wait until version 2.0 of ComputeNode to make the field non-nullable
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        # TODO(rlrossit): because of history, numa_topology is held here as a
        # StringField, not a NUMATopology object. In version 2 of ComputeNode
        # this will be converted over to a fields.ObjectField('NUMATopology')
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
        'disk_allocation_ratio': fields.FloatField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 16):
            if 'disk_allocation_ratio' in primitive:
                del primitive['disk_allocation_ratio']
        if target_version < (1, 15):
            if 'uuid' in primitive:
                del primitive['uuid']
        if target_version < (1, 14):
            if 'ram_allocation_ratio' in primitive:
                del primitive['ram_allocation_ratio']
            if 'cpu_allocation_ratio' in primitive:
                del primitive['cpu_allocation_ratio']
        if target_version < (1, 13) and primitive.get('service_id') is None:
            # service_id is non-nullable in versions before 1.13
            try:
                service = objects.Service.get_by_compute_host(
                    self._context, primitive['host'])
                primitive['service_id'] = service.id
            except (exception.ComputeHostNotFound, KeyError):
                # NOTE(hanlind): In case anything goes wrong like service not
                # found or host not being set, catch and set a fake value just
                # to allow for older versions that demand a value to work.
                # Setting it to -1 will, if the value is later used, result
                # in a ServiceNotFound, so it should be safe.
                primitive['service_id'] = -1
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        if (('host' not in db_compute or db_compute['host'] is None)
                and 'service_id' in db_compute
                and db_compute['service_id'] is not None):
            # FIXME(sbauza): Unconverted compute record, provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # The service field of ComputeNode could be deprecated in a future
            # patch, so let's use the Service object directly
            try:
                service = objects.Service.get_by_id(compute._context,
                                                    db_compute['service_id'])
            except exception.ServiceNotFound:
                compute.host = None
                return
            try:
                compute.host = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute.host = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute.host = db_compute['host']
        else:
            # We assume it should not happen but in case, let's set it to None
            compute.host = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            'local_gb',
            'memory_mb',
            'vcpus',
        ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Not all compute nodes necessarily run the latest
            # RT code that updates allocation ratios, so we need a backwards
            # compatible way of hydrating them.
            # Since we don't want to force operators to change their
            # configuration files before upgrading, we hardcode the default
            # values for the ratios here until the next release (Newton),
            # where the option defaults will be restored for the cpu (16.0),
            # ram (1.5) and disk (1.0) allocation ratios.
            # TODO(sbauza): Remove this in the next major version bump where
            # we break compatibility with old Liberty computes
            if (key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio'
                    or key == 'disk_allocation_ratio'):
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
                    if value == 0.0 and key == 'disk_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.0
            setattr(compute, key, value)

        for key in ('vcpus', 'local_gb', 'memory_mb'):
            inv_key = 'inv_%s' % key
            if inv_key in db_compute and db_compute[inv_key] is not None:
                setattr(compute, key, db_compute[inv_key])
            else:
                setattr(compute, key, db_compute[key])

        stats = db_compute['stats']
        if stats:
            compute.stats = jsonutils.loads(stats)

        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute.supported_hv_specs = hv_specs

        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on whether
        # the host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()

        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): The old version returned a single item; we need to
        # keep this behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        db_compute = db.compute_node_get_by_host_and_nodename(
            context, host, nodename)
        return cls._from_db_object(context, cls(), db_compute)

    # TODO(pkholkin): Remove this method in the next major version bump
    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls,
                                              context,
                                              host,
                                              use_slave=False):
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
        # nodes per host; we should return all the nodes and modify the
        # callers instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        if 'pci_device_pools' in updates:
            pools = updates.pop('pci_device_pools')
            if pools is not None:
                pools = jsonutils.dumps(pools.obj_to_primitive())
            updates['pci_stats'] = pools

    def _should_manage_inventory(self):
        related_binaries = ['nova-api', 'nova-conductor', 'nova-scheduler']
        required_version = 10
        min_ver = objects.Service.get_minimum_version_multi(
            self._context, related_binaries)
        return min_ver >= required_version

    def _update_inventory(self, updates):
        """Update inventory records from legacy model values

        :param updates: Legacy model update dict which will be modified when
                        we return
        """
        # NOTE(danms): Here we update our inventory records with our
        # resource information. Since this information is prepared in
        # updates against our older compute_node columns, we need to
        # zero those values after we have updated the inventory
        # records so that it is clear that they have been migrated.
        # We return True or False here based on whether we found
        # inventory records to update. If not, then we need to signal
        # to our caller that _create_inventory() needs to be called
        # instead

        inventory_list = \
            objects.InventoryList.get_all_by_resource_provider_uuid(
                self._context, self.uuid)
        if not inventory_list:
            return False

        for inventory in inventory_list:
            if inventory.resource_class == fields.ResourceClass.VCPU:
                key = 'vcpus'
            elif inventory.resource_class == fields.ResourceClass.MEMORY_MB:
                key = 'memory_mb'
            elif inventory.resource_class == fields.ResourceClass.DISK_GB:
                key = 'local_gb'
            else:
                LOG.warning(_LW('Unknown inventory class %s for compute node'),
                            inventory.resource_class)
                continue

            if key in updates:
                inventory.total = getattr(self, key)
                updates[key] = 0

                inventory.save()
        return True

    def _create_inventory(self, updates):
        """Create the initial inventory objects for this compute node.

        This is only ever called once, either for the first time when a compute
        is created, or after an upgrade where the required services have
        reached the required version.

        :param updates: Legacy model update dict which will be modified when
                        we return
        """
        rp = objects.ResourceProvider(context=self._context, uuid=self.uuid)
        rp.create()

        # NOTE(danms): Until we remove the columns from compute_nodes,
        # we need to constantly zero out each value in our updates to
        # signal that we wrote the value into inventory instead.

        cpu = objects.Inventory(context=self._context,
                                resource_provider=rp,
                                resource_class=fields.ResourceClass.VCPU,
                                total=self.vcpus,
                                reserved=0,
                                min_unit=1,
                                max_unit=1,
                                step_size=1,
                                allocation_ratio=self.cpu_allocation_ratio)
        cpu.create()
        updates['vcpus'] = 0

        mem = objects.Inventory(context=self._context,
                                resource_provider=rp,
                                resource_class=fields.ResourceClass.MEMORY_MB,
                                total=self.memory_mb,
                                reserved=0,
                                min_unit=1,
                                max_unit=1,
                                step_size=1,
                                allocation_ratio=self.ram_allocation_ratio)
        mem.create()
        updates['memory_mb'] = 0

        # FIXME(danms): Eventually we want to not write this record
        # if the compute host is on shared storage. We'll need some
        # indication from it to that effect, so for now we always
        # write it so that we can make all the usual machinery depend
        # on these records instead of the legacy columns.
        disk = objects.Inventory(context=self._context,
                                 resource_provider=rp,
                                 resource_class=fields.ResourceClass.DISK_GB,
                                 total=self.local_gb,
                                 reserved=0,
                                 min_unit=1,
                                 max_unit=1,
                                 step_size=1,
                                 allocation_ratio=self.disk_allocation_ratio)
        disk.create()
        updates['local_gb'] = 0

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        if self._should_manage_inventory():
            self._create_inventory(updates)

        db_compute = db.compute_node_create(self._context, updates)
        # NOTE(danms): compute_node_create() operates on (and returns) the
        # compute node model only. We need to get the full inventory-based
        # result in order to satisfy _from_db_object(). So, we do a double
        # query here. This can be removed in Newton once we're sure that all
        # compute nodes are inventory-based
        db_compute = db.compute_node_get(self._context, db_compute['id'])
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def save(self, prune_stats=False):
        # NOTE(belliott): ignore the prune_stats param, it is no longer
        # relevant

        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        if self._should_manage_inventory():
            if not self._update_inventory(updates):
                # NOTE(danms): This only happens once
                self._create_inventory(updates)
        db_compute = db.compute_node_update(self._context, self.id, updates)
        # NOTE(danms): compute_node_update() operates on (and returns) the
        # compute node model only. We need to get the full inventory-based
        # result in order to satisfy _from_db_object(). So, we do a double
        # query here. This can be removed in Newton once we're sure that all
        # compute nodes are inventory-based
        db_compute = db.compute_node_get(self._context, self.id)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def destroy(self):
        db.compute_node_delete(self._context, self.id)

    def update_from_virt_driver(self, resources):
        # NOTE(pmurray): the virt driver provides a dict of values that
        # can be copied into the compute node. The names and representation
        # do not exactly match.
        # TODO(pmurray): the resources dict should be formalized.
        keys = [
            "vcpus", "memory_mb", "local_gb", "cpu_info", "vcpus_used",
            "memory_mb_used", "local_gb_used", "numa_topology",
            "hypervisor_type", "hypervisor_version", "hypervisor_hostname",
            "disk_available_least", "host_ip"
        ]
        for key in keys:
            if key in resources:
                setattr(self, key, resources[key])

        # supported_instances has a different name in compute_node
        if 'supported_instances' in resources:
            si = resources['supported_instances']
            self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
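
obj_make_compatible() above is what strips newer fields when a ComputeNode is serialized for an older peer. A minimal back-level sketch, assuming the standard obj_to_primitive(target_version=...) entry point from the NovaObject base class; the field values are illustrative only.

from nova import objects

# Hypothetical sketch; the values are illustrative only.
node = objects.ComputeNode(host='compute-1', vcpus=8, memory_mb=16384,
                           cpu_allocation_ratio=16.0,
                           ram_allocation_ratio=1.5,
                           disk_allocation_ratio=1.0)
# Serializing for a peer that only understands ComputeNode 1.13 invokes
# obj_make_compatible(), which drops the fields added in 1.14-1.16
# (the allocation ratios and uuid) from the primitive if they are set.
old_primitive = node.obj_to_primitive(target_version='1.13')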
Example #4
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    #              ComputeNode <= version 1.2
    # Version 1.1: ComputeNode version 1.3
    # Version 1.2: Add get_by_service()
    # Version 1.3: ComputeNode version 1.4
    # Version 1.4: ComputeNode version 1.5
    # Version 1.5: Add use_slave to get_by_service
    # Version 1.6: ComputeNode version 1.6
    # Version 1.7: ComputeNode version 1.7
    # Version 1.8: ComputeNode version 1.8 + add get_all_by_host()
    # Version 1.9: ComputeNode version 1.9
    # Version 1.10: ComputeNode version 1.10
    # Version 1.11: ComputeNode version 1.11
    # Version 1.12: ComputeNode version 1.12
    # Version 1.13: ComputeNode version 1.13
    # Version 1.14: ComputeNode version 1.14
    VERSION = '1.14'
    fields = {
        'objects': fields.ListOfObjectsField('ComputeNode'),
    }

    @base.remotable_classmethod
    def get_all(cls, context):
        db_computes = db.compute_node_get_all(context)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @base.remotable_classmethod
    def get_by_hypervisor(cls, context, hypervisor_match):
        db_computes = db.compute_node_search_by_hypervisor(
            context, hypervisor_match)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def _get_by_service(cls, context, service_id, use_slave=False):
        try:
            db_computes = db.compute_nodes_get_by_service_id(
                context, service_id)
        except exception.ServiceNotFound:
            # NOTE(sbauza): The previous behaviour was to return an empty list
            # if the service was created with no computes; we need to keep it.
            db_computes = []
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @staticmethod
    @db.select_db_reader_mode
    def _db_compute_node_get_all_by_host(context, host, use_slave=False):
        return db.compute_node_get_all_by_host(context, host)

    @base.remotable_classmethod
    def get_all_by_host(cls, context, host, use_slave=False):
        db_computes = cls._db_compute_node_get_all_by_host(context,
                                                           host,
                                                           use_slave=use_slave)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
Example #5
class InstanceNUMATopology(base.NovaObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        # NOTE(danms): The 'id' field is no longer used and should be
        # removed in the future when convenient
        'id': fields.IntegerField(),
        'instance_uuid': fields.UUIDField(),
        'cells': fields.ListOfObjectsField('InstanceNUMACell'),
    }

    @classmethod
    def obj_from_topology(cls, topology):
        if not isinstance(topology, hardware.VirtNUMAInstanceTopology):
            raise exception.ObjectActionError(action='obj_from_topology',
                                              reason='invalid topology class')
        if topology:
            cells = []
            for topocell in topology.cells:
                cell = InstanceNUMACell(id=topocell.id,
                                        cpuset=topocell.cpuset,
                                        memory=topocell.memory)
                cells.append(cell)
            return cls(cells=cells)

    def topology_from_obj(self):
        cells = []
        for objcell in self.cells:
            cell = hardware.VirtNUMATopologyCell(objcell.id, objcell.cpuset,
                                                 objcell.memory)
            cells.append(cell)
        return hardware.VirtNUMAInstanceTopology(cells=cells)

    @base.remotable
    def create(self, context):
        topology = self.topology_from_obj()
        if not topology:
            return
        values = {'numa_topology': topology.to_json()}
        db.instance_extra_update_by_uuid(context, self.instance_uuid, values)
        self.obj_reset_changes()

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        db_topology = db.instance_extra_get_by_instance_uuid(
            context, instance_uuid, columns=['numa_topology'])
        if not db_topology:
            raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)

        if db_topology['numa_topology'] is None:
            return None

        topo = hardware.VirtNUMAInstanceTopology.from_json(
            db_topology['numa_topology'])
        obj_topology = cls.obj_from_topology(topo)
        obj_topology.id = db_topology['id']
        obj_topology.instance_uuid = db_topology['instance_uuid']
        # NOTE(ndipanov): not really needed as we never save, but left for
        # consistency
        obj_topology.obj_reset_changes()
        return obj_topology
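
obj_from_topology() and topology_from_obj() convert between this object and the nova.virt.hardware topology classes used in the code above. A hypothetical round-trip sketch; the cell ids, cpusets and memory sizes are made up.

from nova.virt import hardware

virt_topology = hardware.VirtNUMAInstanceTopology(cells=[
    hardware.VirtNUMATopologyCell(0, set([0, 1]), 512),
    hardware.VirtNUMATopologyCell(1, set([2, 3]), 512),
])
obj_topology = InstanceNUMATopology.obj_from_topology(virt_topology)
# ...and back again, e.g. before persisting the JSON form in instance_extra.
json_blob = obj_topology.topology_from_obj().to_json()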
Example #6
class NUMACell(base.NovaObject,
               base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added pinned_cpus and siblings fields
    # Version 1.2: Added mempages field
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'cpuset': fields.SetOfIntegersField(),
        'memory': fields.IntegerField(),
        'cpu_usage': fields.IntegerField(default=0),
        'memory_usage': fields.IntegerField(default=0),
        'pinned_cpus': fields.SetOfIntegersField(),
        'siblings': fields.ListOfSetsOfIntegersField(),
        'mempages': fields.ListOfObjectsField('NUMAPagesTopology'),
        }

    obj_relationships = {
        'mempages': [('1.2', '1.0')]
    }

    def __init__(self, **kwargs):
        super(NUMACell, self).__init__(**kwargs)
        if 'pinned_cpus' not in kwargs:
            self.pinned_cpus = set()
        if 'siblings' not in kwargs:
            self.siblings = []

    @property
    def free_cpus(self):
        return self.cpuset - self.pinned_cpus or set()

    @property
    def free_siblings(self):
        return [sibling_set & self.free_cpus
                for sibling_set in self.siblings]

    @property
    def avail_cpus(self):
        return len(self.free_cpus)

    @property
    def avail_memory(self):
        return self.memory - self.memory_usage

    def pin_cpus(self, cpus):
        if self.pinned_cpus & cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus |= cpus

    def unpin_cpus(self, cpus):
        if (self.pinned_cpus & cpus) != cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus -= cpus

    def _to_dict(self):
        return {
            'id': self.id,
            'cpus': hardware.format_cpu_spec(
                self.cpuset, allow_ranges=False),
            'mem': {
                'total': self.memory,
                'used': self.memory_usage},
            'cpu_usage': self.cpu_usage}

    @classmethod
    def _from_dict(cls, data_dict):
        cpuset = hardware.parse_cpu_spec(
            data_dict.get('cpus', ''))
        cpu_usage = data_dict.get('cpu_usage', 0)
        memory = data_dict.get('mem', {}).get('total', 0)
        memory_usage = data_dict.get('mem', {}).get('used', 0)
        cell_id = data_dict.get('id')
        return cls(id=cell_id, cpuset=cpuset, memory=memory,
                   cpu_usage=cpu_usage, memory_usage=memory_usage,
                   mempages=[])

    def can_fit_hugepages(self, pagesize, memory):
        """Returns whether memory can fit into hugepages size

        :param pagesize: a page size in KibB
        :param memory: a memory size asked to fit in KiB

        :returns: whether memory can fit in hugepages
        :raises: MemoryPageSizeNotSupported if page size not supported
        """
        for pages in self.mempages:
            if pages.size_kb == pagesize:
                return (memory <= pages.free_kb and
                        (memory % pages.size_kb) == 0)
        raise exception.MemoryPageSizeNotSupported(pagesize=pagesize)
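
pin_cpus() and unpin_cpus() guard against pinning an already pinned CPU and unpinning a CPU that was never pinned. A small sketch with a made-up cell layout.

# Hypothetical cell layout, for illustration only.
cell = NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                cpu_usage=0, memory_usage=0,
                siblings=[set([0, 1]), set([2, 3])],
                mempages=[])
cell.pin_cpus(set([0, 1]))
assert cell.free_cpus == set([2, 3])
# Pinning an already pinned CPU would raise CPUPinningInvalid:
# cell.pin_cpus(set([1]))
cell.unpin_cpus(set([0, 1]))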
Example #7
class Foo(base.ObjectListBase, base.NovaObject):
    fields = {'objects': fields.ListOfObjectsField('Bar')}
Example #8
class MyList(base.ObjectListBase, base.NovaObject):
    fields = {'objects': fields.ListOfObjectsField('MyObj')}
Example #9
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: BlockDeviceMapping <= version 1.1
    # Version 1.2: Added use_slave to get_by_instance_uuid
    # Version 1.3: BlockDeviceMapping <= version 1.2
    # Version 1.4: BlockDeviceMapping <= version 1.3
    # Version 1.5: BlockDeviceMapping <= version 1.4
    # Version 1.6: BlockDeviceMapping <= version 1.5
    # Version 1.7: BlockDeviceMapping <= version 1.6
    # Version 1.8: BlockDeviceMapping <= version 1.7
    # Version 1.9: BlockDeviceMapping <= version 1.8
    # Version 1.10: BlockDeviceMapping <= version 1.9
    # Version 1.11: BlockDeviceMapping <= version 1.10
    # Version 1.12: BlockDeviceMapping <= version 1.11
    # Version 1.13: BlockDeviceMapping <= version 1.12
    # Version 1.14: BlockDeviceMapping <= version 1.13
    # Version 1.15: BlockDeviceMapping <= version 1.14
    # Version 1.16: BlockDeviceMapping <= version 1.15
    # Version 1.17: Add get_by_instance_uuids()
    VERSION = '1.17'

    fields = {
        'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
    }

    @property
    def instance_uuids(self):
        return set(
            bdm.instance_uuid for bdm in self
            if bdm.obj_attr_is_set('instance_uuid')
        )

    @classmethod
    def bdms_by_instance_uuid(cls, context, instance_uuids):
        bdms = cls.get_by_instance_uuids(context, instance_uuids)
        return base.obj_make_dict_of_lists(
                context, cls, bdms, 'instance_uuid')

    @staticmethod
    @db.select_db_reader_mode
    def _db_block_device_mapping_get_all_by_instance_uuids(
            context, instance_uuids, use_slave=False):
        return db.block_device_mapping_get_all_by_instance_uuids(
                context, instance_uuids)

    @base.remotable_classmethod
    def get_by_instance_uuids(cls, context, instance_uuids, use_slave=False):
        db_bdms = cls._db_block_device_mapping_get_all_by_instance_uuids(
            context, instance_uuids, use_slave=use_slave)
        return base.obj_make_list(
                context, cls(), objects.BlockDeviceMapping, db_bdms or [])

    @staticmethod
    @db.select_db_reader_mode
    def _db_block_device_mapping_get_all_by_instance(
            context, instance_uuid, use_slave=False):
        return db.block_device_mapping_get_all_by_instance(
            context, instance_uuid)

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
        db_bdms = cls._db_block_device_mapping_get_all_by_instance(
            context, instance_uuid, use_slave=use_slave)
        return base.obj_make_list(
                context, cls(), objects.BlockDeviceMapping, db_bdms or [])

    def root_bdm(self):
        """It only makes sense to call this method when the
        BlockDeviceMappingList contains BlockDeviceMappings from
        exactly one instance rather than BlockDeviceMappings from
        multiple instances.

        For example, you should not call this method from a
        BlockDeviceMappingList created by get_by_instance_uuids(),
        but you may call this method from a BlockDeviceMappingList
        created by get_by_instance_uuid().
        """

        if len(self.instance_uuids) > 1:
            raise exception.UndefinedRootBDM()
        try:
            return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
        except StopIteration:
            return
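
As the root_bdm() docstring notes, the list must hold mappings for exactly one instance, so the sketch below (an illustration, not source code) builds it with get_by_instance_uuid(); the context setup and instance UUID are assumptions.

from nova import context as nova_context
from nova import objects

ctxt = nova_context.get_admin_context()
# Hypothetical instance UUID, for illustration only.
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
    ctxt, '11111111-2222-3333-4444-555555555555')
root = bdms.root_bdm()   # None when the instance has no root BDM
if root is not None:
    # source_type and destination_type are fields of BlockDeviceMapping.
    print(root.source_type, root.destination_type)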