Code example #1 (score: 0)
File: test_fields.py — project: haris00/nova
 def setUp(self):
     """Set up a DictOfIntegersField plus sample coercion fixtures."""
     super(TestDictOfIntegers, self).setUp()
     self.field = fields.DictOfIntegersField()
     # (input, expected) pairs: numeric strings and floats coerce to int.
     self.coerce_good_values = [
         ({'foo': '42'}, {'foo': 42}),
         ({'foo': 4.2}, {'foo': 4}),
     ]
     # Rejected inputs: non-string key, non-numeric value, a bare string,
     # and a None value.
     self.coerce_bad_values = [
         {1: 'bar'},
         {'foo': 'boo'},
         'foo',
         {'foo': None},
     ]
     # Primitive round-trips leave an int-valued dict unchanged.
     self.to_primitive_values = [({'foo': 42}, {'foo': 42})]
     self.from_primitive_values = [({'foo': 42}, {'foo': 42})]
Code example #2 (score: 0)
File: request_spec.py — project: youqiang95/nova
class RequestGroup(base.NovaObject):
    """Versioned object based on the unversioned
    nova.api.openstack.placement.lib.RequestGroup object.
    """

    VERSION = '1.0'

    fields = {
        # Whether all resources/traits below must come from one provider.
        'use_same_provider': fields.BooleanField(default=True),
        # Mapping of resource class name to requested amount.
        'resources': fields.DictOfIntegersField(default={}),
        'required_traits': fields.SetOfStringsField(default=set()),
        'forbidden_traits': fields.SetOfStringsField(default=set()),
        # A list of aggregate alternatives; for example
        #     [[aggregate_UUID1],
        #      [aggregate_UUID2, aggregate_UUID3]]
        # asks for a provider that is a member of aggregate_UUID1 and also
        # a member of either aggregate_UUID2 or aggregate_UUID3.
        'aggregates': fields.ListOfListsOfStringsField(default=[]),
    }

    def __init__(self, context=None, **kwargs):
        """Create the group and apply the field defaults declared above."""
        super(RequestGroup, self).__init__(context=context, **kwargs)
        self.obj_set_defaults()
Code example #3 (score: 0)
class InstanceNUMACell(base.NovaObject, base.NovaObjectDictCompat):
    """A single guest NUMA cell: its virtual CPUs, memory, and the optional
    pinning of those CPUs to host CPUs.
    """

    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    # Version 1.3: Add cpu_policy and cpu_thread_policy fields
    # Version 1.4: Add cpuset_reserved field
    VERSION = '1.4'

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields newer than ``target_version`` from ``primitive``
        so that older consumers can deserialize it.
        """
        super(InstanceNUMACell,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4):
            primitive.pop('cpuset_reserved', None)

        if target_version < (1, 3):
            primitive.pop('cpu_policy', None)
            primitive.pop('cpu_thread_policy', None)

    fields = {
        'id':
        obj_fields.IntegerField(),
        'cpuset':
        obj_fields.SetOfIntegersField(),
        'memory':
        obj_fields.IntegerField(),
        'pagesize':
        obj_fields.IntegerField(nullable=True),
        'cpu_topology':
        obj_fields.ObjectField('VirtCPUTopology', nullable=True),
        'cpu_pinning_raw':
        obj_fields.DictOfIntegersField(nullable=True),
        'cpu_policy':
        obj_fields.CPUAllocationPolicyField(nullable=True),
        'cpu_thread_policy':
        obj_fields.CPUThreadAllocationPolicyField(nullable=True),
        # These physical CPUs are reserved for use by the hypervisor
        'cpuset_reserved':
        obj_fields.SetOfIntegersField(nullable=True),
    }

    # Dict-style access to 'cpu_pinning_raw' under a friendlier name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')

    def __init__(self, **kwargs):
        super(InstanceNUMACell, self).__init__(**kwargs)
        # Default every optional attribute the caller did not supply to
        # None, without marking the field dirty for change tracking. The
        # (kwarg, field) pairs differ only for the 'cpu_pinning' proxy
        # attribute, which is backed by the 'cpu_pinning_raw' field.
        for kwarg, field in (('pagesize', 'pagesize'),
                             ('cpu_topology', 'cpu_topology'),
                             ('cpu_pinning', 'cpu_pinning_raw'),
                             ('cpu_policy', 'cpu_policy'),
                             ('cpu_thread_policy', 'cpu_thread_policy'),
                             ('cpuset_reserved', 'cpuset_reserved')):
            if kwarg not in kwargs:
                setattr(self, kwarg, None)
                self.obj_reset_changes([field])

    def __len__(self):
        """Return the number of virtual CPUs in this cell."""
        return len(self.cpuset)

    def _to_dict(self):
        """Serialize the cell to the legacy dict format.

        Only cpus/mem/id/pagesize are represented in this format.
        """
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_to_dict_ to the future to avoid confusing.
        return {
            'cpus': hardware.format_cpu_spec(self.cpuset, allow_ranges=False),
            'mem': {
                'total': self.memory
            },
            'id': self.id,
            'pagesize': self.pagesize
        }

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a cell from the legacy dict format; missing keys fall back
        to an empty cpuset / zero memory / None id and pagesize.
        """
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_from_dict_ to the future to avoid confusing.
        cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
        memory = data_dict.get('mem', {}).get('total', 0)
        cell_id = data_dict.get('id')
        pagesize = data_dict.get('pagesize')
        return cls(id=cell_id, cpuset=cpuset, memory=memory, pagesize=pagesize)

    @property
    def siblings(self):
        """Group the cell's CPUs into sets of thread siblings.

        Returns an empty list when no thread topology is known or each
        core has a single thread (no CPU has a sibling then).
        """
        cpu_list = sorted(list(self.cpuset))

        threads = 0
        if self.cpu_topology:
            threads = self.cpu_topology.threads
        if threads == 1:
            # One thread per core means no CPU has a sibling.
            threads = 0

        # zip() over `threads` copies of a single iterator chunks the
        # sorted CPU list into groups of `threads`; zero threads -> [].
        return list(map(set, zip(*[iter(cpu_list)] * threads)))

    @property
    def cpu_pinning_requested(self):
        """True when the cell's CPUs must be pinned to dedicated host CPUs."""
        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED

    def pin(self, vcpu, pcpu):
        """Pin guest CPU ``vcpu`` to host CPU ``pcpu``.

        Silently ignores vCPUs that are not part of this cell.
        """
        if vcpu not in self.cpuset:
            return
        pinning_dict = self.cpu_pinning or {}
        pinning_dict[vcpu] = pcpu
        # Reassign through the proxy so the field change is tracked.
        self.cpu_pinning = pinning_dict

    def pin_vcpus(self, *cpu_pairs):
        """Pin multiple (vcpu, pcpu) pairs; see :meth:`pin`."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)

    def clear_host_pinning(self):
        """Clear any data related to how this cell is pinned to the host.

        Needed for aborting claims as we do not want to keep stale data around.
        """
        self.id = -1
        self.cpu_pinning = {}
        return self
Code example #4 (score: 0)
class RequestGroup(base.NovaObject):
    """Versioned object based on the unversioned
    nova.api.openstack.placement.lib.RequestGroup object.
    """
    # Version 1.0: Initial version
    # Version 1.1: add requester_id and provider_uuids fields
    VERSION = '1.1'

    fields = {
        # Whether all resources/traits below must come from one provider.
        'use_same_provider': fields.BooleanField(default=True),
        # Mapping of resource class name to requested amount.
        'resources': fields.DictOfIntegersField(default={}),
        'required_traits': fields.SetOfStringsField(default=set()),
        'forbidden_traits': fields.SetOfStringsField(default=set()),
        # The aggregates field has a form of
        #     [[aggregate_UUID1],
        #      [aggregate_UUID2, aggregate_UUID3]]
        # meaning that the request should be fulfilled from an RP that is a
        # member of the aggregate aggregate_UUID1 and member of the aggregate
        # aggregate_UUID2 or aggregate_UUID3 .
        'aggregates': fields.ListOfListsOfStringsField(default=[]),
        # The entity the request is coming from (e.g. the Neutron port uuid)
        # which may not always be a UUID.
        'requester_id': fields.StringField(nullable=True, default=None),
        # The resource provider UUIDs that together fulfill the request
        # NOTE(gibi): this can be more than one if this is the unnumbered
        # request group (i.e. use_same_provider=False)
        'provider_uuids': fields.ListOfUUIDField(default=[]),
    }

    def __init__(self, context=None, **kwargs):
        """Create the group and apply the field defaults declared above."""
        super(RequestGroup, self).__init__(context=context, **kwargs)
        self.obj_set_defaults()

    @classmethod
    def from_port_request(cls, context, port_uuid, port_resource_request):
        """Init the group from the resource request of a neutron port

        :param context: the request context
        :param port_uuid: the port requesting the resources
        :param port_resource_request: the resource_request attribute of the
                                      neutron port
        :raises KeyError: if port_resource_request lacks the mandatory
                          'resources' key
        For example:

            port_resource_request = {
                "resources": {
                    "NET_BW_IGR_KILOBIT_PER_SEC": 1000,
                    "NET_BW_EGR_KILOBIT_PER_SEC": 1000},
                "required": ["CUSTOM_PHYSNET_2",
                             "CUSTOM_VNIC_TYPE_NORMAL"]
            }
        """

        # NOTE(gibi): Assumptions:
        # * a port requests resource from a single provider.
        # * a port only specifies resources and required traits
        # NOTE(gibi): Placement rejects allocation candidates where a request
        # group has traits but no resources specified. This is why resources
        # are handled as mandatory below but not traits.
        obj = cls(context=context,
                  use_same_provider=True,
                  resources=port_resource_request['resources'],
                  required_traits=set(port_resource_request.get(
                      'required', [])),
                  requester_id=port_uuid)
        obj.obj_set_defaults()
        return obj

    def obj_make_compatible(self, primitive, target_version):
        """Drop the 1.1-only fields for consumers older than 1.1."""
        super(RequestGroup, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1):
            # pop() with a default matches the style used by the other
            # obj_make_compatible() implementations in this file and avoids
            # the explicit membership pre-check before deletion.
            primitive.pop('requester_id', None)
            primitive.pop('provider_uuids', None)
Code example #5 (score: 0)
File: instance_numa.py — project: vwangyanweida/nova
class InstanceNUMACell(base.NovaEphemeralObject, base.NovaObjectDictCompat):
    """A single guest NUMA cell: its shared (cpuset) and pinned (pcpuset)
    virtual CPUs, memory, and the optional pinning of CPUs to host CPUs.
    """

    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    # Version 1.3: Add cpu_policy and cpu_thread_policy fields
    # Version 1.4: Add cpuset_reserved field
    # Version 1.5: Add pcpuset field
    # Version 1.6: Add 'mixed' to cpu_policy field
    VERSION = '1.6'

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized ``primitive`` in place so consumers
        older than ``target_version`` can deserialize it.

        :raises: exception.ObjectActionError when the target is older than
            1.6 and the cell uses the 'mixed' CPU policy, which has no
            backward-compatible representation.
        """
        super(InstanceNUMACell,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # Instance with a 'mixed' CPU policy could not provide a backward
        # compatibility.
        if target_version < (1, 6):
            if primitive['cpu_policy'] == obj_fields.CPUAllocationPolicy.MIXED:
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason=_('%s policy is not supported in version %s') %
                    (primitive['cpu_policy'], target_version))

        # NOTE(huaqiang): Since version 1.5, 'cpuset' is modified to track the
        # unpinned CPUs only, with pinned CPUs tracked via 'pcpuset' instead.
        # For a backward compatibility, move the 'dedicated' instance CPU list
        # from 'pcpuset' to 'cpuset'.
        if target_version < (1, 5):
            if (primitive['cpu_policy'] ==
                    obj_fields.CPUAllocationPolicy.DEDICATED):
                primitive['cpuset'] = primitive['pcpuset']
            primitive.pop('pcpuset', None)

        if target_version < (1, 4):
            primitive.pop('cpuset_reserved', None)

        if target_version < (1, 3):
            primitive.pop('cpu_policy', None)
            primitive.pop('cpu_thread_policy', None)

    fields = {
        'id':
        obj_fields.IntegerField(),
        # Unpinned (shared) guest CPUs.
        'cpuset':
        obj_fields.SetOfIntegersField(),
        # Pinned (dedicated) guest CPUs; see version 1.5 note above.
        'pcpuset':
        obj_fields.SetOfIntegersField(),
        # These physical CPUs are reserved for use by the hypervisor
        'cpuset_reserved':
        obj_fields.SetOfIntegersField(nullable=True, default=None),
        'memory':
        obj_fields.IntegerField(),
        'pagesize':
        obj_fields.IntegerField(nullable=True, default=None),
        # TODO(sean-k-mooney): This is no longer used and should be
        # removed in v2
        'cpu_topology':
        obj_fields.ObjectField('VirtCPUTopology', nullable=True),
        'cpu_pinning_raw':
        obj_fields.DictOfIntegersField(nullable=True, default=None),
        'cpu_policy':
        obj_fields.CPUAllocationPolicyField(nullable=True, default=None),
        'cpu_thread_policy':
        obj_fields.CPUThreadAllocationPolicyField(nullable=True, default=None),
    }

    # Dict-style access to 'cpu_pinning_raw' under a friendlier name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')

    def __len__(self):
        """Return the total number of guest CPUs (shared plus pinned)."""
        return len(self.total_cpus)

    @property
    def total_cpus(self):
        """Union of the shared and dedicated guest CPU sets."""
        return self.cpuset | self.pcpuset

    @property
    def siblings(self):
        """Group the cell's CPUs into sets of thread siblings.

        Returns an empty list when no thread topology is known or each
        core has a single thread (no CPU has a sibling then).
        """
        cpu_list = sorted(list(self.total_cpus))

        threads = 0
        if ('cpu_topology' in self) and self.cpu_topology:
            threads = self.cpu_topology.threads
        if threads == 1:
            # One thread per core means no CPU has a sibling.
            threads = 0

        # zip() over `threads` copies of a single iterator chunks the
        # sorted CPU list into groups of `threads`; zero threads -> [].
        return list(map(set, zip(*[iter(cpu_list)] * threads)))

    def pin(self, vcpu, pcpu):
        """Pin guest CPU ``vcpu`` to host CPU ``pcpu``.

        Silently ignores vCPUs outside the dedicated (pcpuset) set.
        """
        if vcpu not in self.pcpuset:
            return
        pinning_dict = self.cpu_pinning or {}
        pinning_dict[vcpu] = pcpu
        # Reassign through the proxy so the field change is tracked.
        self.cpu_pinning = pinning_dict

    def pin_vcpus(self, *cpu_pairs):
        """Pin multiple (vcpu, pcpu) pairs; see :meth:`pin`."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)

    def clear_host_pinning(self):
        """Clear any data related to how this cell is pinned to the host.

        Needed for aborting claims as we do not want to keep stale data around.
        """
        self.id = -1
        self.cpu_pinning = {}
        return self
Code example #6 (score: 0)
File: instance_numa.py — project: pharmacolog/nova
class InstanceNUMACell(base.NovaEphemeralObject, base.NovaObjectDictCompat):
    """A single guest NUMA cell and its optional pinning to host CPUs."""

    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    # Version 1.3: Add cpu_policy and cpu_thread_policy fields
    # Version 1.4: Add cpuset_reserved field
    VERSION = '1.4'

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields newer than ``target_version`` from ``primitive``."""
        super(InstanceNUMACell,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4):
            primitive.pop('cpuset_reserved', None)

        if target_version < (1, 3):
            primitive.pop('cpu_policy', None)
            primitive.pop('cpu_thread_policy', None)

    fields = {
        'id': obj_fields.IntegerField(),
        'cpuset': obj_fields.SetOfIntegersField(),
        'memory': obj_fields.IntegerField(),
        'pagesize': obj_fields.IntegerField(nullable=True, default=None),
        'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
                                               nullable=True),
        'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True,
                                                          default=None),
        'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True,
                                                          default=None),
        'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
            nullable=True, default=None),
        # Host CPUs set aside for use by the hypervisor.
        'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True,
                                                         default=None),
    }

    # Dict-style access to 'cpu_pinning_raw' under a friendlier name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')

    def __len__(self):
        """Return the number of virtual CPUs in the cell."""
        return len(self.cpuset)

    @property
    def siblings(self):
        """Sets of thread-sibling CPUs; empty when no topology is known."""
        ordered = sorted(self.cpuset)

        thread_count = 0
        if ('cpu_topology' in self) and self.cpu_topology:
            thread_count = self.cpu_topology.threads
        if thread_count == 1:
            # One thread per core: no CPU has a sibling.
            thread_count = 0

        # Chunk the sorted CPU list into groups of `thread_count` CPUs.
        return [set(group) for group in zip(*[iter(ordered)] * thread_count)]

    def pin(self, vcpu, pcpu):
        """Pin guest CPU ``vcpu`` to host CPU ``pcpu`` (no-op if unknown)."""
        if vcpu not in self.cpuset:
            return
        mapping = self.cpu_pinning or {}
        mapping[vcpu] = pcpu
        self.cpu_pinning = mapping

    def pin_vcpus(self, *cpu_pairs):
        """Pin several (vcpu, pcpu) pairs at once."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)

    def clear_host_pinning(self):
        """Clear any data related to how this cell is pinned to the host.

        Needed for aborting claims as we do not want to keep stale data around.
        """
        self.id = -1
        self.cpu_pinning = {}
        return self
Code example #7 (score: 0)
class InstanceNUMACell(base.NovaObject, base.NovaObjectDictCompat):
    """A single guest NUMA cell and its optional pinning to host CPUs."""

    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    VERSION = '1.2'

    fields = {
        'id': obj_fields.IntegerField(),
        'cpuset': obj_fields.SetOfIntegersField(),
        'memory': obj_fields.IntegerField(),
        'pagesize': obj_fields.IntegerField(nullable=True),
        'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
                                               nullable=True),
        'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True)
    }

    obj_relationships = {'cpu_topology': [('1.2', '1.0')]}

    # Dict-style access to 'cpu_pinning_raw' under a friendlier name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')

    def __init__(self, **kwargs):
        super(InstanceNUMACell, self).__init__(**kwargs)
        # Default every optional attribute the caller omitted to None,
        # without marking the field dirty.  The 'cpu_pinning' proxy is
        # backed by the 'cpu_pinning_raw' field.
        for kwarg, field in (('pagesize', 'pagesize'),
                             ('cpu_topology', 'cpu_topology'),
                             ('cpu_pinning', 'cpu_pinning_raw')):
            if kwarg not in kwargs:
                setattr(self, kwarg, None)
                self.obj_reset_changes([field])

    def __len__(self):
        """Return the number of virtual CPUs in the cell."""
        return len(self.cpuset)

    def _to_dict(self):
        # NOTE(sahid): legacy dict format; could be renamed to
        # _legacy_to_dict_ in the future to avoid confusion.
        return {'cpus': hardware.format_cpu_spec(self.cpuset,
                                                 allow_ranges=False),
                'mem': {'total': self.memory},
                'id': self.id,
                'pagesize': self.pagesize}

    @classmethod
    def _from_dict(cls, data_dict):
        # NOTE(sahid): legacy dict format; could be renamed to
        # _legacy_from_dict_ in the future to avoid confusion.
        return cls(id=data_dict.get('id'),
                   cpuset=hardware.parse_cpu_spec(data_dict.get('cpus', '')),
                   memory=data_dict.get('mem', {}).get('total', 0),
                   pagesize=data_dict.get('pagesize'))

    @property
    def siblings(self):
        """Sets of thread-sibling CPUs; empty when no topology is known."""
        ordered = sorted(self.cpuset)

        thread_count = self.cpu_topology.threads if self.cpu_topology else 0
        if thread_count == 1:
            # One thread per core: no CPU has a sibling.
            thread_count = 0

        # Chunk the sorted CPU list into groups of `thread_count` CPUs.
        return [set(group) for group in zip(*[iter(ordered)] * thread_count)]

    @property
    def cpu_pinning_requested(self):
        """True once any pinning information has been recorded."""
        return self.cpu_pinning is not None

    def pin(self, vcpu, pcpu):
        """Pin guest CPU ``vcpu`` to host CPU ``pcpu`` (no-op if unknown)."""
        if vcpu not in self.cpuset:
            return
        mapping = self.cpu_pinning or {}
        mapping[vcpu] = pcpu
        self.cpu_pinning = mapping

    def pin_vcpus(self, *cpu_pairs):
        """Pin several (vcpu, pcpu) pairs at once."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)
Code example #8 (score: 0)
class InstanceNUMACell(base.NovaObject, base.NovaObjectDictCompat):
    """A single guest NUMA cell with WRS (Wind River/StarlingX) vendor
    extensions: physnode affinity, shared vCPU placement, and L3 CAT
    (cache allocation) fields.
    """

    # Version 1.0: Initial version
    # Version 1.1: Add pagesize field
    # Version 1.2: Add cpu_pinning_raw and topology fields
    # Version 1.3: Add cpu_policy and cpu_thread_policy fields
    # Version 1.4: Add cpuset_reserved field
    #              WRS: Add physnode
    #              WRS: Add shared_vcpu and shared_pcpu_for_vcpu
    VERSION = '1.4'

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields newer than ``target_version`` from ``primitive``
        so older consumers can deserialize it; also drop the vendor L3
        fields when talking to Newton-level computes.
        """
        super(InstanceNUMACell,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4):
            primitive.pop('cpuset_reserved', None)

        if target_version < (1, 3):
            primitive.pop('cpu_policy', None)
            primitive.pop('cpu_thread_policy', None)
        # NOTE(jgauld): R4 to R5 upgrades, Pike upversion to 1.4. Drop L3
        #               related fields with R4/Newton.
        if target_version < (1, 4) or CONF.upgrade_levels.compute == 'newton':
            primitive.pop('l3_cpuset', None)
            primitive.pop('l3_both_size', None)
            primitive.pop('l3_code_size', None)
            primitive.pop('l3_data_size', None)

    fields = {
        'id':
        obj_fields.IntegerField(),
        'cpuset':
        obj_fields.SetOfIntegersField(),
        'memory':
        obj_fields.IntegerField(),
        # WRS: host NUMA node this cell is affined to, if any.
        'physnode':
        obj_fields.IntegerField(nullable=True),
        'pagesize':
        obj_fields.IntegerField(nullable=True),
        'cpu_topology':
        obj_fields.ObjectField('VirtCPUTopology', nullable=True),
        'cpu_pinning_raw':
        obj_fields.DictOfIntegersField(nullable=True),
        # WRS: guest CPU that floats on the shared host CPU, and that
        # shared host CPU, respectively.
        'shared_vcpu':
        obj_fields.IntegerField(nullable=True),
        'shared_pcpu_for_vcpu':
        obj_fields.IntegerField(nullable=True),
        'cpu_policy':
        obj_fields.CPUAllocationPolicyField(nullable=True),
        'cpu_thread_policy':
        obj_fields.CPUThreadAllocationPolicyField(nullable=True),
        # These physical CPUs are reserved for use by the hypervisor
        'cpuset_reserved':
        obj_fields.SetOfIntegersField(nullable=True),

        # L3 CAT
        'l3_cpuset':
        obj_fields.SetOfIntegersField(nullable=True),
        'l3_both_size':
        obj_fields.IntegerField(nullable=True),
        'l3_code_size':
        obj_fields.IntegerField(nullable=True),
        'l3_data_size':
        obj_fields.IntegerField(nullable=True),
    }

    # Dict-style access to 'cpu_pinning_raw' under a friendlier name.
    cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')

    def __init__(self, **kwargs):
        """Default every optional attribute the caller did not supply to
        None, without marking the field dirty for change tracking.
        """
        super(InstanceNUMACell, self).__init__(**kwargs)
        if 'pagesize' not in kwargs:
            self.pagesize = None
            self.obj_reset_changes(['pagesize'])
        if 'cpu_topology' not in kwargs:
            self.cpu_topology = None
            self.obj_reset_changes(['cpu_topology'])
        # NOTE: the 'cpu_pinning' proxy is backed by 'cpu_pinning_raw'.
        if 'cpu_pinning' not in kwargs:
            self.cpu_pinning = None
            self.obj_reset_changes(['cpu_pinning_raw'])
        if 'cpu_policy' not in kwargs:
            self.cpu_policy = None
            self.obj_reset_changes(['cpu_policy'])
        if 'cpu_thread_policy' not in kwargs:
            self.cpu_thread_policy = None
            self.obj_reset_changes(['cpu_thread_policy'])
        if 'cpuset_reserved' not in kwargs:
            self.cpuset_reserved = None
            self.obj_reset_changes(['cpuset_reserved'])
        if 'physnode' not in kwargs:
            self.physnode = None
            self.obj_reset_changes(['physnode'])
        if 'shared_vcpu' not in kwargs:
            self.shared_vcpu = None
            self.obj_reset_changes(['shared_vcpu'])
        if 'shared_pcpu_for_vcpu' not in kwargs:
            self.shared_pcpu_for_vcpu = None
            self.obj_reset_changes(['shared_pcpu_for_vcpu'])
        if 'l3_cpuset' not in kwargs:
            self.l3_cpuset = None
            self.obj_reset_changes(['l3_cpuset'])
        if 'l3_both_size' not in kwargs:
            self.l3_both_size = None
            self.obj_reset_changes(['l3_both_size'])
        if 'l3_code_size' not in kwargs:
            self.l3_code_size = None
            self.obj_reset_changes(['l3_code_size'])
        if 'l3_data_size' not in kwargs:
            self.l3_data_size = None
            self.obj_reset_changes(['l3_data_size'])

    def __len__(self):
        """Return the number of virtual CPUs in the cell."""
        return len(self.cpuset)

    def _to_dict(self):
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_to_dict_ to the future to avoid confusing.
        return {
            'cpus': hardware.format_cpu_spec(self.cpuset, allow_ranges=False),
            'mem': {
                'total': self.memory
            },
            'id': self.id,
            'pagesize': self.pagesize
        }

    @classmethod
    def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy, could be renamed in
        # _legacy_from_dict_ to the future to avoid confusing.
        cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
        memory = data_dict.get('mem', {}).get('total', 0)
        cell_id = data_dict.get('id')
        pagesize = data_dict.get('pagesize')
        return cls(id=cell_id, cpuset=cpuset, memory=memory, pagesize=pagesize)

    @property
    def siblings(self):
        """Group the cell's CPUs into sets of thread siblings.

        Returns an empty list when no thread topology is known or each
        core has a single thread (no CPU has a sibling then).
        """
        cpu_list = sorted(list(self.cpuset))

        threads = 0
        if ('cpu_topology' in self) and self.cpu_topology:
            threads = self.cpu_topology.threads
        if threads == 1:
            # One thread per core means no CPU has a sibling.
            threads = 0

        # zip() over `threads` copies of a single iterator chunks the
        # sorted CPU list into groups of `threads`; zero threads -> [].
        return list(map(set, zip(*[iter(cpu_list)] * threads)))

    @property
    def cpu_pinning_requested(self):
        """True when the cell's CPUs must be pinned to dedicated host CPUs."""
        return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED

    def pin(self, vcpu, pcpu):
        """Pin guest CPU ``vcpu`` to host CPU ``pcpu``; ignores unknown
        vCPUs.
        """
        if vcpu not in self.cpuset:
            return
        pinning_dict = self.cpu_pinning or {}
        pinning_dict[vcpu] = pcpu
        self.cpu_pinning = pinning_dict

    def pin_vcpus(self, *cpu_pairs):
        """Pin multiple (vcpu, pcpu) pairs; see :meth:`pin`."""
        for vcpu, pcpu in cpu_pairs:
            self.pin(vcpu, pcpu)

    def clear_host_pinning(self):
        """Clear any data related to how this cell is pinned to the host.

        Needed for aborting claims as we do not want to keep stale data around.
        """
        self.id = -1
        self.cpu_pinning = {}
        return self

    @property
    def cachetune_requested(self):
        """True when an L3 CAT (cachetune) allocation was requested."""
        return (self.l3_cpuset is not None) and (len(self.l3_cpuset) > 0)

    # WRS extension
    @property
    def numa_pinning_requested(self):
        """True when the cell must be placed on a specific host NUMA node."""
        return self.physnode is not None

    # WRS: add a readable string representation
    def __str__(self):
        return '  {obj_name} (id: {id})\n' \
               '    cpuset: {cpuset}\n' \
               '    shared_vcpu: {shared_vcpu}\n' \
               '    shared_pcpu_for_vcpu: {shared_pcpu_for_vcpu}\n' \
               '    memory: {memory}\n' \
               '    physnode: {physnode}\n' \
               '    pagesize: {pagesize}\n' \
               '    cpu_topology: {cpu_topology}\n' \
               '    cpu_pinning: {cpu_pinning}\n' \
               '    siblings: {siblings}\n' \
               '    cpu_policy: {cpu_policy}\n' \
               '    cpu_thread_policy: {cpu_thread_policy}\n' \
               '    l3_cpuset: {l3_cpuset}\n' \
               '    l3_both_size: {l3_both_size}\n' \
               '    l3_code_size: {l3_code_size}\n' \
               '    l3_data_size: {l3_data_size}'.format(
            obj_name=self.obj_name(),
            id=self.id if ('id' in self) else None,
            cpuset=hardware.format_cpu_spec(
                self.cpuset, allow_ranges=True),
            shared_vcpu=self.shared_vcpu,
            shared_pcpu_for_vcpu=self.shared_pcpu_for_vcpu,
            memory=self.memory,
            physnode=self.physnode,
            pagesize=self.pagesize,
            cpu_topology=self.cpu_topology if (
                'cpu_topology' in self) else None,
            cpu_pinning=self.cpu_pinning,
            siblings=self.siblings,
            cpu_policy=self.cpu_policy,
            cpu_thread_policy=self.cpu_thread_policy,
            l3_cpuset=hardware.format_cpu_spec(
                self.l3_cpuset or [], allow_ranges=True),
            l3_both_size=self.l3_both_size,
            l3_code_size=self.l3_code_size,
            l3_data_size=self.l3_data_size,
        )

    # WRS: add a readable representation, without newlines
    def __repr__(self):
        return '{obj_name} (id: {id}) ' \
               'cpuset: {cpuset} ' \
               'shared_vcpu: {shared_vcpu} ' \
               'shared_pcpu_for_vcpu: {shared_pcpu_for_vcpu} ' \
               'memory: {memory} ' \
               'physnode: {physnode} ' \
               'pagesize: {pagesize} ' \
               'cpu_topology: {cpu_topology} ' \
               'cpu_pinning: {cpu_pinning} ' \
               'siblings: {siblings} ' \
               'cpu_policy: {cpu_policy} ' \
               'cpu_thread_policy: {cpu_thread_policy} ' \
               'l3_cpuset: {l3_cpuset} ' \
               'l3_both_size: {l3_both_size} ' \
               'l3_code_size: {l3_code_size} ' \
               'l3_data_size: {l3_data_size}'.format(
            obj_name=self.obj_name(),
            id=self.id if ('id' in self) else None,
            cpuset=hardware.format_cpu_spec(
                self.cpuset, allow_ranges=True),
            shared_vcpu=self.shared_vcpu,
            shared_pcpu_for_vcpu=self.shared_pcpu_for_vcpu,
            memory=self.memory,
            physnode=self.physnode,
            pagesize=self.pagesize,
            cpu_topology=self.cpu_topology if (
                'cpu_topology' in self) else None,
            cpu_pinning=self.cpu_pinning,
            siblings=self.siblings,
            cpu_policy=self.cpu_policy,
            cpu_thread_policy=self.cpu_thread_policy,
            l3_cpuset=hardware.format_cpu_spec(
                self.l3_cpuset or [], allow_ranges=True),
            l3_both_size=self.l3_both_size,
            l3_code_size=self.l3_code_size,
            l3_data_size=self.l3_data_size,
        )