Exemplo n.º 1
0
    def _from_db_object(context, compute, db_compute):
        """Hydrate *compute* (a ComputeNode object) from DB row *db_compute*.

        Plain columns are copied one-to-one; the JSON-encoded columns
        (stats, supported_instances, pci_stats) are deserialized below,
        and the allocation ratios are backfilled for old compute nodes.
        NOTE(review): presumably decorated @staticmethod upstream (no
        self parameter) — confirm against the enclosing class.
        """
        # Fields handled explicitly after the generic copy loop.
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
        ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Mitaka) where the opt default values will be
            # restored for both cpu (16.0) and ram (1.5) allocation ratios.
            # TODO(sbauza): Remove that in the next major version bump where
            # we break compatibilility with old Kilo computes
            if key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio':
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
            compute[key] = value

        # stats is stored as a JSON blob in the DB.
        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        # supported_instances is a JSON list of HV spec triples.
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute['supported_hv_specs'] = hv_specs

        # NOTE(review): pci_stats is passed to from_pci_stats() even when
        # None here, unlike some sibling versions — confirm intended.
        pci_stats = db_compute.get('pci_stats')
        compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute
Exemplo n.º 2
0
    def _from_db_object(context, compute, db_compute):
        """Populate *compute* from the raw database row *db_compute*."""
        # These fields need deserialization or schema-dependent handling,
        # so they are excluded from the plain column copy.
        skip = {'stats', 'supported_hv_specs', 'host', 'pci_device_pools'}
        for field in set(compute.fields) - skip:
            compute[field] = db_compute[field]

        raw_stats = db_compute['stats']
        if raw_stats:
            compute['stats'] = jsonutils.loads(raw_stats)

        raw_specs = db_compute.get('supported_instances')
        if raw_specs:
            compute['supported_hv_specs'] = [
                objects.HVSpec.from_list(spec)
                for spec in jsonutils.loads(raw_specs)]

        compute.pci_device_pools = pci_device_pool.from_pci_stats(
            db_compute.get('pci_stats'))
        compute._context = context

        # The host column may or may not exist in this schema version;
        # let the helper decide how to populate the field.
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute
Exemplo n.º 3
0
    def _from_db_object(context, compute, db_compute):
        """Build a ComputeNode object out of a database row."""
        handled_separately = {"stats", "supported_hv_specs", "host",
                              "pci_device_pools"}
        for name in set(compute.fields) - handled_separately:
            compute[name] = db_compute[name]

        serialized_stats = db_compute["stats"]
        if serialized_stats:
            compute["stats"] = jsonutils.loads(serialized_stats)

        serialized_specs = db_compute.get("supported_instances")
        if serialized_specs:
            compute["supported_hv_specs"] = [
                objects.HVSpec.from_list(entry)
                for entry in jsonutils.loads(serialized_specs)
            ]

        compute.pci_device_pools = pci_device_pool.from_pci_stats(
            db_compute.get("pci_stats"))
        compute._context = context

        # Delegate host handling: the host column is only present in
        # some schema versions.
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute
Exemplo n.º 4
0
    def _from_db_object(context, compute, db_compute):
        """Hydrate a ComputeNode object from its database representation.

        Besides the straight column copies, this deserializes the
        JSON-encoded columns and backfills the allocation ratios for
        compute nodes that predate the ratio-reporting resource tracker.
        """
        # Hardcoded fallbacks matching the future (Mitaka) option defaults.
        # TODO: drop once compatibility with old Kilo computes is broken
        # at the next major version bump.
        ratio_defaults = {'cpu_allocation_ratio': 16.0,
                          'ram_allocation_ratio': 1.5}
        deferred = set(['stats', 'supported_hv_specs', 'host',
                        'pci_device_pools'])
        for key in set(compute.fields) - deferred:
            value = db_compute[key]
            if key in ratio_defaults and (value is None or value == 0.0):
                # Either an old node whose ResourceTracker never reported
                # the ratio, or the operator kept the 0.0 default: fall
                # back to the controller's configured value, and to the
                # hardcoded default when that is unset as well.
                value = getattr(CONF, key)
                if value == 0.0:
                    value = ratio_defaults[key]
            compute[key] = value

        raw_stats = db_compute['stats']
        if raw_stats:
            compute['stats'] = jsonutils.loads(raw_stats)

        raw_specs = db_compute.get('supported_instances')
        if raw_specs:
            compute['supported_hv_specs'] = [
                objects.HVSpec.from_list(spec)
                for spec in jsonutils.loads(raw_specs)]

        compute.pci_device_pools = pci_device_pool.from_pci_stats(
            db_compute.get('pci_stats'))
        compute._context = context

        # The host column is only present in newer schemas; delegate the
        # decision to the helper.
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()
        return compute
Exemplo n.º 5
0
 def setUp(self):
     """Skip the whole test class; the fixture below is never reached."""
     # Raising skipException up front disables every test in this class;
     # everything after this line is intentionally dead code.
     raise testtools.TestCase.skipException(skip_msg)
     super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
     cpu_info = collections.OrderedDict([
         ('arch', 'x86_64'),
         ('model', 'Nehalem'),
         ('vendor', 'Intel'),
         ('features', ['pge', 'clflush']),
         ('topology', {'cores': 1, 'threads': 1, 'sockets': 4}),
     ])
     pci_pool_stats = {
         "count": 5,
         "vendor_id": "8086",
         "product_id": "1520",
         "keya": "valuea",
         "extra_info": {
             "phys_function": '[["0x0000", "0x04", "0x00", "0x1"]]',
             "key1": "value1",
         },
     }
     self.fake_compute_node = objects.ComputeNode(
         cpu_info=jsonutils.dumps(cpu_info),
         current_workload=0,
         disk_available_least=0,
         host_ip="1.1.1.1",
         state="up",
         status="enabled",
         free_disk_gb=1028,
         free_ram_mb=7680,
         hypervisor_hostname="fake-mini",
         hypervisor_type="fake",
         hypervisor_version=1000,
         id=1,
         local_gb=1028,
         local_gb_used=0,
         memory_mb=8192,
         memory_mb_used=512,
         running_vms=0,
         vcpus=1,
         vcpus_used=0,
         service_id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         pci_device_pools=pci_device_pool.from_pci_stats(pci_pool_stats),
     )
     self.fake_service = objects.Service(
         id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         disabled=False,
         disabled_reason=None)
Exemplo n.º 6
0
 def test_update_resource_stats_creates(self, mock_update, mock_create, mock_save):
     """When the update path reports a miss, the node gets created."""
     node = objects.ComputeNode(context=self.context)
     node.host = "fakehost"
     node.hypervisor_hostname = "fakenode"
     pool_stats = [{"vendor_id": "foo", "product_id": "foo", "count": 1, "a": "b"}]
     node.pci_device_pools = pci_device_pool.from_pci_stats(pool_stats)
     mock_update.return_value = False
     self.client.update_resource_stats(node)
     mock_save.assert_called_once_with()
     mock_create.assert_called_once_with()
 def test_update_resource_stats_saves(self, mock_save):
     """A plain stats update just saves the compute node."""
     node = objects.ComputeNode()
     node.host = 'fakehost'
     node.hypervisor_hostname = 'fakenode'
     node.pci_device_pools = pci_device_pool.from_pci_stats(
         [{"vendor_id": "foo", "product_id": "foo", "count": 1, "a": "b"}])
     self.client.update_resource_stats(node)
     mock_save.assert_called_once_with()
Exemplo n.º 8
0
 def setUp(self):
     """Skip this test class entirely; the fixture below never runs."""
     # The unconditional raise disables every test here, so the rest of
     # this method is intentionally unreachable.
     raise testtools.TestCase.skipException(skip_msg)
     super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
     cpu_info = collections.OrderedDict([
         ('arch', 'x86_64'),
         ('model', 'Nehalem'),
         ('vendor', 'Intel'),
         ('features', ['pge', 'clflush']),
         ('topology', {'cores': 1, 'threads': 1, 'sockets': 4}),
     ])
     pool_stats = {
         "count": 5,
         "vendor_id": "8086",
         "product_id": "1520",
         "keya": "valuea",
         "extra_info": {
             "phys_function": '[["0x0000", "0x04", "0x00", "0x1"]]',
             "key1": "value1",
         },
     }
     self.fake_compute_node = objects.ComputeNode(
         cpu_info=jsonutils.dumps(cpu_info),
         current_workload=0,
         disk_available_least=0,
         host_ip="1.1.1.1",
         state="up",
         status="enabled",
         free_disk_gb=1028,
         free_ram_mb=7680,
         hypervisor_hostname="fake-mini",
         hypervisor_type="fake",
         hypervisor_version=1000,
         id=1,
         local_gb=1028,
         local_gb_used=0,
         memory_mb=8192,
         memory_mb_used=512,
         running_vms=0,
         vcpus=1,
         vcpus_used=0,
         service_id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         pci_device_pools=pci_device_pool.from_pci_stats(pool_stats),
     )
     self.fake_service = objects.Service(
         id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         disabled=False,
         disabled_reason=None)
Exemplo n.º 9
0
 def setUp(self):
     """Skip the class; fixture construction below never executes."""
     # Blanket skip: the raise fires before any fixture work, leaving
     # everything after it as intentionally dead code.
     raise testtools.TestCase.skipException(skip_msg)
     super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
     cpu_info = collections.OrderedDict([
         ("arch", "x86_64"),
         ("model", "Nehalem"),
         ("vendor", "Intel"),
         ("features", ["pge", "clflush"]),
         ("topology", {"cores": 1, "threads": 1, "sockets": 4}),
     ])
     pool_stats = {
         "count": 5,
         "vendor_id": "8086",
         "product_id": "1520",
         "keya": "valuea",
         "key1": "value1",
         "numa_node": 1,
     }
     self.fake_compute_node = objects.ComputeNode(
         cpu_info=jsonutils.dumps(cpu_info),
         current_workload=0,
         disk_available_least=0,
         host_ip="1.1.1.1",
         state="up",
         status="enabled",
         free_disk_gb=1028,
         free_ram_mb=7680,
         hypervisor_hostname="fake-mini",
         hypervisor_type="fake",
         hypervisor_version=1000,
         id=1,
         local_gb=1028,
         local_gb_used=0,
         memory_mb=8192,
         memory_mb_used=512,
         running_vms=0,
         vcpus=1,
         vcpus_used=0,
         service_id=2,
         host="043b3cacf6f34c90a7245151fc8ebcda",
         pci_device_pools=pci_device_pool.from_pci_stats(pool_stats),
     )
     self.fake_service = objects.Service(
         id=2,
         host="043b3cacf6f34c90a7245151fc8ebcda",
         disabled=False,
         disabled_reason=None)
Exemplo n.º 10
0
 def test_update_resource_stats_saves(self, mock_update, mock_create,
                                      mock_save):
     """A successful update saves the node and does not create it."""
     node = objects.ComputeNode(context=self.context)
     node.host = 'fakehost'
     node.hypervisor_hostname = 'fakenode'
     pool_stats = [{"vendor_id": "foo", "product_id": "foo",
                    "count": 1, "a": "b"}]
     node.pci_device_pools = pci_device_pool.from_pci_stats(pool_stats)
     mock_update.return_value = True
     self.client.update_resource_stats(node)
     mock_save.assert_called_once_with()
     mock_update.assert_called_once_with()
     self.assertFalse(mock_create.called)
Exemplo n.º 11
0
 def setUp(self):
     """Build the fake compute node and service used by these samples."""
     super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
     pool_stats = {
         "count": 5,
         "vendor_id": "8086",
         "product_id": "1520",
         "keya": "valuea",
         "extra_info": {
             "phys_function": '[["0x0000", "0x04", "0x00", "0x1"]]',
             "key1": "value1",
         },
     }
     self.fake_compute_node = objects.ComputeNode(
         cpu_info="?",
         current_workload=0,
         disk_available_least=0,
         host_ip="1.1.1.1",
         state="up",
         status="enabled",
         free_disk_gb=1028,
         free_ram_mb=7680,
         hypervisor_hostname="fake-mini",
         hypervisor_type="fake",
         hypervisor_version=1000,
         id=1,
         local_gb=1028,
         local_gb_used=0,
         memory_mb=8192,
         memory_mb_used=512,
         running_vms=0,
         vcpus=1,
         vcpus_used=0,
         service_id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         pci_device_pools=pci_device_pool.from_pci_stats(pool_stats),
     )
     self.fake_service = objects.Service(
         id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         disabled=False,
         disabled_reason=None)
Exemplo n.º 12
0
 def setUp(self):
     """Build the fake compute node (with a cached service) for samples."""
     super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
     pool_stats = {
         "count": 5,
         "vendor_id": "8086",
         "product_id": "1520",
         "keya": "valuea",
         "extra_info": {
             "phys_function": '[["0x0000", "0x04", "0x00", "0x1"]]',
             "key1": "value1",
         },
     }
     self.fake_compute_node = objects.ComputeNode(
         cpu_info="?",
         current_workload=0,
         disk_available_least=0,
         host_ip="1.1.1.1",
         state="up",
         status="enabled",
         free_disk_gb=1028,
         free_ram_mb=7680,
         hypervisor_hostname="fake-mini",
         hypervisor_type="fake",
         hypervisor_version=1000,
         id=1,
         local_gb=1028,
         local_gb_used=0,
         memory_mb=8192,
         memory_mb_used=512,
         running_vms=0,
         # Pre-populated service avoids a DB lookup when the view layer
         # resolves the node's service.
         _cached_service=objects.Service(
             host='043b3cacf6f34c90a7245151fc8ebcda',
             disabled=False,
             disabled_reason=None),
         vcpus=1,
         vcpus_used=0,
         service_id=2,
         host='043b3cacf6f34c90a7245151fc8ebcda',
         pci_device_pools=pci_device_pool.from_pci_stats(pool_stats),
     )
Exemplo n.º 13
0
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci import device
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device

# Canonical PCI pool stats shared by the tests in this module.
pci_stats = [{
    "count": 3,
    "vendor_id": "8086",
    "product_id": "1520",
    "numa_node": 1
}]

# Shared fixture: a compute node whose PCI pools are derived from the
# stats above via pci_device_pool.from_pci_stats().
fake_compute_node = objects.ComputeNode(
    pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))


class FakeResponse(wsgi.ResponseObject):
    """Minimal ResponseObject stand-in used by the PCI controller tests."""
    pass


class PciServerControllerTestV21(test.NoDBTestCase):
    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        self.fake_obj = {
            'server': {
                'addresses': {},
                'id': 'fb08',
                'name': 'a3',
Exemplo n.º 14
0
    def _from_db_object(context, compute, db_compute):
        """Hydrate *compute* (a ComputeNode object) from DB row *db_compute*.

        Plain columns are copied one-to-one, JSON-encoded columns are
        deserialized, allocation ratios stored as NULL/0.0 are migrated
        online to their configured (or initial) values, and 'mapped'
        defaults to 0 for rows predating that column.
        """
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            ])
        fields = set(compute.fields) - special_cases
        # Ratio values rewritten below are persisted back to the DB at the
        # end of the loop, so the migration happens at most once per row.
        online_updates = {}
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Newton) where the opt default values will be
            # restored for both cpu (16.0), ram (1.5) and disk (1.0)
            # allocation ratios.
            # TODO(yikun): Remove this online migration code when all ratio
            # values are NOT 0.0 or NULL
            ratio_keys = ['cpu_allocation_ratio', 'ram_allocation_ratio',
                          'disk_allocation_ratio']
            if key in ratio_keys and value in (None, 0.0):
                # ResourceTracker is not updating the value (old node)
                # or the compute node is updated but the default value has
                # not been changed
                r = getattr(CONF, key)
                # NOTE(yikun): If the allocation ratio record is not set, the
                # allocation ratio will be changed to the
                # CONF.x_allocation_ratio value if x_allocation_ratio is
                # set, and fallback to use the CONF.initial_x_allocation_ratio
                # otherwise.
                init_x_ratio = getattr(CONF, 'initial_%s' % key)
                value = r if r else init_x_ratio
                online_updates[key] = value
            elif key == 'mapped':
                value = 0 if value is None else value

            setattr(compute, key, value)

        # Side effect: write the migrated ratios back so future loads of
        # this row skip the backfill above.
        if online_updates:
            db.compute_node_update(context, compute.id, online_updates)

        # stats is stored as a JSON blob.
        stats = db_compute['stats']
        if stats:
            compute.stats = jsonutils.loads(stats)

        # supported_instances is a JSON list of HV spec triples.
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute.supported_hv_specs = hv_specs

        # pci_stats may legitimately be NULL; only convert when present.
        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()

        return compute
Exemplo n.º 15
0
 def to_device_pools_obj(self):
     """Convert the pools held here into a PciDevicePoolList object."""
     # Materialize the iterable of pool dicts before handing it over.
     return pci_device_pool.from_pci_stats(list(self))
Exemplo n.º 16
0
 def to_device_pools_obj(self):
     """Return the contents of the pools as a PciDevicePoolList object."""
     # Materialize the iterable of pool entries before conversion.
     stats = [x for x in self]
     return pci_device_pool.from_pci_stats(stats)
Exemplo n.º 17
0
 def test_from_pci_stats_list_of_dicts(self):
     """A list of pool dicts yields one pool object per entry."""
     pool_dict = fake_pci.fake_pool_dict
     pools = pci_device_pool.from_pci_stats([pool_dict, pool_dict])
     self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList)
     self.assertEqual(2, len(pools))
Exemplo n.º 18
0
 def test_from_pci_stats_bad(self):
     """Malformed input makes from_pci_stats() return None."""
     bad_input = "not a valid json string for an object"
     self.assertIsNone(pci_device_pool.from_pci_stats(bad_input))
Exemplo n.º 19
0
from nova.api.openstack.compute import pci
from nova.api.openstack import wsgi
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device
from nova.tests import uuidsentinel as uuids


# Canonical PCI pool stats shared by the tests in this module.
pci_stats = [{"count": 3, "vendor_id": "8086", "product_id": "1520", "numa_node": 1}]

# Shared fixture built from the stats above.
fake_compute_node = objects.ComputeNode(pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))


class FakeResponse(wsgi.ResponseObject):
    """Minimal ResponseObject stand-in used by the PCI controller tests."""
    pass


class PciServerControllerTestV21(test.NoDBTestCase):
    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        self.fake_obj = {
            "server": {
                "addresses": {},
                "id": "fb08",
                "name": "a3",
Exemplo n.º 20
0
 def test_from_pci_stats_obj(self):
     """A pool-list primitive round-trips into a one-element pool list."""
     primitive = fake_pci.fake_pool_list_primitive
     pools = pci_device_pool.from_pci_stats(primitive)
     self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList)
     self.assertEqual(1, len(pools))
Exemplo n.º 21
0
from nova import exception
from nova import objects
from nova.objects import pci_device_pool
from nova.pci import device
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device


# Canonical PCI pool stats (with a JSON-encoded phys_function entry)
# shared by the tests in this module.
pci_stats = [{"count": 3,
              "vendor_id": "8086",
              "product_id": "1520",
              "extra_info": {"phys_function": '[["0x0000", "0x04", '
                                              '"0x00", "0x1"]]'}}]
# Shared fixture built from the stats above.
fake_compute_node = objects.ComputeNode(
    pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))


class FakeResponse(wsgi.ResponseObject):
    """Minimal ResponseObject stand-in used by the PCI controller tests."""
    pass


class PciServerControllerTestV21(test.NoDBTestCase):
    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        self.fake_obj = {'server': {'addresses': {},
                                    'id': 'fb08',
                                    'name': 'a3',
                                    'status': 'ACTIVE',
                                    'tenant_id': '9a3af784c',
Exemplo n.º 22
0
    def _from_db_object(context, compute, db_compute):
        """Hydrate a ComputeNode object from a database row.

        Copies plain columns, deserializes the JSON-encoded ones,
        backfills allocation ratios for pre-Liberty nodes and, as a
        transitional measure, generates and saves a uuid for rows that
        do not carry one yet.
        """
        # Fallback ratios matching the upcoming (Newton) option defaults.
        # TODO: remove when compatibility with old Liberty computes is
        # broken at the next major version bump.
        ratio_defaults = {'cpu_allocation_ratio': 16.0,
                          'ram_allocation_ratio': 1.5,
                          'disk_allocation_ratio': 1.0}
        deferred = set(['stats', 'supported_hv_specs', 'host',
                        'pci_device_pools', 'uuid'])
        for key in set(compute.fields) - deferred:
            value = db_compute[key]
            if key in ratio_defaults and (value is None or value == 0.0):
                # Either an old node whose ResourceTracker never reported
                # the ratio, or the operator left the 0.0 default in
                # place: use the controller's configured value, falling
                # back to the hardcoded default when that is unset too.
                value = getattr(CONF, key)
                if value == 0.0:
                    value = ratio_defaults[key]
            compute[key] = value

        raw_stats = db_compute['stats']
        if raw_stats:
            compute['stats'] = jsonutils.loads(raw_stats)

        raw_specs = db_compute.get('supported_instances')
        if raw_specs:
            compute['supported_hv_specs'] = [
                objects.HVSpec.from_list(spec)
                for spec in jsonutils.loads(raw_specs)]

        pools = db_compute.get('pci_stats')
        if pools is not None:
            pools = pci_device_pool.from_pci_stats(pools)
        compute.pci_device_pools = pools
        compute._context = context

        # The host column is only present in newer schemas; delegate.
        compute._host_from_db_object(compute, db_compute)

        # NOTE(danms): Conditional until Newton enforces non-NULL UUIDs
        # in the database; then 'uuid' can leave the deferred set above.
        if db_compute.get('uuid'):
            compute.uuid = db_compute['uuid']

        compute.obj_reset_changes()

        # NOTE(danms): Must come after obj_reset_changes() so that only
        # the generated uuid is persisted.
        # FIXME(danms): Remove in Newton once all compute nodes are
        # guaranteed to have a uuid in the database.
        if 'uuid' not in compute:
            compute.uuid = uuidutils.generate_uuid()
            LOG.debug('Generated UUID %(uuid)s for compute node %(id)i',
                      dict(uuid=compute.uuid, id=compute.id))
            compute.save()

        return compute
Exemplo n.º 23
0
    def _from_db_object(context, compute, db_compute):
        """Hydrate *compute* (a ComputeNode object) from DB row *db_compute*.

        Plain columns are copied directly; JSON-encoded columns are
        deserialized; allocation ratios are backfilled for old nodes;
        and a uuid is generated (and saved) for rows lacking one.
        """
        # Fields handled explicitly after the generic copy loop.
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            'uuid',
            ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Mitaka) where the opt default values will be
            # restored for both cpu (16.0) and ram (1.5) allocation ratios.
            # TODO(sbauza): Remove that in the next major version bump where
            # we break compatibilility with old Kilo computes
            if key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio':
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
            compute[key] = value

        # stats is stored as a JSON blob.
        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        # supported_instances is a JSON list of HV spec triples.
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute['supported_hv_specs'] = hv_specs

        # NOTE(review): pci_stats is passed to from_pci_stats() even when
        # None here, unlike sibling versions that guard it — confirm.
        pci_stats = db_compute.get('pci_stats')
        compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        # NOTE(danms): Remove this conditional load (and remove uuid from
        # the list of special_cases above) once we're in Newton and have
        # enforced that all UUIDs in the database are not NULL.
        if db_compute.get('uuid'):
            compute.uuid = db_compute['uuid']

        compute.obj_reset_changes()

        # NOTE(danms): This needs to come after obj_reset_changes() to make
        # sure we only save the uuid, if we generate one.
        # FIXME(danms): Remove this in Newton once we have enforced that
        # all compute nodes have uuids set in the database.
        if 'uuid' not in compute:
            compute.uuid = uuidutils.generate_uuid()
            LOG.debug('Generated UUID %(uuid)s for compute node %(id)i',
                      dict(uuid=compute.uuid, id=compute.id))
            compute.save()

        return compute
 def test_from_pci_stats_bad(self):
     """Malformed input yields an empty pool list."""
     bad_input = "not a valid json string for an object"
     pools = pci_device_pool.from_pci_stats(bad_input)
     self.assertEqual(0, len(pools))
 def test_from_pci_stats_dict(self):
     """A single pool dict becomes a one-element pool list."""
     pool_dict = fake_pci.fake_pool_dict
     pools = pci_device_pool.from_pci_stats(pool_dict)
     self.assertIsInstance(pools, pci_device_pool.PciDevicePoolList)
     self.assertEqual(1, len(pools))