def test_migrate_none_or_zero_ratio_with_none_ratio_conf(self):
    """Zeroed ratios are refreshed to the initial_* defaults.

    When CONF.xxx_allocation_ratio is unset (None), compute nodes whose
    allocation ratios are 0.0 must be migrated to the
    CONF.initial_xxx_allocation_ratio values by migrate_empty_ratio.
    """
    cn1 = fake_compute_obj.obj_clone()
    cn1._context = self.context
    cn1.create()
    # Zero the ratios directly in the DB to simulate an old record that
    # predates the ratio defaults.
    db.compute_node_update(
        self.context, cn1.id, {
            'cpu_allocation_ratio': 0.0,
            'disk_allocation_ratio': 0.0,
            'ram_allocation_ratio': 0.0
        })
    self.flags(initial_cpu_allocation_ratio=32.0)
    self.flags(initial_ram_allocation_ratio=8.0)
    self.flags(initial_disk_allocation_ratio=2.0)
    res = compute_node.migrate_empty_ratio(self.context, 1)
    # One record matched, one record migrated.
    self.assertEqual(res, (1, 1))
    # The ratio is refreshed to CONF.initial_xxx_allocation_ratio
    # because CONF.xxx_allocation_ratio is None.
    cns = db.compute_node_get_all(self.context)
    for cn in cns:
        for x in ['cpu', 'disk', 'ram']:
            conf_key = 'initial_%s_allocation_ratio' % x
            key = '%s_allocation_ratio' % x
            self.assertEqual(getattr(CONF, conf_key), cn[key])
def _create_zero_and_none_cn(self):
    """Create two compute nodes: one with 0.0 ratios, one with NULL ratios.

    Helper for the migrate_empty_ratio tests. Both records are written
    through the DB API directly because the object layer does not allow
    setting an allocation ratio to None.
    """
    cn1 = fake_compute_obj.obj_clone()
    cn1._context = self.context
    cn1.create()
    db.compute_node_update(
        self.context, cn1.id, {
            'cpu_allocation_ratio': 0.0,
            'disk_allocation_ratio': 0.0,
            'ram_allocation_ratio': 0.0
        })
    cn1_db = db.compute_node_get(self.context, cn1.id)
    for x in ['cpu', 'disk', 'ram']:
        self.assertEqual(0.0, cn1_db['%s_allocation_ratio' % x])
    cn2 = fake_compute_obj.obj_clone()
    cn2._context = self.context
    cn2.host += '-alt'
    cn2.create()
    # We can't set a cn_obj.xxx_allocation_ratio to None,
    # so we set ratio to None in db directly
    db.compute_node_update(
        self.context, cn2.id, {
            'cpu_allocation_ratio': None,
            'disk_allocation_ratio': None,
            'ram_allocation_ratio': None
        })
    cn2_db = db.compute_node_get(self.context, cn2.id)
    for x in ['cpu', 'disk', 'ram']:
        # Bug fix: assertIsNone takes (obj, msg=None). The previous call
        # assertIsNone(None, value) asserted that None is None (always
        # true) and used the DB value as the failure message, so the
        # check could never fail. Assert on the actual DB value.
        self.assertIsNone(cn2_db['%s_allocation_ratio' % x])
def test_numa_topology_online_migration_when_load(self):
    """Ensure legacy NUMA topology objects are reserialized to o.vo's."""
    node = fake_compute_obj.obj_clone()
    node._context = self.context
    node.create()
    # A pre-objects JSON blob, as an old compute node would have stored it.
    old_blob = jsonutils.dumps({
        "cells": [
            {
                "id": 0,
                "cpus": "0-3",
                "mem": {"total": 512, "used": 256},
                "cpu_usage": 2,
            },
            {
                "id": 1,
                "cpus": "4,5,6,7",
                "mem": {"total": 512, "used": 0},
                "cpu_usage": 0,
            },
        ]
    })
    db.compute_node_update(
        self.context, node.id, {'numa_topology': old_blob})
    # Sanity check: the stored blob is still the legacy format.
    before = db.compute_node_get(self.context, node.id)
    self.assertEqual(old_blob, before['numa_topology'])
    self.assertNotIn('nova_object.name', before['numa_topology'])
    # Loading the node list triggers the online data migration.
    objects.ComputeNodeList.get_all(self.context)
    after = db.compute_node_get(self.context, node.id)
    self.assertNotEqual(old_blob, after['numa_topology'])
    self.assertIn('nova_object.name', after['numa_topology'])
def _create_zero_and_none_cn(self):
    """Create two compute nodes: one with 0.0 ratios, one with NULL ratios.

    Helper for the migrate_empty_ratio tests. Both records are written
    through the DB API directly because the object layer does not allow
    setting an allocation ratio to None.
    """
    cn1 = fake_compute_obj.obj_clone()
    cn1._context = self.context
    cn1.create()
    db.compute_node_update(self.context, cn1.id,
                           {'cpu_allocation_ratio': 0.0,
                            'disk_allocation_ratio': 0.0,
                            'ram_allocation_ratio': 0.0})
    cn1_db = db.compute_node_get(self.context, cn1.id)
    for x in ['cpu', 'disk', 'ram']:
        self.assertEqual(0.0, cn1_db['%s_allocation_ratio' % x])
    cn2 = fake_compute_obj.obj_clone()
    cn2._context = self.context
    cn2.host += '-alt'
    cn2.create()
    # We can't set a cn_obj.xxx_allocation_ratio to None,
    # so we set ratio to None in db directly
    db.compute_node_update(self.context, cn2.id,
                           {'cpu_allocation_ratio': None,
                            'disk_allocation_ratio': None,
                            'ram_allocation_ratio': None})
    cn2_db = db.compute_node_get(self.context, cn2.id)
    for x in ['cpu', 'disk', 'ram']:
        # Bug fix: assertIsNone takes (obj, msg=None). The previous call
        # assertIsNone(None, value) asserted that None is None (always
        # true) and used the DB value as the failure message, so the
        # check could never fail. Assert on the actual DB value.
        self.assertIsNone(cn2_db['%s_allocation_ratio' % x])
def test_migrate_none_or_zero_ratio_with_none_ratio_conf(self):
    """Zeroed ratios migrate to the initial_* defaults.

    With no CONF.xxx_allocation_ratio override set, a node whose ratios
    are 0.0 gets refreshed to CONF.initial_xxx_allocation_ratio.
    """
    node = fake_compute_obj.obj_clone()
    node._context = self.context
    node.create()
    # Zero the ratios directly in the DB to simulate an old record.
    db.compute_node_update(
        self.context, node.id,
        {'cpu_allocation_ratio': 0.0,
         'disk_allocation_ratio': 0.0,
         'ram_allocation_ratio': 0.0})
    self.flags(initial_cpu_allocation_ratio=32.0)
    self.flags(initial_ram_allocation_ratio=8.0)
    self.flags(initial_disk_allocation_ratio=2.0)
    result = compute_node.migrate_empty_ratio(self.context, 1)
    self.assertEqual(result, (1, 1))
    # Because CONF.xxx_allocation_ratio is None, each ratio must have
    # been refreshed to the CONF.initial_xxx_allocation_ratio value.
    for row in db.compute_node_get_all(self.context):
        for resource in ['cpu', 'disk', 'ram']:
            expected = getattr(
                CONF, 'initial_%s_allocation_ratio' % resource)
            self.assertEqual(
                expected, row['%s_allocation_ratio' % resource])
def test_migrate_none_or_zero_ratio_with_not_empty_ratio(self):
    """Nodes with non-empty ratios are left untouched by the migration."""
    node = fake_compute_obj.obj_clone()
    node._context = self.context
    node.create()
    db.compute_node_update(
        self.context, node.id,
        {'cpu_allocation_ratio': 32.0,
         'ram_allocation_ratio': 4.0,
         'disk_allocation_ratio': 3.0})
    result = compute_node.migrate_empty_ratio(self.context, 1)
    # The non-empty ratios will not be refreshed: nothing matched,
    # nothing migrated.
    self.assertEqual(result, (0, 0))
    for row in db.compute_node_get_all(self.context):
        self.assertEqual(32.0, row['cpu_allocation_ratio'])
        self.assertEqual(4.0, row['ram_allocation_ratio'])
        self.assertEqual(3.0, row['disk_allocation_ratio'])
def test_migrate_empty_ratio(self):
    """The batched migration honors its limit and skips deleted nodes."""
    # We have 5 records to process, the last of which is deleted.
    for idx in range(5):
        node = fake_compute_obj.obj_clone()
        node._context = self.context
        node.host += '-alt-%s' % idx
        node.create()
        db.compute_node_update(
            self.context, node.id, {'cpu_allocation_ratio': 0.0})
        if idx == 4:
            node.destroy()
    # First only process 2.
    first_pass = compute_node.migrate_empty_ratio(self.context, 2)
    self.assertEqual(first_pass, (2, 2))
    # Then process the rest - there should only be 2 found since one
    # of the remaining compute nodes is deleted and gets filtered out.
    second_pass = compute_node.migrate_empty_ratio(self.context, 999)
    self.assertEqual(second_pass, (2, 2))
def save(self, prune_stats=False):
    """Persist this compute node's changed fields to the database.

    :param prune_stats: ignored; kept only for caller compatibility.
    """
    # NOTE(belliott) ignore prune_stats param, no longer relevant
    changes = self.obj_get_changes()
    # The primary key is never part of an update.
    changes.pop('id', None)
    # Serialize the complex fields into their DB column representations.
    self._convert_stats_to_db_format(changes)
    self._convert_host_ip_to_db_format(changes)
    self._convert_supported_instances_to_db_format(changes)
    self._convert_pci_stats_to_db_format(changes)
    db_row = db.compute_node_update(self._context, self.id, changes)
    # Refresh this object from the row the DB actually stored.
    self._from_db_object(self._context, self, db_row)
def _from_db_object(context, compute, db_compute):
    """Hydrate a ComputeNode object from a DB row.

    Copies every field from the row onto ``compute``, applying online
    data migrations on the way (empty allocation ratios, NULL
    ``mapped``), and deserializes the JSON-encoded columns (stats,
    supported instances, pci stats). Ratio fixes are written back to
    the DB so the record is migrated on load.

    :param context: request context used for the write-back update
    :param compute: the ComputeNode object to populate (returned)
    :param db_compute: the DB row (dict-like) to read from
    :returns: the populated ``compute`` object
    """
    # Fields that need special deserialization and are handled after
    # the generic copy loop below.
    special_cases = set([
        'stats',
        'supported_hv_specs',
        'host',
        'pci_device_pools',
    ])
    fields = set(compute.fields) - special_cases
    # Accumulates migrated values to write back to the DB in one update.
    online_updates = {}
    for key in fields:
        value = db_compute[key]
        # NOTE(sbauza): Since all compute nodes don't possibly run the
        # latest RT code updating allocation ratios, we need to provide
        # a backwards compatible way of hydrating them.
        # As we want to care about our operators and since we don't want to
        # ask them to change their configuration files before upgrading, we
        # prefer to hardcode the default values for the ratios here until
        # the next release (Newton) where the opt default values will be
        # restored for both cpu (16.0), ram (1.5) and disk (1.0)
        # allocation ratios.
        # TODO(yikun): Remove this online migration code when all ratio
        # values are NOT 0.0 or NULL
        ratio_keys = ['cpu_allocation_ratio', 'ram_allocation_ratio',
                      'disk_allocation_ratio']
        if key in ratio_keys and value in (None, 0.0):
            # ResourceTracker is not updating the value (old node)
            # or the compute node is updated but the default value has
            # not been changed
            r = getattr(CONF, key)
            # NOTE(yikun): If the allocation ratio record is not set, the
            # allocation ratio will be changed to the
            # CONF.x_allocation_ratio value if x_allocation_ratio is
            # set, and fallback to use the CONF.initial_x_allocation_ratio
            # otherwise.
            init_x_ratio = getattr(CONF, 'initial_%s' % key)
            value = r if r else init_x_ratio
            online_updates[key] = value
        elif key == 'mapped':
            # Old records may have NULL here; treat that as unmapped (0).
            value = 0 if value is None else value
        setattr(compute, key, value)
    # Write back any migrated ratio values in a single DB update.
    if online_updates:
        db.compute_node_update(context, compute.id, online_updates)
    # 'stats' is stored as a JSON blob; deserialize if present.
    stats = db_compute['stats']
    if stats:
        compute.stats = jsonutils.loads(stats)
    # 'supported_instances' is a JSON list of HVSpec triples.
    sup_insts = db_compute.get('supported_instances')
    if sup_insts:
        hv_specs = jsonutils.loads(sup_insts)
        hv_specs = [objects.HVSpec.from_list(hv_spec)
                    for hv_spec in hv_specs]
        compute.supported_hv_specs = hv_specs
    # 'pci_stats' may legitimately be an empty value, so only skip on
    # None (not on falsiness).
    pci_stats = db_compute.get('pci_stats')
    if pci_stats is not None:
        pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
    compute._context = context
    # Make sure that we correctly set the host field depending on either
    # host column is present in the table or not
    compute._host_from_db_object(compute, db_compute)
    compute.obj_reset_changes()
    return compute