def test_archive_deleted_rows_with_undeleted_residue(self):
    # Boots a server, deletes it, and then tries to archive it.
    server = self._create_server()
    server_id = server['id']
    # Assert that there are instance_actions. instance_actions are
    # interesting since we don't soft delete them but they have a foreign
    # key back to the instances table.
    actions = self.api.get_instance_actions(server_id)
    self.assertTrue(len(actions),
                    'No instance actions for server: %s' % server_id)
    self._delete_server(server)
    # Verify we have the soft deleted instance in the database.
    admin_context = context.get_admin_context(read_deleted='yes')
    # This will raise InstanceNotFound if it's not found.
    instance = db.instance_get_by_uuid(admin_context, server_id)
    # Make sure it's soft deleted.
    self.assertNotEqual(0, instance.deleted)
    # Undelete the instance_extra record to make sure the archive still
    # removes it.
    extra = db.instance_extra_get_by_instance_uuid(
        admin_context, instance.uuid)
    self.assertNotEqual(0, extra.deleted)
    db.instance_extra_update_by_uuid(admin_context, instance.uuid,
                                     {'deleted': 0})
    extra = db.instance_extra_get_by_instance_uuid(
        admin_context, instance.uuid)
    self.assertEqual(0, extra.deleted)
    # Verify we have some system_metadata since we'll check that later.
    self.assertTrue(len(instance.system_metadata),
                    'No system_metadata for instance: %s' % server_id)
    # Create a pci_devices record to simulate an instance that had a PCI
    # device allocated at the time it was deleted. There is a window of
    # time between deletion of the instance record and freeing of the PCI
    # device in nova-compute's _complete_deletion method during RT update.
    db.pci_device_update(
        admin_context, 1, 'fake-address',
        {
            'compute_node_id': 1,
            'address': 'fake-address',
            'vendor_id': 'fake',
            'product_id': 'fake',
            'dev_type': 'fake',
            'label': 'fake',
            'status': 'allocated',
            'instance_uuid': instance.uuid,
        })
    # Now try to archive the soft deleted records.
    results, deleted_instance_uuids, archived = \
        db.archive_deleted_rows(max_rows=100)
    # Verify system_metadata was dropped.
    self.assertIn('instance_system_metadata', results)
    self.assertEqual(len(instance.system_metadata),
                     results['instance_system_metadata'])
    # Verify that instances rows are dropped.
    self.assertIn('instances', results)
    # Verify that instance_actions and instance_actions_events are dropped
    # by the archive.
    self.assertIn('instance_actions', results)
    self.assertIn('instance_actions_events', results)
    self.assertEqual(sum(results.values()), archived)
    # Verify that the pci_devices record has not been dropped.
    self.assertNotIn('pci_devices', results)
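# --- Illustrative note (not from the original test; values are hypothetical) ---
# db.archive_deleted_rows() returns a mapping of table name to the number of
# rows moved to the shadow tables, the list of archived instance UUIDs, and the
# total row count. A result for the test above might look roughly like:
#   results = {'instances': 1, 'instance_extra': 1,
#              'instance_system_metadata': 2, 'instance_actions': 2,
#              'instance_actions_events': 2}
#   archived = sum(results.values())
# The 'pci_devices' table is absent because the device record created above was
# never soft deleted, so it is not eligible for archiving.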
def _migrate_legacy_object(cls, context, instance_uuid, primitive):
    """Convert a pre-Liberty object to a real o.vo.

    Handle an unversioned object created prior to Liberty by transforming
    it into a versioned object and saving the serialized result back to
    the database.

    :param context: RequestContext
    :param instance_uuid: The UUID of the instance this topology is
        associated with.
    :param primitive: A serialized representation of the legacy object.
    :returns: The migrated versioned object.
    """
    obj = cls(
        instance_uuid=instance_uuid,
        cells=[
            InstanceNUMACell(
                id=cell.get('id'),
                cpuset=hardware.parse_cpu_spec(cell.get('cpus', '')),
                pcpuset=set(),
                memory=cell.get('mem', {}).get('total', 0),
                pagesize=cell.get('pagesize'),
            ) for cell in primitive.get('cells', [])
        ],
    )
    db_obj = jsonutils.dumps(obj.obj_to_primitive())
    values = {
        'numa_topology': db_obj,
    }
    db.instance_extra_update_by_uuid(context, instance_uuid, values)
    return obj
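# --- Illustrative sketch (not from the original module) ---
# The legacy, unversioned primitive handled by _migrate_legacy_object() above
# looks roughly like the dict below; the field names mirror the test data in
# test_numa_topology_online_migration further down. 'cpus' is a CPU spec string
# that hardware.parse_cpu_spec() expands into a set (e.g. "0-3" -> {0, 1, 2, 3})
# to populate InstanceNUMACell.cpuset, and 'mem'['total'] becomes the cell's
# memory. After migration, the stored JSON is a versioned o.vo primitive and
# therefore contains a 'nova_object.name' key.
legacy_primitive = {
    "cells": [
        {"id": 0, "cpus": "0-3", "mem": {"total": 512}, "pagesize": 4},
        {"id": 1, "cpus": "4,5,6,7", "mem": {"total": 512}, "pagesize": 4},
    ],
}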
def _save_migrated_cpuset_to_instance_extra(cls, context, obj,
                                            instance_uuid):
    """Persist a migrated NUMA topology object to the instance_extra table."""
    db_obj = jsonutils.dumps(obj.obj_to_primitive())
    values = {
        'numa_topology': db_obj,
    }
    db.instance_extra_update_by_uuid(context, instance_uuid, values)
def _migrate_legacy_dedicated_instance_cpuset(cls, context, instance_uuid,
                                              obj):
    # NOTE(huaqiang): We may encounter a topology object containing
    # old-version 'InstanceNUMACell' cells. In that case the 'dedicated'
    # CPUs are kept in the 'InstanceNUMACell.cpuset' field, but since
    # Victoria they belong in the 'InstanceNUMACell.pcpuset' field.
    # Upgrade the cells and persist the result to the database.
    update_db = False
    for cell in obj.cells:
        if len(cell.cpuset) == 0:
            continue
        if cell.cpu_policy != obj_fields.CPUAllocationPolicy.DEDICATED:
            continue
        cell.pcpuset = cell.cpuset
        cell.cpuset = set()
        update_db = True

    if update_db:
        db_obj = jsonutils.dumps(obj.obj_to_primitive())
        values = {
            'numa_topology': db_obj,
        }
        db.instance_extra_update_by_uuid(context, instance_uuid, values)
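# --- Illustrative sketch (not from the original module) ---
# A standalone approximation of the field move performed by
# _migrate_legacy_dedicated_instance_cpuset() above, using plain dicts instead
# of InstanceNUMACell objects. It only mirrors the cpuset -> pcpuset split for
# 'dedicated' cells and skips the o.vo serialization and database update.
def _split_dedicated_cpus(cells):
    for cell in cells:
        if cell['cpuset'] and cell['cpu_policy'] == 'dedicated':
            cell['pcpuset'] = cell['cpuset']
            cell['cpuset'] = set()
    return cells


cells = [{'cpu_policy': 'dedicated', 'cpuset': {0, 1}, 'pcpuset': set()}]
assert _split_dedicated_cpus(cells) == [
    {'cpu_policy': 'dedicated', 'cpuset': set(), 'pcpuset': {0, 1}}]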
def test_numa_topology_online_migration(self):
    """Ensure legacy NUMA topology objects are reserialized to o.vo's."""
    instance = self._create_instance(host='fake-host', node='fake-node')

    legacy_topology = jsonutils.dumps({
        "cells": [{
            "id": 0, "cpus": "0-3", "mem": {"total": 512}, "pagesize": 4
        }, {
            "id": 1, "cpus": "4,5,6,7", "mem": {"total": 512}, "pagesize": 4
        }]
    })
    db.instance_extra_update_by_uuid(self.context, instance.uuid,
                                     {'numa_topology': legacy_topology})

    instance_db = db.instance_extra_get_by_instance_uuid(
        self.context, instance.uuid, ['numa_topology'])
    self.assertEqual(legacy_topology, instance_db['numa_topology'])
    self.assertNotIn('nova_object.name', instance_db['numa_topology'])

    # Trigger the online migration.
    objects.InstanceList.get_by_host_and_node(
        self.context, 'fake-host', 'fake-node',
        expected_attrs=['numa_topology'])

    instance_db = db.instance_extra_get_by_instance_uuid(
        self.context, instance.uuid, ['numa_topology'])
    self.assertNotEqual(legacy_topology, instance_db['numa_topology'])
    self.assertIn('nova_object.name', instance_db['numa_topology'])
def create(self):
    values = {'numa_topology': self._to_json()}
    db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
                                     values)
    self.obj_reset_changes()