def test_archive_deleted_rows_with_undeleted_residue(self):
    """Archive a deleted server even when related records were un-deleted.

    Boots a server, soft deletes it, manually flips the instance_extra
    record back to deleted=0 and plants an allocated pci_devices row,
    then runs archive_deleted_rows and verifies which tables were (and
    were not) archived.
    """
    # Boots a server, deletes it, and then tries to archive it.
    server = self._create_server()
    server_id = server['id']
    # Assert that there are instance_actions. instance_actions are
    # interesting since we don't soft delete them but they have a foreign
    # key back to the instances table.
    actions = self.api.get_instance_actions(server_id)
    self.assertTrue(len(actions),
                    'No instance actions for server: %s' % server_id)
    self._delete_server(server)
    # Verify we have the soft deleted instance in the database.
    admin_context = context.get_admin_context(read_deleted='yes')
    # This will raise InstanceNotFound if it's not found.
    instance = db.instance_get_by_uuid(admin_context, server_id)
    # Make sure it's soft deleted.
    self.assertNotEqual(0, instance.deleted)
    # Undelete the instance_extra record to make sure we delete it anyway
    extra = db.instance_extra_get_by_instance_uuid(admin_context,
                                                   instance.uuid)
    self.assertNotEqual(0, extra.deleted)
    db.instance_extra_update_by_uuid(admin_context, instance.uuid,
                                     {'deleted': 0})
    extra = db.instance_extra_get_by_instance_uuid(admin_context,
                                                   instance.uuid)
    self.assertEqual(0, extra.deleted)
    # Verify we have some system_metadata since we'll check that later.
    self.assertTrue(len(instance.system_metadata),
                    'No system_metadata for instance: %s' % server_id)
    # Create a pci_devices record to simulate an instance that had a PCI
    # device allocated at the time it was deleted. There is a window of
    # time between deletion of the instance record and freeing of the PCI
    # device in nova-compute's _complete_deletion method during RT update.
    db.pci_device_update(admin_context, 1, 'fake-address',
                         {'compute_node_id': 1,
                          'address': 'fake-address',
                          'vendor_id': 'fake',
                          'product_id': 'fake',
                          'dev_type': 'fake',
                          'label': 'fake',
                          'status': 'allocated',
                          'instance_uuid': instance.uuid})
    # Now try and archive the soft deleted records.
    results, deleted_instance_uuids, archived = \
        db.archive_deleted_rows(max_rows=100)
    # verify system_metadata was dropped
    self.assertIn('instance_system_metadata', results)
    self.assertEqual(len(instance.system_metadata),
                     results['instance_system_metadata'])
    # Verify that instances rows are dropped
    self.assertIn('instances', results)
    # Verify that instance_actions and actions_event are dropped
    # by the archive
    self.assertIn('instance_actions', results)
    self.assertIn('instance_actions_events', results)
    self.assertEqual(sum(results.values()), archived)
    # Verify that the pci_devices record has not been dropped
    self.assertNotIn('pci_devices', results)
def _get_target(self, context, target): """Processes and validates the CLI given target and adapts it for policy authorization. :returns: None if the given target is None, otherwise returns a proper authorization target. :raises nova.exception.InvalidAttribute: if a key in the given target is not an acceptable. :raises nova.exception.InstanceNotFound: if 'instance_id' is given, and there is no instance match the id. """ if not target: return None new_target = {} for t in target: key, value = t.split('=') if key not in self._ACCEPTABLE_TARGETS: raise exception.InvalidAttribute(attr=key) new_target[key] = value # if the target is an instance_id, return an instance instead. instance_id = new_target.get('instance_id') if instance_id: admin_ctxt = nova_context.get_admin_context() instance = db.instance_get_by_uuid(admin_ctxt, instance_id) new_target = { 'user_id': instance['user_id'], 'project_id': instance['project_id'] } return new_target
def test_archive_deleted_rows(self):
    """Boot a server, delete it, then archive the soft-deleted rows."""
    created = self._create_server()
    uuid = created['id']
    # instance_actions are interesting because they are never soft
    # deleted, yet carry a foreign key back to the instances table.
    action_list = self.api.get_instance_actions(uuid)
    self.assertTrue(len(action_list),
                    'No instance actions for server: %s' % uuid)
    self._delete_server(created)
    # The soft-deleted instance must still be visible to an admin
    # context that reads deleted records; this raises InstanceNotFound
    # if the row is gone entirely.
    admin_ctxt = context.get_admin_context(read_deleted='yes')
    db_instance = db.instance_get_by_uuid(admin_ctxt, uuid)
    # Confirm the row really is soft deleted.
    self.assertNotEqual(0, db_instance.deleted)
    # There must be some system_metadata to compare archive counts with.
    self.assertTrue(len(db_instance.system_metadata),
                    'No system_metadata for instance: %s' % uuid)
    # Archive everything that is soft deleted.
    table_counts, archived_uuids, total_archived = db.archive_deleted_rows(
        max_rows=100)
    # All system_metadata rows should have been moved.
    self.assertIn('instance_system_metadata', table_counts)
    self.assertEqual(len(db_instance.system_metadata),
                     table_counts['instance_system_metadata'])
    # The instances rows themselves get archived...
    self.assertIn('instances', table_counts)
    # ...together with instance_actions and their events.
    self.assertIn('instance_actions', table_counts)
    self.assertIn('instance_actions_events', table_counts)
    # The reported total matches the per-table counts.
    self.assertEqual(sum(table_counts.values()), total_archived)
def _get_target(self, context, target): """Processes and validates the CLI given target and adapts it for policy authorization. :returns: None if the given target is None, otherwise returns a proper authorization target. :raises nova.exception.InvalidAttribute: if a key in the given target is not an acceptable. :raises nova.exception.InstanceNotFound: if 'instance_id' is given, and there is no instance match the id. """ if not target: return None new_target = {} for t in target: key, value = t.split('=') if key not in self._ACCEPTABLE_TARGETS: raise exception.InvalidAttribute(attr=key) new_target[key] = value # if the target is an instance_id, return an instance instead. instance_id = new_target.get('instance_id') if instance_id: admin_ctxt = nova_context.get_admin_context() instance = db.instance_get_by_uuid(admin_ctxt, instance_id) new_target = {'user_id': instance['user_id'], 'project_id': instance['project_id']} return new_target
def test_archive_deleted_rows(self):
    """Archive the soft-deleted rows left behind by a deleted server.

    NOTE(review): this variant unpacks a 2-tuple from
    db.archive_deleted_rows (no total-archived count) and passes
    server_id to _delete_server -- presumably it targets an older DB/
    test API than sibling copies of this test; confirm against the db
    module and test base class in use.
    """
    # Boots a server, deletes it, and then tries to archive it.
    server = self._create_server()
    server_id = server['id']
    # Assert that there are instance_actions. instance_actions are
    # interesting since we don't soft delete them but they have a foreign
    # key back to the instances table.
    actions = self.api.get_instance_actions(server_id)
    self.assertTrue(len(actions),
                    'No instance actions for server: %s' % server_id)
    self._delete_server(server_id)
    # Verify we have the soft deleted instance in the database.
    admin_context = context.get_admin_context(read_deleted='yes')
    # This will raise InstanceNotFound if it's not found.
    instance = db.instance_get_by_uuid(admin_context, server_id)
    # Make sure it's soft deleted.
    self.assertNotEqual(0, instance.deleted)
    # Verify we have some system_metadata since we'll check that later.
    self.assertTrue(len(instance.system_metadata),
                    'No system_metadata for instance: %s' % server_id)
    # Now try and archive the soft deleted records.
    results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
    # verify system_metadata was dropped
    self.assertIn('instance_system_metadata', results)
    self.assertEqual(len(instance.system_metadata),
                     results['instance_system_metadata'])
    # Verify that instances rows are dropped
    self.assertIn('instances', results)
    # Verify that instance_actions and actions_event are dropped
    # by the archive
    self.assertIn('instance_actions', results)
    self.assertIn('instance_actions_events', results)
def _validate_token(self, context, token): instance_uuid = token['instance_uuid'] if instance_uuid is None: return False instance = db.instance_get_by_uuid(context, instance_uuid) return self.compute_rpcapi.validate_console_port( context, instance, token['port'], token['console_type'])
def test_create_instances_here(self):
    """Verify _create_instances_here creates DB rows and pushes updates.

    Stubs MessageRunner.instance_update_at_top to capture which
    instances were pushed "at top", then checks the created DB rows for
    metadata, system_metadata, per-instance hostname/display_name
    numbering and image_ref.
    """
    # Just grab the first instance type
    inst_type = objects.Flavor.get_by_id(self.ctxt, 1)
    image = {'properties': {}}
    instance_uuids = self.instance_uuids
    instance_props = {'id': 'removed',
                      'security_groups': 'removed',
                      'info_cache': 'removed',
                      'name': 'instance-00000001',
                      'display_name': 'moo',
                      'image_ref': uuidsentinel.fake_image_ref,
                      'user_id': self.ctxt.user_id,
                      # Test these as lists
                      'metadata': {'moo': 'cow'},
                      'system_metadata': {'meow': 'cat'},
                      'flavor': inst_type,
                      'project_id': self.ctxt.project_id}
    call_info = {'uuids': []}
    # One image-backed block device per instance.
    block_device_mapping = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(
            context=self.ctxt,
            **fake_block_device.FakeDbBlockDeviceDict(
                block_device.create_image_bdm(uuidsentinel.fake_image_ref),
                anon=True))
    ])

    def _fake_instance_update_at_top(self, _ctxt, instance):
        # Record the uuid of every instance pushed upward.
        call_info['uuids'].append(instance['uuid'])

    self.stub_out('nova.cells.messaging.MessageRunner.'
                  'instance_update_at_top',
                  _fake_instance_update_at_top)

    self.scheduler._create_instances_here(self.ctxt, instance_uuids,
                                          instance_props, inst_type, image,
                                          ['default'], block_device_mapping)
    # Every requested instance must have been pushed, in order.
    self.assertEqual(instance_uuids, call_info['uuids'])
    for count, instance_uuid in enumerate(instance_uuids):
        bdms = db.block_device_mapping_get_all_by_instance(
            self.ctxt, instance_uuid)
        self.assertIsNotNone(bdms)
        instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
        meta = utils.instance_meta(instance)
        self.assertEqual('cow', meta['moo'])
        sys_meta = utils.instance_sys_meta(instance)
        self.assertEqual('cat', sys_meta['meow'])
        # hostname/display_name are numbered per instance, 1-based.
        self.assertEqual('moo-%d' % (count + 1), instance['hostname'])
        self.assertEqual('moo-%d' % (count + 1), instance['display_name'])
        self.assertEqual(uuidsentinel.fake_image_ref, instance['image_ref'])
def test_create_instances_here(self):
    """Exercise _create_instances_here and verify the resulting rows."""
    # Use the first flavor in the test database.
    flavor = objects.Flavor.get_by_id(self.ctxt, 1)
    image_meta = {'properties': {}}
    uuids = self.instance_uuids
    props = {'id': 'removed',
             'security_groups': 'removed',
             'info_cache': 'removed',
             'name': 'instance-00000001',
             'display_name': 'moo',
             'image_ref': uuidsentinel.fake_image_ref,
             'user_id': self.ctxt.user_id,
             # Test these as lists
             'metadata': {'moo': 'cow'},
             'system_metadata': {'meow': 'cat'},
             'flavor': flavor,
             'project_id': self.ctxt.project_id}
    captured = {'uuids': []}
    bdm_list = objects.BlockDeviceMappingList(objects=[
        objects.BlockDeviceMapping(
            context=self.ctxt,
            **fake_block_device.FakeDbBlockDeviceDict(
                block_device.create_image_bdm(uuidsentinel.fake_image_ref),
                anon=True))
    ])

    def _fake_update_at_top(_self, _ctxt, inst):
        # Track the uuid of each instance pushed to the top cell.
        captured['uuids'].append(inst['uuid'])

    self.stub_out(
        'nova.cells.messaging.MessageRunner.instance_update_at_top',
        _fake_update_at_top)

    self.scheduler._create_instances_here(
        self.ctxt, uuids, props, flavor, image_meta, ['default'], bdm_list)
    self.assertEqual(uuids, captured['uuids'])

    for idx, uuid in enumerate(uuids):
        bdm_rows = db.block_device_mapping_get_all_by_instance(
            self.ctxt, uuid)
        self.assertIsNotNone(bdm_rows)
        fetched = db.instance_get_by_uuid(self.ctxt, uuid)
        inst_meta = utils.instance_meta(fetched)
        self.assertEqual('cow', inst_meta['moo'])
        inst_sys_meta = utils.instance_sys_meta(fetched)
        self.assertEqual('cat', inst_sys_meta['meow'])
        # Each instance gets a 1-based numeric suffix on its names.
        self.assertEqual('moo-%d' % (idx + 1), fetched['hostname'])
        self.assertEqual('moo-%d' % (idx + 1), fetched['display_name'])
        self.assertEqual(uuidsentinel.fake_image_ref, fetched['image_ref'])
def instance_create(context, values, session=None):
    """Create a new VM instance in the database.

    Any 'power_specs' entry is stripped from values and stored through
    instance_power_specs_create once the instance row exists.
    """
    specs = values.pop('power_specs', None)
    instance = db_api.instance_create(context, values)
    if specs is not None:
        # Persist the PowerSpecs, then re-read the instance so the
        # returned reference has them populated.
        instance_power_specs_create(
            context, instance['uuid'], specs, session)
        instance = db_api.instance_get_by_uuid(context, instance['uuid'])
    return instance
def _instance_merge_metadata(context, instance_uuid, values): """Helper Method to Merge Partial Metadata into existing""" system_metadata = values.get('system_metadata', {}) partial_metadata = system_metadata.pop('updates_only', False) #If they gave us only Partial System MetaData, we need to merge if partial_metadata: old_inst = db_api.instance_get_by_uuid(context, instance_uuid) #Convert the MetaData on the Instance from a List to a Dictionary old_metal = old_inst.get('system_metadata', []) old_metad = dict([(itm['key'], itm['value']) for itm in old_metal]) #Add in the new MetaData over top of the all of the old MetaData old_metad.update(system_metadata) values['system_metadata'] = old_metad
def instance_update(context, instance_uuid, values, session=None):
    """Update an existing VM instance in the database.

    values is deep-copied so the caller's dict is never mutated. Any
    'power_specs' entry is stripped out and stored separately through
    instance_power_specs_update after the instance row is updated.
    """
    values = copy.deepcopy(values)
    specs = values.pop('power_specs', None)
    # Fold partial system_metadata updates into the existing metadata.
    _instance_merge_metadata(context, instance_uuid, values)
    # Delegate the actual row update to the OpenStack DB API.
    instance = db_api.instance_update(context, instance_uuid, values)
    if specs is not None and instance is not None:
        # Persist the PowerSpecs, then re-read the instance so they are
        # populated on the returned reference.
        instance_power_specs_update(
            context, instance_uuid, specs, session)
        instance = db_api.instance_get_by_uuid(context, instance['uuid'])
    return instance
def get_marker_record(self, ctx, marker):
    """Look up the instance DB record used as a paging marker.

    Resolves the marker uuid to its cell via InstanceMapping, then
    fetches the (possibly soft-deleted) instance row from that cell.

    :raises exception.MarkerNotFound: if the marker has no instance
        mapping, or the instance itself cannot be found in its cell.
    """
    try:
        mapping = objects.InstanceMapping.get_by_instance_uuid(ctx, marker)
    except exception.InstanceMappingNotFound:
        raise exception.MarkerNotFound(marker=marker)
    elevated = ctx.elevated(read_deleted='yes')
    with context.target_cell(elevated, mapping.cell_mapping) as cell_ctx:
        # NOTE(danms): Query with no columns_to_join() -- only the
        # sort-key values are needed and none of the valid sort keys
        # live on joined columns.
        try:
            record = db.instance_get_by_uuid(cell_ctx, marker,
                                             columns_to_join=[])
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker=marker)
    return record