def archive_deleted_rows(self, max_rows):
    """Move up to max_rows deleted rows from production tables to shadow
    tables.

    :param max_rows: maximum number of rows to archive, or None for no
        explicit cap; negative values are rejected with exit code 1.
    :returns: 1 on invalid input, otherwise None (success).
    """
    if max_rows is not None:
        max_rows = int(max_rows)
        if max_rows < 0:
            # Use the print() function form: the old "print _(...)"
            # statement is a SyntaxError under Python 3, while the
            # call form behaves identically for a single argument on
            # Python 2 as well.
            # NOTE(review): the check is "< 0" so 0 is accepted even
            # though the message says "positive" — message kept as-is
            # since callers/tests may match on it.
            print(_("Must supply a positive value for max_rows"))
            return 1
    admin_context = context.get_admin_context()
    db.archive_deleted_rows(admin_context, max_rows)
def test_archive_deleted_rows(self):
    """Boot a server, delete it, then archive the soft-deleted rows."""
    server = self._create_server()
    uuid = server['id']
    # instance_actions are interesting because we do not soft delete
    # them, yet they hold a foreign key back to the instances table.
    actions = self.api.get_instance_actions(uuid)
    self.assertTrue(len(actions),
                    'No instance actions for server: %s' % uuid)
    self._delete_server(uuid)
    # The instance must still be visible with read_deleted='yes';
    # instance_get_by_uuid raises InstanceNotFound otherwise.
    ctxt = context.get_admin_context(read_deleted='yes')
    instance = db.instance_get_by_uuid(ctxt, uuid)
    # Confirm it was soft deleted rather than hard deleted.
    self.assertNotEqual(0, instance.deleted)
    # There has to be some system_metadata so the archive count check
    # below is meaningful.
    self.assertTrue(len(instance.system_metadata),
                    'No system_metadata for instance: %s' % uuid)
    # Archive everything that was soft deleted.
    results, deleted_instance_uuids = db.archive_deleted_rows(
        max_rows=100)
    # All system_metadata rows should have moved to the shadow table.
    self.assertIn('instance_system_metadata', results)
    self.assertEqual(len(instance.system_metadata),
                     results['instance_system_metadata'])
    # The instances row itself should be archived...
    self.assertIn('instances', results)
    # ...along with the instance_actions and their events.
    self.assertIn('instance_actions', results)
    self.assertIn('instance_actions_events', results)
def test_archive_deleted_rows(self):
    """Boot a server, delete it, then try to archive it."""
    server = self._create_server()
    uuid = server["id"]
    # instance_actions are not soft deleted but do carry a foreign key
    # back to the instances table, so verify some exist up front.
    actions = self.api.get_instance_actions(uuid)
    self.assertTrue(len(actions),
                    "No instance actions for server: %s" % uuid)
    self._delete_server(uuid)
    # Look up the soft-deleted row; instance_get_by_uuid raises
    # InstanceNotFound if the delete was hard rather than soft.
    ctxt = context.get_admin_context(read_deleted="yes")
    instance = db.instance_get_by_uuid(ctxt, uuid)
    self.assertNotEqual(0, instance.deleted)
    # Some system_metadata must exist for the archive count check below.
    self.assertTrue(len(instance.system_metadata),
                    "No system_metadata for instance: %s" % uuid)
    # Archive the soft-deleted records.
    results = db.archive_deleted_rows(max_rows=100)
    # All system_metadata rows should have been archived.
    self.assertIn("instance_system_metadata", results)
    self.assertEqual(len(instance.system_metadata),
                     results["instance_system_metadata"])
    # FIXME(mriedem): we fail to archive instances because of a fkey
    # referential constraint error with instance_actions not being deleted
    self.assertNotIn("instances", results)
    # FIXME(mriedem): instance_actions aren't soft deleted so they aren't
    # archived, which we need to fix.
    self.assertNotIn("instance_actions", results)
def test_archive_then_purge_by_date(self):
    """Archive a deleted server, then purge shadow tables by timestamp."""
    server = self._create_server()
    srv_uuid = server['id']
    self._delete_server(srv_uuid)
    results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
    self.assertEqual([srv_uuid], deleted_ids)
    pre_purge_results = self._get_table_counts()
    ctxt = context.get_admin_context()
    # Purging with a marker from before anything was archived must not
    # delete anything...
    past = timeutils.utcnow() - datetime.timedelta(hours=1)
    deleted = sqlalchemy_api.purge_shadow_tables(ctxt, past)
    self.assertEqual(0, deleted)
    # ...and must leave every table count untouched.
    results = self._get_table_counts()
    self.assertEqual(pre_purge_results, results)
    # A marker from after the archive must purge everything.
    future = timeutils.utcnow() + datetime.timedelta(hours=1)
    deleted = sqlalchemy_api.purge_shadow_tables(ctxt, future)
    self.assertNotEqual(0, deleted)
    # Every shadow table should now be empty.
    results = self._get_table_counts()
    self.assertFalse(any(results.values()))
def test_archive_deleted_rows(self):
    """Boot a server, delete it, and archive the soft-deleted records."""
    server = self._create_server()
    srv_uuid = server['id']
    # We care about instance_actions here: they are not soft deleted
    # but reference the instances table via a foreign key.
    actions = self.api.get_instance_actions(srv_uuid)
    self.assertTrue(len(actions),
                    'No instance actions for server: %s' % srv_uuid)
    self._delete_server(srv_uuid)
    # Fetch the soft-deleted instance; this raises InstanceNotFound if
    # the row is gone entirely.
    ctxt = context.get_admin_context(read_deleted='yes')
    instance = db.instance_get_by_uuid(ctxt, srv_uuid)
    # deleted != 0 means the row was soft deleted, not hard deleted.
    self.assertNotEqual(0, instance.deleted)
    # Require some system_metadata so the archived count assertion
    # below actually tests something.
    self.assertTrue(len(instance.system_metadata),
                    'No system_metadata for instance: %s' % srv_uuid)
    # Now archive the soft-deleted records.
    results = db.archive_deleted_rows(max_rows=100)
    # system_metadata should have been moved to the shadow table.
    self.assertIn('instance_system_metadata', results)
    self.assertEqual(len(instance.system_metadata),
                     results['instance_system_metadata'])
    # The instances row should be archived too...
    self.assertIn('instances', results)
    # ...as should instance_actions and their events.
    self.assertIn('instance_actions', results)
    self.assertIn('instance_actions_events', results)
def test_purge_with_real_date(self):
    """Make sure the result of dateutil's parser works with the query
    we're making to sqlalchemy.
    """
    server = self._create_server()
    srv_uuid = server['id']
    self._delete_server(srv_uuid)
    results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
    self.assertEqual([srv_uuid], deleted_ids)
    # 'oct 21 2015' parses to a point well before the rows above were
    # archived, so the purge should delete nothing.
    date = dateutil_parser.parse('oct 21 2015', fuzzy=True)
    ctxt = context.get_admin_context()
    self.assertEqual(
        0, sqlalchemy_api.purge_shadow_tables(ctxt, date))
def test_archive_then_purge_all(self):
    """Archive a deleted server, then purge every shadow table row."""
    server = self._create_server()
    srv_uuid = server['id']
    self._delete_server(srv_uuid)
    results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
    self.assertEqual([srv_uuid], deleted_ids)
    # Collect the status messages emitted during the purge; a bound
    # list.append is an equivalent status_fn callback.
    lines = []
    status = lines.append
    ctxt = context.get_admin_context()
    deleted = sqlalchemy_api.purge_shadow_tables(ctxt, None,
                                                 status_fn=status)
    # A full purge (before=None) must report work done and must have
    # emitted at least one status line.
    self.assertNotEqual(0, deleted)
    self.assertNotEqual(0, len(lines))
    # Each status line follows the "Deleted N rows from <table>" shape
    # with a non-zero count.
    for line in lines:
        self.assertIsNotNone(
            re.match(r'Deleted [1-9][0-9]* rows from .*', line))
    # Afterwards no table should have any rows.
    results = self._get_table_counts()
    self.assertFalse(any(results.values()))