Example #1
 def test_archive_deleted_rows_with_undeleted_residue(self):
     # Boots a server, deletes it, and then tries to archive it.
     server = self._create_server()
     server_id = server['id']
     # Assert that there are instance_actions. instance_actions are
     # interesting since we don't soft delete them but they have a foreign
     # key back to the instances table.
     actions = self.api.get_instance_actions(server_id)
     self.assertTrue(len(actions),
                     'No instance actions for server: %s' % server_id)
     self._delete_server(server)
     # Verify we have the soft deleted instance in the database.
     admin_context = context.get_admin_context(read_deleted='yes')
     # This will raise InstanceNotFound if it's not found.
     instance = db.instance_get_by_uuid(admin_context, server_id)
     # Make sure it's soft deleted.
     self.assertNotEqual(0, instance.deleted)
     # Undelete the instance_extra record to make sure we delete it anyway
     extra = db.instance_extra_get_by_instance_uuid(admin_context,
                                                    instance.uuid)
     self.assertNotEqual(0, extra.deleted)
     db.instance_extra_update_by_uuid(admin_context, instance.uuid,
                                      {'deleted': 0})
     extra = db.instance_extra_get_by_instance_uuid(admin_context,
                                                    instance.uuid)
     self.assertEqual(0, extra.deleted)
     # Verify we have some system_metadata since we'll check that later.
     self.assertTrue(len(instance.system_metadata),
                     'No system_metadata for instance: %s' % server_id)
     # Create a pci_devices record to simulate an instance that had a PCI
     # device allocated at the time it was deleted. There is a window of
     # time between deletion of the instance record and freeing of the PCI
     # device in nova-compute's _complete_deletion method during RT update.
     db.pci_device_update(
         admin_context, 1, 'fake-address', {
             'compute_node_id': 1,
             'address': 'fake-address',
             'vendor_id': 'fake',
             'product_id': 'fake',
             'dev_type': 'fake',
             'label': 'fake',
             'status': 'allocated',
             'instance_uuid': instance.uuid
         })
     # Now try to archive the soft deleted records.
     results, deleted_instance_uuids, archived = \
         db.archive_deleted_rows(max_rows=100)
     # Verify system_metadata was dropped
     self.assertIn('instance_system_metadata', results)
     self.assertEqual(len(instance.system_metadata),
                      results['instance_system_metadata'])
     # Verify that instances rows are dropped
     self.assertIn('instances', results)
     # Verify that instance_actions and instance_actions_events are dropped
     # by the archive
     self.assertIn('instance_actions', results)
     self.assertIn('instance_actions_events', results)
     self.assertEqual(sum(results.values()), archived)
     # Verify that the pci_devices record has not been dropped
     self.assertNotIn('pci_devices', results)
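
A note on the return value: depending on the Nova release, db.archive_deleted_rows returns either a 2-tuple of (results, deleted_instance_uuids) or, as here, a 3-tuple that adds the total archived row count; both shapes appear in the examples below. A minimal sketch of the newer shape, reusing the db module from the test (the concrete values in the comments are hypothetical):

    results, deleted_instance_uuids, archived = \
        db.archive_deleted_rows(max_rows=100)
    # results maps each table name to the number of rows moved into its
    # shadow table, e.g. {'instances': 1, 'instance_actions': 1, ...}.
    # deleted_instance_uuids lists the UUIDs of the archived instances.
    # archived is the total number of rows archived across all tables,
    # i.e. it equals sum(results.values()), as the assertion above checks.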
Example #2
    def test_archive_then_purge_all(self):
        server = self._create_server()
        server_id = server['id']
        self._delete_server(server_id)
        results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
        self.assertEqual([server_id], deleted_ids)

        lines = []

        def status(msg):
            lines.append(msg)

        admin_context = context.get_admin_context()
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
                                                     None,
                                                     status_fn=status)
        self.assertNotEqual(0, deleted)
        self.assertNotEqual(0, len(lines))
        for line in lines:
            self.assertIsNotNone(
                re.match(r'Deleted [1-9][0-9]* rows from .*', line))

        results = self._get_table_counts()
        # No table should have any rows
        self.assertFalse(any(results.values()))
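
Several of these examples rely on a _get_table_counts() helper from the test class that is not shown. A minimal sketch of what such a helper could look like, using plain SQLAlchemy reflection; this is an assumption for illustration, not Nova's actual helper, which presumably restricts the count to the tables relevant to archive and purge:

    import sqlalchemy
    from sqlalchemy import MetaData, select

    def _get_table_counts(self):
        # Count the rows in every reflected table, keyed by table name.
        # (Assumption: sqlalchemy_api.get_engine() is the same accessor the
        # tests use for the main database.)
        engine = sqlalchemy_api.get_engine()
        meta = MetaData()
        meta.reflect(bind=engine)
        results = {}
        with engine.connect() as conn:
            for table in meta.tables.values():
                count = conn.execute(
                    select(sqlalchemy.func.count()).select_from(table)
                ).scalar()
                results[table.name] = count
        return results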
Example #3
 def test_archive_deleted_rows(self):
     # Boots a server, deletes it, and then tries to archive it.
     server = self._create_server()
     server_id = server['id']
     # Assert that there are instance_actions. instance_actions are
     # interesting since we don't soft delete them but they have a foreign
     # key back to the instances table.
     actions = self.api.get_instance_actions(server_id)
     self.assertTrue(len(actions),
                     'No instance actions for server: %s' % server_id)
     self._delete_server(server)
     # Verify we have the soft deleted instance in the database.
     admin_context = context.get_admin_context(read_deleted='yes')
     # This will raise InstanceNotFound if it's not found.
     instance = db.instance_get_by_uuid(admin_context, server_id)
     # Make sure it's soft deleted.
     self.assertNotEqual(0, instance.deleted)
     # Verify we have some system_metadata since we'll check that later.
     self.assertTrue(len(instance.system_metadata),
                     'No system_metadata for instance: %s' % server_id)
     # Now try to archive the soft deleted records.
     results, deleted_instance_uuids, archived = \
         db.archive_deleted_rows(max_rows=100)
     # Verify system_metadata was dropped
     self.assertIn('instance_system_metadata', results)
     self.assertEqual(len(instance.system_metadata),
                      results['instance_system_metadata'])
     # Verify that instances rows are dropped
     self.assertIn('instances', results)
     # Verify that instance_actions and instance_actions_events are dropped
     # by the archive
     self.assertIn('instance_actions', results)
     self.assertIn('instance_actions_events', results)
     self.assertEqual(sum(results.values()), archived)
Example #4
    def test_archive_then_purge_by_date(self):
        server = self._create_server()
        server_id = server['id']
        self._delete_server(server_id)
        results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
        self.assertEqual([server_id], deleted_ids)

        pre_purge_results = self._get_table_counts()

        past = timeutils.utcnow() - datetime.timedelta(hours=1)
        admin_context = context.get_admin_context()
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context, past)
        # Make sure we didn't delete anything if the marker is before
        # we started
        self.assertEqual(0, deleted)

        results = self._get_table_counts()
        # Nothing should be changed if we didn't purge anything
        self.assertEqual(pre_purge_results, results)

        future = timeutils.utcnow() + datetime.timedelta(hours=1)
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
        # Make sure we deleted things when the marker is after
        # we started
        self.assertNotEqual(0, deleted)

        results = self._get_table_counts()
        # There should be no rows in any table if we purged everything
        self.assertFalse(any(results.values()))
Example #5
 def test_archive_deleted_rows(self):
     # Boots a server, deletes it, and then tries to archive it.
     server = self._create_server()
     server_id = server['id']
     # Assert that there are instance_actions. instance_actions are
     # interesting since we don't soft delete them but they have a foreign
     # key back to the instances table.
     actions = self.api.get_instance_actions(server_id)
     self.assertTrue(len(actions),
                     'No instance actions for server: %s' % server_id)
     self._delete_server(server_id)
     # Verify we have the soft deleted instance in the database.
     admin_context = context.get_admin_context(read_deleted='yes')
     # This will raise InstanceNotFound if it's not found.
     instance = db.instance_get_by_uuid(admin_context, server_id)
     # Make sure it's soft deleted.
     self.assertNotEqual(0, instance.deleted)
     # Verify we have some system_metadata since we'll check that later.
     self.assertTrue(len(instance.system_metadata),
                     'No system_metadata for instance: %s' % server_id)
     # Now try to archive the soft deleted records.
     results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
     # Verify system_metadata was dropped
     self.assertIn('instance_system_metadata', results)
     self.assertEqual(len(instance.system_metadata),
                      results['instance_system_metadata'])
     # Verify that instances rows are dropped
     self.assertIn('instances', results)
     # Verify that instance_actions and instance_actions_events are dropped
     # by the archive
     self.assertIn('instance_actions', results)
     self.assertIn('instance_actions_events', results)
Example #6
 def test_fill_vifs_migration(self):
     # Create a test server.
     self._create_server(
         flavor_id=1,
         networks=[{
             'uuid': nova_fixtures.NeutronFixture.network_1['id'],
         }],
     )
     # Run the online data migration which will create a (soft-deleted)
     # marker record.
     ctxt = nova_context.get_admin_context()
     virtual_interface.fill_virtual_interface_list(ctxt, max_count=50)
     # Now archive the deleted instance record.
     # The following archive step is used to prove that the migration
     # created a "fake instance"; it is not needed to trigger the bug.
     table_to_rows_archived, deleted_instance_uuids, total_rows_archived = (
         db_api.archive_deleted_rows(max_rows=1000))
     self.assertIn('instances', table_to_rows_archived)
     self.assertEqual(1, table_to_rows_archived['instances'])
     self.assertEqual(1, len(deleted_instance_uuids))
     self.assertEqual(virtual_interface.FAKE_UUID,
                      deleted_instance_uuids[0])
     # Since the archive step above removed the fake instance, run the
     # migration again to recreate it so we can exercise the code path.
     virtual_interface.fill_virtual_interface_list(ctxt, max_count=50)
     # Now list deleted servers. The fake marker instance should be excluded
     # from the API results.
     for detail in (True, False):
         servers = self.api.get_servers(detail=detail,
                                        search_opts={
                                            'all_tenants': 1,
                                            'deleted': 1
                                        })
         self.assertEqual(0, len(servers))
Example #7
 def test_update_available_resource_node_recreate(self):
     # First we create a compute service to manage a couple of fake nodes.
     compute = self.start_service('compute', 'node1')
     # When start_service runs, it will create the node1 ComputeNode.
     compute.manager.driver._set_nodes(['node1', 'node2'])
     # Run the update_available_resource periodic to register node2.
     ctxt = context.get_admin_context()
     compute.manager.update_available_resource(ctxt)
     # Make sure no compute nodes were orphaned or deleted.
     self.assertNotIn('Deleting orphan compute node',
                      self.stdlog.logger.output)
     # Now we should have two compute nodes, make sure the hypervisors API
     # shows them.
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(2, len(hypervisors), hypervisors)
     self.assertEqual({'node1', 'node2'},
                      set([hyp['hypervisor_hostname']
                           for hyp in hypervisors]))
     # Now stub the driver to only report node1. This makes it look like
     # node2 is no longer available when update_available_resource runs.
     compute.manager.driver._nodes = ['node1']
     ctxt = context.get_admin_context()
     compute.manager.update_available_resource(ctxt)
     # node2 should have been deleted, check the logs and API.
     log = self.stdlog.logger.output
     self.assertIn('Deleting orphan compute node', log)
     self.assertIn('hypervisor host is node2', log)
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(1, len(hypervisors), hypervisors)
     self.assertEqual('node1', hypervisors[0]['hypervisor_hostname'])
     # But the node2 ComputeNode is still in the database with deleted!=0.
     with utils.temporary_mutation(ctxt, read_deleted='yes'):
         cn = objects.ComputeNode.get_by_host_and_nodename(
             ctxt, 'node1', 'node2')
         self.assertTrue(cn.deleted)
     # Now stub the driver again to report node2 as being back and run
     # the periodic task.
     compute.manager.driver._nodes = ['node1', 'node2']
     LOG.info('Running update_available_resource which should bring back '
              'node2.')
     compute.manager.update_available_resource(ctxt)
     # The DBDuplicateEntry error should have been handled and resulted in
     # updating the (soft) deleted record to no longer be deleted.
     log = self.stdlog.logger.output
     self.assertNotIn('DBDuplicateEntry', log)
     # Should have two reported hypervisors again.
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(2, len(hypervisors), hypervisors)
     # Now that the node2 record was un-soft-deleted, archiving should not
     # remove any compute_nodes.
     LOG.info('Archiving the database.')
     archived = db_api.archive_deleted_rows(1000)[0]
     self.assertNotIn('compute_nodes', archived)
     cn2 = objects.ComputeNode.get_by_host_and_nodename(
         ctxt, 'node1', 'node2')
     self.assertFalse(cn2.deleted)
     self.assertIsNone(cn2.deleted_at)
     # The node2 id and uuid should not have changed in the DB.
     self.assertEqual(cn.id, cn2.id)
     self.assertEqual(cn.uuid, cn2.uuid)
Example #8
    def test_archive_then_purge_by_date(self):
        # Enable the generation of task_log records by the instance usage audit
        # nova-compute periodic task.
        self.flags(instance_usage_audit=True)
        compute = self.computes['compute']

        # Simulate a server that was created 30 days ago; this is needed to
        # test the task_log coverage. The task_log audit period defaults to
        # 1 month, so for a server to appear in the task_log, it must have
        # been active during the previous calendar month.
        month_ago = timeutils.utcnow() - datetime.timedelta(days=30)
        with osloutils_fixture.TimeFixture(month_ago):
            server = self._create_server()

        server_id = server['id']
        admin_context = context.get_admin_context()

        # task_log records are generated by the _instance_usage_audit
        # periodic task.
        compute.manager._instance_usage_audit(admin_context)
        # Audit period defaults to 1 month, the last audit period will
        # be the previous calendar month.
        begin, end = nova_utils.last_completed_audit_period()
        # Verify that we have 1 task_log record per audit period.
        task_logs = objects.TaskLogList.get_all(admin_context,
                                                'instance_usage_audit', begin,
                                                end)
        self.assertEqual(1, len(task_logs))

        # Delete the server and archive deleted rows.
        self._delete_server(server)
        results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000,
                                                                 task_log=True)
        self.assertEqual([server_id], deleted_ids)
        self.assertEqual(sum(results.values()), archived)

        pre_purge_results = self._get_table_counts()

        # Make sure we didn't delete anything if the marker is before
        # we started
        past = timeutils.utcnow() - datetime.timedelta(days=31)
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context, past)
        self.assertEqual(0, deleted)

        # Nothing should be changed if we didn't purge anything
        results = self._get_table_counts()
        self.assertEqual(pre_purge_results, results)

        # Make sure we deleted things when the marker is after
        # we started
        future = timeutils.utcnow() + datetime.timedelta(hours=1)
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
        self.assertNotEqual(0, deleted)

        # There should be no rows in any table if we purged everything
        results = self._get_table_counts()
        self.assertFalse(any(results.values()))
Example #9
 def test_purge_with_real_date(self):
     """Make sure the result of dateutil's parser works with the
        query we're making to sqlalchemy.
     """
     server = self._create_server()
     server_id = server['id']
     self._delete_server(server_id)
     results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
     self.assertEqual([server_id], deleted_ids)
     date = dateutil_parser.parse('oct 21 2015', fuzzy=True)
     admin_context = context.get_admin_context()
     deleted = sqlalchemy_api.purge_shadow_tables(admin_context, date)
     self.assertEqual(0, deleted)
Example #10
    def test_archive_then_purge_all(self):
        # Enable the generation of task_log records by the instance usage audit
        # nova-compute periodic task.
        self.flags(instance_usage_audit=True)
        compute = self.computes['compute']

        server = self._create_server()
        server_id = server['id']

        admin_context = context.get_admin_context()
        future = timeutils.utcnow() + datetime.timedelta(days=30)

        with osloutils_fixture.TimeFixture(future):
            # task_log records are generated by the _instance_usage_audit
            # periodic task.
            compute.manager._instance_usage_audit(admin_context)
            # Audit period defaults to 1 month, the last audit period will
            # be the previous calendar month.
            begin, end = nova_utils.last_completed_audit_period()
            # Verify that we have 1 task_log record per audit period.
            task_logs = objects.TaskLogList.get_all(admin_context,
                                                    'instance_usage_audit',
                                                    begin, end)
            self.assertEqual(1, len(task_logs))

        self._delete_server(server)
        results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000,
                                                                 task_log=True)
        self.assertEqual([server_id], deleted_ids)

        lines = []

        def status(msg):
            lines.append(msg)

        deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
                                                     None,
                                                     status_fn=status)
        self.assertNotEqual(0, deleted)
        self.assertNotEqual(0, len(lines))
        self.assertEqual(sum(results.values()), archived)
        for line in lines:
            self.assertIsNotNone(
                re.match(r'Deleted [1-9][0-9]* rows from .*', line))
        # Ensure we purged task_log records.
        self.assertIn('shadow_task_log', str(lines))

        results = self._get_table_counts()
        # No table should have any rows
        self.assertFalse(any(results.values()))
Example #11
    def test_archive_deleted_rows_incomplete(self):
        """This tests a scenario where archive_deleted_rows is run with
        --max_rows and does not run to completion.

        That is, the archive is stopped before all archivable records have been
        archived. Specifically, the problematic state is when a single instance
        becomes partially archived (example: 'instance_extra' record for one
        instance has been archived while its 'instances' record remains). Any
        access of the instance (example: listing deleted instances) that
        triggers the retrieval of a dependent record that has been archived
        away results in undefined behavior that may raise an error.

        We will force the system into a state where a single deleted instance
        is partially archived. We want to verify that we can, for example,
        successfully do a GET /servers/detail at any point between partial
        archive_deleted_rows runs without errors.
        """
        # Boots a server, deletes it, and then tries to archive it.
        server = self._create_server()
        server_id = server['id']
        # Assert that there are instance_actions. instance_actions are
        # interesting since we don't soft delete them but they have a foreign
        # key back to the instances table.
        actions = self.api.get_instance_actions(server_id)
        self.assertTrue(len(actions),
                        'No instance actions for server: %s' % server_id)
        self._delete_server(server)
        # Archive deleted records iteratively, 1 row at a time, and try to do a
        # GET /servers/detail between each run. All should succeed.
        exceptions = []
        while True:
            _, _, archived = db.archive_deleted_rows(max_rows=1)
            try:
                # Need to use the admin API to list deleted servers.
                self.admin_api.get_servers(search_opts={'deleted': True})
            except Exception as ex:
                exceptions.append(ex)
            if archived == 0:
                break
        # FIXME(melwitt): OrphanedObjectError is raised because of the bug.
        self.assertTrue(exceptions)
        for ex in exceptions:
            self.assertEqual(500, ex.response.status_code)
            self.assertIn('OrphanedObjectError', str(ex))
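
The loop above, archiving one row at a time until a pass archives nothing, is the same run-to-completion pattern an operator gets from nova-manage db archive_deleted_rows --until-complete, optionally followed by nova-manage db purge. A compact sketch of that flow, reusing the db and sqlalchemy_api modules from the examples above (the batch size and helper name are arbitrary):

    def archive_then_purge_all(admin_context, batch_size=1000):
        # Archive soft-deleted rows in batches until a pass archives nothing.
        total_archived = 0
        while True:
            # Newer releases return (results, deleted_instance_uuids,
            # archived); older ones return only the first two elements.
            _, _, archived = db.archive_deleted_rows(max_rows=batch_size)
            total_archived += archived
            if archived == 0:
                break
        # Passing None as the cut-off date purges all shadow table rows.
        purged = sqlalchemy_api.purge_shadow_tables(admin_context, None)
        return total_archived, purged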
Example #12
    def test_archive_then_purge_all(self):
        server = self._create_server()
        server_id = server['id']
        self._delete_server(server_id)
        results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
        self.assertEqual([server_id], deleted_ids)

        lines = []

        def status(msg):
            lines.append(msg)

        admin_context = context.get_admin_context()
        deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
                                                     None, status_fn=status)
        self.assertNotEqual(0, deleted)
        self.assertNotEqual(0, len(lines))
        for line in lines:
            self.assertIsNotNone(re.match(r'Deleted [1-9][0-9]* rows from .*',
                                          line))

        results = self._get_table_counts()
        # No table should have any rows
        self.assertFalse(any(results.values()))