Example #1
    def setUp(self):
        super(IntegrationTestAutomaticEmails, self).setUp()
        # prep fake crash data in elasticsearch
        now = utc_now() - datetime.timedelta(minutes=30)
        last_month = now - datetime.timedelta(days=31)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)
            # clear the indices cache so the index is created on every test
            storage.indices_cache = set()

            storage.save_processed({
                'uuid': '1',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'unknown'
                    }
                }
            })
            storage.save_processed({
                'uuid': '2',
                'email': '"Quidam" <*****@*****.**>',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': None
                    }
                }
            })
            storage.save_processed({
                'uuid': '3',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'bitguard'
                    }
                }
            })
            storage.save_processed({
                'uuid': '4',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '5',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '6',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '7',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '8',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '9',
                'email': '*****@*****.**',
                'product': 'EarthRaccoon',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '18',
                'email': 'z\xc3\[email protected]',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Let's insert a duplicate
            storage.save_processed({
                'uuid': '10',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # And let's insert some invalid crashes
            storage.save_processed({
                'uuid': '11',
                'email': None,
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '12',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': last_month
            })
            storage.save_processed({
                'uuid': '13',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '14',
                'email': '*****@*****.**',
                'product': 'WindBear',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Finally some invalid email addresses
            storage.save_processed({
                'uuid': '15',
                'email': '     ',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '16',
                'email': 'invalid@email',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '17',
                'email': 'i.do.not.work',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Create some email addresses.
            storage.create_emails_index()
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': last_month
                },
                id='*****@*****.**',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '"Quidam" <*****@*****.**>',
                    'last_sending': last_month
                },
                id='"Quidam" <*****@*****.**>',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': now
                },
                id='*****@*****.**',
            )

            # As indexing is asynchronous, we need to force elasticsearch to
            # make the newly created content searchable before we run the
            # tests.
            storage.es.refresh()
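
The _setup_storage_config helper called at the top of this example is not shown here. A minimal sketch of what it might look like, modeled on the _setup_storage_config defined in Example #3 below (the mocked logger and the values_source_list are assumptions carried over from that example; the actual helper in this test class may differ):

    def _setup_storage_config(self):
        # Assumed helper, mirroring Example #3; builds a configman
        # ConfigurationManager from the storage class's required config,
        # with a mocked logger so tests produce no real log output.
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ],
            argv_source=[]
        )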
Example #2
class IntegrationTestIndexCleaner(ElasticSearchTestCase):
    def __init__(self, *args, **kwargs):
        super(IntegrationTestIndexCleaner, self).__init__(*args, **kwargs)

        storage_config = self._setup_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def setUp(self):
        self.indices = []

    def tearDown(self):
        # Clean up created indices.
        for index in self.indices:
            try:
                self.storage.es.delete_index(index)
            # "Missing" indices have already been deleted, no need to worry.
            except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
                pass

        super(IntegrationTestIndexCleaner, self).tearDown()

    def _setup_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        cleaner_conf = IndexCleaner.get_required_config()
        cleaner_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager([storage_conf, cleaner_conf],
                                    values_source_list=[environment],
                                    argv_source=[])

    @maximum_es_version('0.90')
    def test_correct_indices_are_deleted(self):
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.indices.append('socorro200142')

            self.storage.create_index('socorro200000', {})
            self.indices.append('socorro200000')

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            self.indices.append('socorro200201_20030101')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1))
            self.storage.create_index('socorro_some_aliased_index', {})
            self.indices.append('socorro_some_aliased_index')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})
            self.indices.append(now_index)

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200142')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200000')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200201')

    @maximum_es_version('0.90')
    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_emails index should not be deleted by the cron job.
        """
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()
            self.indices.append('socorro_emails')

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
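
The deletable index names above follow a week-based scheme: 'socorro' plus a four-digit year and a two-digit week number, so 'socorro200142' is week 42 of 2001. A minimal sketch of that mapping, assuming a strftime-style template ('socorro%Y%W' is an assumption here; the real get_index_for_crash derives the name from its configured index template, which may differ):

    import datetime

    def index_for_date(date, template='socorro%Y%W'):
        # Assumed week-based template; %Y is the four-digit year and
        # %W the zero-padded week number (weeks starting on Monday).
        return date.strftime(template)

    # index_for_date(datetime.date(2001, 10, 21)) == 'socorro200142'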
Example #3
class IntegrationTestElasticsearchCleanup(IntegrationTestBase):

    def _setup_config_manager(self):
        return get_config_manager_for_crontabber(
            jobs='socorro.cron.jobs.elasticsearch_cleanup.'
                'ElasticsearchCleanupCronApp|30d',
        )

    def __init__(self, *args, **kwargs):
        super(
            IntegrationTestElasticsearchCleanup,
            self
        ).__init__(*args, **kwargs)

        storage_config = self._setup_storage_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def tearDown(self):
        # Clean up created indices.
        self.storage.es.delete_index('socorro*')
        super(IntegrationTestElasticsearchCleanup, self).tearDown()

    def _setup_storage_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ],
            argv_source=[]
        )

    def test_right_indices_are_deleted(self):
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.storage.create_index('socorro200000', {})

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1)
            )
            self.storage.create_index('socorro_some_aliased_index', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200142'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200000'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200201'
            )

    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_emails index should not be deleted by the cron job.
        """
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
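
The jobs string passed to get_config_manager_for_crontabber combines the job's dotted class path with its run frequency, separated by a pipe ('|30d' meaning roughly every 30 days). A minimal sketch of that convention (parse_job_spec is a hypothetical illustration, not crontabber's actual parser):

    def parse_job_spec(spec):
        # Hypothetical helper showing the 'dotted.path|frequency' layout
        # used in _setup_config_manager above; crontabber's own parsing
        # may differ.
        app_path, frequency = spec.rsplit('|', 1)
        return app_path, frequency

    app_path, frequency = parse_job_spec(
        'socorro.cron.jobs.elasticsearch_cleanup.'
        'ElasticsearchCleanupCronApp|30d'
    )
    # app_path  == 'socorro.cron.jobs.elasticsearch_cleanup.ElasticsearchCleanupCronApp'
    # frequency == '30d'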