    def __init__(self, *args, **kwargs):
        super(IntegrationTestElasticsearchCleanup,
              self).__init__(*args, **kwargs)

        storage_config = self._setup_storage_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)
Example #2
    def test_sending_many_emails(self, exacttarget_mock):
        """Test that we can send emails to a lot of users in the same run. """

        # First add a lot of emails.
        now = utc_now() - datetime.timedelta(minutes=30)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)

            for i in range(21):
                storage.save_processed({
                    'uuid': 'fake-%s' % i,
                    'email': '*****@*****.**' % i,
                    'product': 'WaterWolf',
                    'version': '20.0',
                    'release_channel': 'Release',
                    'date_processed': now,
                })

            storage.es.refresh()

        config_manager = self._setup_simple_config()
        with config_manager.context() as config:
            job = automatic_emails.AutomaticEmailsCronApp(config, '')
            job.run(utc_now())

            et_mock = exacttarget_mock.return_value
            # Verify that we have the default 4 results + the 21 we added.
            self.assertEqual(et_mock.trigger_send.call_count, 25)
Example #3
    def test_failure_limited_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }],
            argv_source=[]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            failure_exception = pyelasticsearch.exceptions.Timeout
            mock_es.index.side_effect = failure_exception

            crash_id = a_processed_crash['uuid']

            assert_raises(
                pyelasticsearch.exceptions.Timeout,
                es_storage.save_raw_and_processed,
                a_raw_crash,
                None,
                a_processed_crash.copy(),
                crash_id,
            )

            expected_crash = {
                'crash_id': crash_id,
                'processed_crash': a_processed_crash.copy(),
                'raw_crash': a_raw_crash
            }

            expected_request_args = (
                'socorro201214',
                'crash_reports',
                expected_crash
            )
            expected_request_kwargs = {
                'id': crash_id,
            }

            mock_es.index.assert_called_with(
                *expected_request_args,
                **expected_request_kwargs
            )
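The backoff_delays list and TransactionExecutorWithLimitedBackoff drive the retry behaviour this test asserts: with delays of [0, 0, 0] the executor gets three attempts, and once they are exhausted the last exception propagates, which is exactly what assert_raises checks above. A minimal sketch of those semantics (our illustration, not Socorro's actual executor):

import time

def run_with_limited_backoff(operation, backoff_delays):
    # Hypothetical sketch: one attempt per configured delay; when every
    # attempt fails, the final exception is re-raised to the caller.
    last_error = None
    for delay in backoff_delays:
        try:
            return operation()
        except Exception as error:
            last_error = error
            time.sleep(delay)
    raise last_error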
Example #4
    def test_sending_many_emails(self, exacttarget_mock):
        """Test that we can send emails to a lot of users in the same run. """

        # First add a lot of emails.
        now = utc_now() - datetime.timedelta(minutes=30)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)

            for i in range(21):
                storage.save_processed({
                    'uuid': 'fake-%s' % i,
                    'email': '*****@*****.**' % i,
                    'product': 'WaterWolf',
                    'version': '20.0',
                    'release_channel': 'Release',
                    'date_processed': now,
                })

            storage.es.refresh()

        config_manager = self._setup_simple_config()
        with config_manager.context() as config:
            job = automatic_emails.AutomaticEmailsCronApp(config, '')
            job.run(utc_now())

            et_mock = exacttarget_mock.return_value
            # Verify that we have the default 4 results + the 21 we added.
            eq_(et_mock.trigger_send.call_count, 25)
Example #5
    def test_success(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            es_storage.save_processed(a_processed_crash)

            expected_request_args = ('socorro201214', 'crash_reports',
                                     a_processed_crash)
            expected_request_kwargs = {
                'replication': 'async',
                'id': a_processed_crash['uuid'],
            }

            mock_es.index.assert_called_with(*expected_request_args,
                                             **expected_request_kwargs)
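Each of these test methods receives a pyes_mock argument, which implies a mock.patch decorator that the snippets omit. A sketch of the likely wiring, with the patch target inferred from the module path used elsewhere in these examples (so treat it as an assumption):

@mock.patch('socorro.external.elasticsearch.crashstorage.pyelasticsearch')
def test_success(self, pyes_mock):
    ...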
Example #6
    def test_success_after_limited_retry(self):
        mock_logging = mock.Mock()
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'submission_url': 'http://elasticsearch_host/%s',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            urllib_str = 'socorro.external.elasticsearch.crashstorage.urllib2'
            m_request = mock.Mock()
            m_urlopen = mock.Mock()
            with mock.patch(urllib_str) as mocked_urllib:
                mocked_urllib.Request = m_request
                m_request.return_value = 17
                mocked_urllib.urlopen = m_urlopen

                urlopen_results = [
                    urllib2.socket.timeout, urllib2.socket.timeout
                ]

                def urlopen_fn(*args, **kwargs):
                    try:
                        r = urlopen_results.pop(0)
                        raise r
                    except IndexError:
                        return m_urlopen

                m_urlopen.side_effect = urlopen_fn

                es_storage.save_processed(a_processed_crash)

                expected_request_args = (
                    'http://elasticsearch_host/9120408936ce666-ff3b-4c7a-9674-'
                    '367fe2120408',
                    {},
                )
                m_request.assert_called_with(*expected_request_args)
                self.assertEqual(m_urlopen.call_count, 3)
                expected_urlopen_args = (17, )
                expected_urlopen_kwargs = {'timeout': 0}
                m_urlopen.assert_called_with(*expected_urlopen_args,
                                             **expected_urlopen_kwargs)
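The urlopen_fn pattern above (pop a queued exception, otherwise succeed) recurs throughout these examples. A generic helper capturing the same idea might look like this (the helper is ours, for illustration only):

def flaky(failures, result):
    # Raise each queued exception once, in order, then return result.
    queue = list(failures)

    def side_effect(*args, **kwargs):
        if queue:
            raise queue.pop(0)
        return result

    return side_effect

# e.g. m_urlopen.side_effect = flaky(
#     [urllib2.socket.timeout, urllib2.socket.timeout], m_urlopen)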
Example #7
    def test_success_after_limited_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            esindex_results = [pyelasticsearch.exceptions.Timeout,
                               pyelasticsearch.exceptions.Timeout]

            def esindex_fn(*args, **kwargs):
                try:
                    r = esindex_results.pop(0)
                    raise r
                except IndexError:
                    return mock_es.index

            mock_es.index.side_effect = esindex_fn

            es_storage.save_processed(a_processed_crash)

            expected_request_args = (
                'socorro201214',
                'crash_reports',
                a_processed_crash
            )
            expected_request_kwargs = {
                'replication': 'async',
                'id': a_processed_crash['uuid'],
            }

            mock_es.index.assert_called_with(
                *expected_request_args,
                **expected_request_kwargs
            )
Example #8
    def test_success_after_limited_retry(self):
        mock_logging = mock.Mock()
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'submission_url': 'http://elasticsearch_host/%s',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            urllib_str = 'socorro.external.elasticsearch.crashstorage.urllib2'
            m_request = mock.Mock()
            m_urlopen = mock.Mock()
            with mock.patch(urllib_str) as mocked_urllib:
                mocked_urllib.Request = m_request
                m_request.return_value = 17
                mocked_urllib.urlopen = m_urlopen

                urlopen_results = [urllib2.socket.timeout,
                                   urllib2.socket.timeout]

                def urlopen_fn(*args, **kwargs):
                    try:
                        r = urlopen_results.pop(0)
                        raise r
                    except IndexError:
                        return m_urlopen

                m_urlopen.side_effect = urlopen_fn

                es_storage.save_processed(a_processed_crash)

                expected_request_args = (
                    'http://elasticsearch_host/9120408936ce666-ff3b-4c7a-9674-'
                    '367fe2120408',
                    {},
                )
                m_request.assert_called_with(*expected_request_args)
                self.assertEqual(m_urlopen.call_count, 3)
                expected_urlopen_args = (17,)
                expected_urlopen_kwargs = {'timeout': 0}
                m_urlopen.assert_called_with(*expected_urlopen_args,
                                             **expected_urlopen_kwargs)
Example #9
    def test_failure_no_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }],
            argv_source=[])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            failure_exception = Exception('horrors')
            mock_es.index.side_effect = failure_exception

            crash_id = a_processed_crash['uuid']

            self.assertRaises(
                Exception,
                es_storage.save_raw_and_processed,
                a_raw_crash,
                None,
                a_processed_crash.copy(),
                crash_id,
            )

            expected_crash = {
                'crash_id': crash_id,
                'processed_crash': a_processed_crash.copy(),
                'raw_crash': a_raw_crash
            }

            expected_request_args = ('socorro201214', 'crash_reports',
                                     expected_crash)
            expected_request_kwargs = {
                'replication': 'async',
                'id': crash_id,
            }

            mock_es.index.assert_called_with(*expected_request_args,
                                             **expected_request_kwargs)
Example #10
    def test_success_after_limited_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            esindex_results = [
                pyelasticsearch.exceptions.Timeout,
                pyelasticsearch.exceptions.Timeout
            ]

            def esindex_fn(*args, **kwargs):
                try:
                    r = esindex_results.pop(0)
                    raise r
                except IndexError:
                    return mock_es.index

            mock_es.index.side_effect = esindex_fn

            es_storage.save_processed(a_processed_crash)

            expected_request_args = ('socorro201214', 'crash_reports',
                                     a_processed_crash)
            expected_request_kwargs = {
                'replication': 'async',
                'id': a_processed_crash['uuid'],
            }

            mock_es.index.assert_called_with(*expected_request_args,
                                             **expected_request_kwargs)
Example #11
    def test_success(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }],
            argv_source=[]
        )

        with config_manager.context() as config:
            crash_id = a_processed_crash['uuid']

            es_storage = ElasticSearchCrashStorage(config)
            es_storage.save_raw_and_processed(
                a_raw_crash,
                None,
                a_processed_crash.copy(),
                crash_id,
            )

            expected_crash = {
                'crash_id': crash_id,
                'processed_crash': a_processed_crash.copy(),
                'raw_crash': a_raw_crash
            }

            expected_request_args = (
                'socorro201214',
                'crash_reports',
                expected_crash
            )
            expected_request_kwargs = {
                'id': crash_id,
                'replication': 'async',
            }

            mock_es.index.assert_called_with(
                *expected_request_args,
                **expected_request_kwargs
            )
Example #12
    def test_failure_limited_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
                'timeout': 0,
                'backoff_delays': [0, 0, 0],
                'transaction_executor_class':
                    TransactionExecutorWithLimitedBackoff
            }],
            argv_source=[]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            failure_exception = pyelasticsearch.exceptions.Timeout
            mock_es.index.side_effect = failure_exception

            crash_id = a_processed_crash['uuid']

            assert_raises(
                pyelasticsearch.exceptions.Timeout,
                es_storage.save_raw_and_processed,
                a_raw_crash,
                None,
                a_processed_crash.copy(),
                crash_id,
            )

            expected_crash = {
                'crash_id': crash_id,
                'processed_crash': a_processed_crash.copy(),
                'raw_crash': a_raw_crash
            }

            expected_request_args = (
                'socorro201214',
                'crash_reports',
                expected_crash
            )
            expected_request_kwargs = {
                'id': crash_id,
            }

            mock_es.index.assert_called_with(
                *expected_request_args,
                **expected_request_kwargs
            )
Example #13
    def __init__(self, *args, **kwargs):
        super(
            IntegrationTestIndexCleaner,
            self
        ).__init__(*args, **kwargs)

        storage_config = self._setup_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)
Example #14
    def tearDown(self):
        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)
            storage.es.delete_index(config.elasticsearch_index)
            storage.es.delete_index(config.elasticsearch_emails_index)
            storage.es.flush()

        super(IntegrationTestAutomaticEmails, self).tearDown()
Example #15
    def _setup_storage_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager([storage_conf],
                                    values_source_list=[os.environ],
                                    argv_source=[])
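As in the tearDown shown under Example #14, the manager returned by this helper is used as a context from which to build a storage instance:

config_manager = self._setup_storage_config()
with config_manager.context() as config:
    storage = ElasticSearchCrashStorage(config)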
Example #16
    def test_indexing(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()
        pyes_mock.exceptions.ElasticHttpNotFoundError = \
            pyelasticsearch.exceptions.ElasticHttpNotFoundError

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }],
            argv_source=[]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            crash_report = a_processed_crash.copy()
            crash_report['date_processed'] = '2013-01-01 10:56:41.558922'

            def create_index_fn(index, **kwargs):
                assert 'socorro20130' in index
                if index == 'socorro201301':
                    raise IndexAlreadyExistsError()

            mock_es.create_index.side_effect = create_index_fn

            # The index does not exist and is created
            es_storage.save_processed(crash_report)
            eq_(mock_es.create_index.call_count, 1)
            call_args = [
                args for args, kwargs in mock_logging.info.call_args_list
            ]
            ok_(
                ('created new elasticsearch index: %s', 'socorro201300')
                in call_args
            )

            # The index exists and is not created
            crash_report['date_processed'] = '2013-01-10 10:56:41.558922'
            es_storage.save_processed(crash_report)

            eq_(mock_es.create_index.call_count, 2)
            call_args = [
                args for args, kwargs in mock_logging.info.call_args_list
            ]
            ok_(
                ('created new elasticsearch index: %s', 'socorro201301')
                not in call_args
            )
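The index names this test expects ('socorro201300' for 2013-01-01, 'socorro201301' for 2013-01-10, and 'socorro201214' in the earlier examples) are consistent with a strftime-style weekly template applied to date_processed. A sketch of that reading (the template string is our assumption):

import datetime

def index_for(date_processed, template='socorro%Y%W'):
    # %W is the zero-padded week number, so crashes land in weekly indices.
    return date_processed.strftime(template)

assert index_for(datetime.datetime(2013, 1, 1)) == 'socorro201300'
assert index_for(datetime.datetime(2013, 1, 10)) == 'socorro201301'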
Example #17
    def test_indexing(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()
        pyes_mock.exceptions.ElasticHttpNotFoundError = \
            pyelasticsearch.exceptions.ElasticHttpNotFoundError

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            crash_report = a_processed_crash.copy()
            crash_report['date_processed'] = '2013-01-01 10:56:41.558922'

            def status_fn(index):
                assert 'socorro20130' in index
                if index == 'socorro201300':
                    raise pyelasticsearch.exceptions.ElasticHttpNotFoundError()

            mock_es.status = status_fn

            # The index does not exist and is created
            es_storage.save_processed(crash_report)
            self.assertEqual(mock_es.create_index.call_count, 1)

            # The index exists and is not created
            crash_report['date_processed'] = '2013-01-10 10:56:41.558922'
            es_storage.save_processed(crash_report)

            self.assertEqual(mock_es.create_index.call_count, 1)
Example #18
    def _setup_storage_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ],
            argv_source=[]
        )
Example #19
    def _setup_storage_config(self):
        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock.Mock())

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[{
                'elasticsearch_index': 'socorro_integration_test',
                'elasticsearch_emails_index': 'socorro_integration_test_emails'
            }],
            argv_source=[]
        )
Example #20
    def _setup_storage_config(self):
        required_config = ElasticSearchCrashStorage.get_required_config()

        overrides = {
            'elasticsearch_index': 'socorro_integration_test',
            'elasticsearch_emails_index': 'socorro_integration_test_emails',
            'elasticsearch_timeout': 5,
            'backoff_delays': [1],
        }

        return get_config_manager_for_crontabber(
            more_definitions=required_config, overrides=overrides)
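The crontabber-flavoured manager returned here is consumed the same way as the plain ConfigurationManager variants elsewhere in these examples:

with self._setup_storage_config().context() as config:
    storage = ElasticSearchCrashStorage(config)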
Example #21
    def test_success(self):
        mock_logging = mock.Mock()
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'submission_url': 'http://elasticsearch_host/%s'
            }])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            urllib_str = 'socorro.external.elasticsearch.crashstorage.urllib2'
            m_request = mock.Mock()
            m_urlopen = mock.Mock()
            with mock.patch(urllib_str) as mocked_urllib:
                mocked_urllib.Request = m_request
                m_request.return_value = 17
                mocked_urllib.urlopen = m_urlopen

                es_storage.save_processed(a_processed_crash)

                expected_request_args = (
                    'http://elasticsearch_host/9120408936ce666-ff3b-4c7a-9674-'
                    '367fe2120408',
                    {},
                )
                m_request.assert_called_with(*expected_request_args)
                expected_urlopen_args = (17, )
                expected_urlopen_kwargs = {'timeout': 2}
                m_urlopen.assert_called_with(*expected_urlopen_args,
                                             **expected_urlopen_kwargs)
Example #22
    def test_success(self):
        mock_logging = mock.Mock()
        required_config = ElasticSearchCrashStorage.required_config
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'submission_url': 'http://elasticsearch_host/%s'
            }]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            urllib_str = 'socorro.external.elasticsearch.crashstorage.urllib2'
            m_request = mock.Mock()
            m_urlopen = mock.Mock()
            with mock.patch(urllib_str) as mocked_urllib:
                mocked_urllib.Request = m_request
                m_request.return_value = 17
                mocked_urllib.urlopen = m_urlopen

                es_storage.save_processed(a_processed_crash)

                expected_request_args = (
                    'http://elasticsearch_host/9120408936ce666-ff3b-4c7a-9674-'
                    '367fe2120408',
                    {},
                )
                m_request.assert_called_with(*expected_request_args)
                expected_urlopen_args = (17,)
                expected_urlopen_kwargs = {'timeout': 2}
                m_urlopen.assert_called_with(*expected_urlopen_args,
                                             **expected_urlopen_kwargs)
Example #23
    def _setup_storage_config(self):
        required_config = ElasticSearchCrashStorage.get_required_config()

        overrides = {
            'elasticsearch_index': 'socorro_integration_test',
            'elasticsearch_emails_index': 'socorro_integration_test_emails',
            'elasticsearch_timeout': 5,
            'backoff_delays': [1],
        }

        return get_config_manager_for_crontabber(
            more_definitions=required_config,
            overrides=overrides
        )
Example #24
    def test_indexing(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()
        pyes_mock.exceptions.ElasticHttpNotFoundError = \
            pyelasticsearch.exceptions.ElasticHttpNotFoundError

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }],
            argv_source=[]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            crash_report = a_processed_crash.copy()
            crash_report['date_processed'] = '2013-01-01 10:56:41.558922'

            def create_index_fn(index, **kwargs):
                assert 'socorro20130' in index
                if index == 'socorro201301':
                    raise IndexAlreadyExistsError()

            mock_es.create_index.side_effect = create_index_fn

            # The index does not exist and is created
            es_storage.save_processed(crash_report)
            eq_(mock_es.create_index.call_count, 1)
            call_args = [
                args for args, kwargs in mock_logging.info.call_args_list
            ]
            ok_(
                ('created new elasticsearch index: %s', 'socorro201300')
                in call_args
            )

            # The index exists and is not created
            crash_report['date_processed'] = '2013-01-10 10:56:41.558922'
            es_storage.save_processed(crash_report)

            eq_(mock_es.create_index.call_count, 2)
            call_args = [
                args for args, kwargs in mock_logging.info.call_args_list
            ]
            ok_(
                ('created new elasticsearch index: %s', 'socorro201301')
                not in call_args
            )
Example #25
    def _setup_storage_config(self):
        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock.Mock())

        values_source_list = {
            'elasticsearch_index': 'socorro_integration_test',
            'elasticsearch_emails_index': 'socorro_integration_test_emails',
            'elasticsearch_timeout': 5,
            'backoff_delays': [1],
        }

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ, values_source_list],
            argv_source=[])
Example #26
    def _setup_storage_config(self):
        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock.Mock())

        values_source_list = {
            'elasticsearch_index': 'socorro_integration_test',
            'elasticsearch_emails_index': 'socorro_integration_test_emails',
            'elasticsearch_timeout': 5,
            'backoff_delays': [1],
        }

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ, values_source_list],
            argv_source=[]
        )
Example #27
    def test_failure_no_retry(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }]
        )

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)

            failure_exception = Exception('horrors')
            mock_es.index.side_effect = failure_exception

            self.assertRaises(Exception,
                              es_storage.save_processed,
                              a_processed_crash)

            expected_request_args = (
                'socorro201214',
                'crash_reports',
                a_processed_crash
            )
            expected_request_kwargs = {
                'replication': 'async',
                'id': a_processed_crash['uuid'],
            }

            mock_es.index.assert_called_with(
                *expected_request_args,
                **expected_request_kwargs
            )
Example #28
    def test_indexing(self, pyes_mock):
        mock_logging = mock.Mock()
        mock_es = mock.Mock()
        pyes_mock.exceptions.ElasticHttpNotFoundError = \
            pyelasticsearch.exceptions.ElasticHttpNotFoundError

        pyes_mock.ElasticSearch.return_value = mock_es
        required_config = ElasticSearchCrashStorage.get_required_config()
        required_config.add_option('logger', default=mock_logging)

        config_manager = ConfigurationManager(
            [required_config],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[{
                'logger': mock_logging,
                'elasticsearch_urls': 'http://elasticsearch_host:9200',
            }])

        with config_manager.context() as config:
            es_storage = ElasticSearchCrashStorage(config)
            crash_report = a_processed_crash.copy()
            crash_report['date_processed'] = '2013-01-01 10:56:41.558922'

            def status_fn(index):
                assert 'socorro20130' in index
                if index == 'socorro201300':
                    raise pyelasticsearch.exceptions.ElasticHttpNotFoundError()

            mock_es.status = status_fn

            # The index does not exist and is created
            es_storage.save_processed(crash_report)
            self.assertEqual(mock_es.create_index.call_count, 1)

            # The index exists and is not created
            crash_report['date_processed'] = '2013-01-10 10:56:41.558922'
            es_storage.save_processed(crash_report)

            self.assertEqual(mock_es.create_index.call_count, 1)
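Note the two existence-check styles across these examples: the create_index variants (#16, #24) expect IndexAlreadyExistsError to be swallowed on a duplicate create, while this status-based variant stubs es.status to raise ElasticHttpNotFoundError for a missing index and asserts create_index then runs exactly once. A paraphrase of the status-first pattern being exercised (our reading of the test, not the storage source):

def ensure_index(es, index):
    # Create the index only when status() reports it missing.
    try:
        es.status(index)
    except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
        es.create_index(index)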
Example #29
    def get_config_context(self):
        storage_config = ElasticSearchCrashStorage.get_required_config()
        storage_config.add_option('logger', default=self.config.logger)
        values_source = {
            'resource.elasticsearch.elasticsearch_default_index': 'socorro_integration_test',
            'resource.elasticsearch.elasticsearch_index': 'socorro_integration_test_reports',
            'resource.elasticsearch.backoff_delays': [1],
            'resource.elasticsearch.elasticsearch_timeout': 5,
            'resource.elasticsearch.use_mapping_file': False,
        }

        config_manager = ConfigurationManager(
            [storage_config],
            app_name='test_elasticsearch_indexing',
            app_version='1.0',
            app_description=__doc__,
            values_source_list=[os.environ, values_source],
            argv_source=[],
        )

        return config_manager.get_config()
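Unlike the manager-returning helpers above, this one hands back a ready config object, so a caller can construct storage directly (a usage sketch):

config = self.get_config_context()
storage = ElasticSearchCrashStorage(config)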
Example #30
    def setUp(self):
        super(IntegrationTestAutomaticEmails, self).setUp()
        # prep a fake table
        now = utc_now() - datetime.timedelta(minutes=30)
        last_month = now - datetime.timedelta(days=31)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)
            # clear the indices cache so the index is created on every test
            storage.indices_cache = set()

            storage.save_processed({
                'uuid': '1',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'unknown'
                    }
                }
            })
            storage.save_processed({
                'uuid': '2',
                'email': '"Quidam" <*****@*****.**>',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': None
                    }
                }
            })
            storage.save_processed({
                'uuid': '3',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'bitguard'
                    }
                }
            })
            storage.save_processed({
                'uuid': '4',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '5',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '6',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '7',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '8',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '9',
                'email': '*****@*****.**',
                'product': 'EarthRaccoon',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '18',
                'email': 'z\xc3\[email protected]',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Let's insert a duplicate
            storage.save_processed({
                'uuid': '10',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # And let's insert some invalid crashes
            storage.save_processed({
                'uuid': '11',
                'email': None,
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '12',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': last_month
            })
            storage.save_processed({
                'uuid': '13',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '14',
                'email': '*****@*****.**',
                'product': 'WindBear',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Finally some invalid email addresses
            storage.save_processed({
                'uuid': '15',
                'email': '     ',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '16',
                'email': 'invalid@email',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '17',
                'email': 'i.do.not.work',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Create some email addresses.
            storage.create_emails_index()
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': last_month
                },
                id='*****@*****.**',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '"Quidam" <*****@*****.**>',
                    'last_sending': last_month
                },
                id='"Quidam" <*****@*****.**>',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': now
                },
                id='*****@*****.**',
            )

            # As indexing is asynchronous, we need to force elasticsearch to
            # make the newly created content searchable before we run the
            # tests.
            storage.es.refresh()
Example #31
    def test_email_after_delay(self, exacttarget_mock):
        """Test that a user will receive an email if he or she sends us a new
        crash report after the delay is passed (but not before). """
        config_manager = self._setup_config_manager(
            delay_between_emails=1, restrict_products=['EarthRaccoon'])
        email = '*****@*****.**'
        list_service_mock = exacttarget_mock.return_value.list.return_value
        list_service_mock.get_subscriber.return_value = {'token': email}
        trigger_send_mock = exacttarget_mock.return_value.trigger_send
        tomorrow = utc_now() + datetime.timedelta(days=1, hours=2)
        twohourslater = utc_now() + datetime.timedelta(hours=2)

        storage_config_manager = self._setup_storage_config()
        with storage_config_manager.context() as storage_config:
            storage = ElasticSearchCrashStorage(storage_config)

        with config_manager.context() as config:
            # 1. Send an email to the user and update emailing data
            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            exacttarget_mock.return_value.trigger_send.assert_called_with(
                'socorro_dev_test', {
                    'EMAIL_ADDRESS_': email,
                    'EMAIL_FORMAT_': 'H',
                    'TOKEN': email
                })
            eq_(trigger_send_mock.call_count, 1)

            # 2. Test that before 'delay' is passed user doesn't receive
            # another email

            # Insert a new crash report with the same email address
            storage.save_processed({
                'uuid': '50',
                'email': email,
                'product': 'EarthRaccoon',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': utc_now() + datetime.timedelta(hours=1)
            })
            storage.es.refresh()

            # Run crontabber with time pushed by two hours
            with mock.patch('crontabber.app.utc_now') as cronutc_mock:
                with mock.patch('crontabber.base.utc_now') as baseutc_mock:
                    cronutc_mock.return_value = twohourslater
                    baseutc_mock.return_value = twohourslater
                    tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            # No new email was sent
            eq_(trigger_send_mock.call_count, 1)

            # 3. Verify that, after 'delay' is passed, a new email is sent
            # to our user

            # Insert a new crash report with the same email address
            storage.save_processed({
                'uuid': '51',
                'email': email,
                'product': 'EarthRaccoon',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': utc_now() + datetime.timedelta(days=1)
            })
            storage.es.refresh()

            # Run crontabber with time pushed by a day
            with mock.patch('crontabber.app.utc_now') as cronutc_mock:
                with mock.patch('crontabber.base.utc_now') as baseutc_mock:
                    cronutc_mock.return_value = tomorrow
                    baseutc_mock.return_value = tomorrow
                    tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            # A new email was sent
            eq_(trigger_send_mock.call_count, 2)
Example #32
class IntegrationTestIndexCleaner(ElasticSearchTestCase):

    def __init__(self, *args, **kwargs):
        super(
            IntegrationTestIndexCleaner,
            self
        ).__init__(*args, **kwargs)

        storage_config = self._setup_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def setUp(self):
        self.indices = []

    def tearDown(self):
        # Clean up created indices.
        for index in self.indices:
            try:
                self.storage.es.delete_index(index)
            # "Missing" indices have already been deleted, no need to worry.
            except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
                pass

        super(IntegrationTestIndexCleaner, self).tearDown()

    def _setup_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        cleaner_conf = IndexCleaner.get_required_config()
        cleaner_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager(
            [storage_conf, cleaner_conf],
            values_source_list=[environment],
            argv_source=[]
        )

    @maximum_es_version('0.90')
    def test_correct_indices_are_deleted(self):
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.indices.append('socorro200142')

            self.storage.create_index('socorro200000', {})
            self.indices.append('socorro200000')

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            self.indices.append('socorro200201_20030101')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1)
            )
            self.storage.create_index('socorro_some_aliased_index', {})
            self.indices.append('socorro_some_aliased_index')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})
            self.indices.append(now_index)

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200142'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200000'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200201'
            )

    @maximum_es_version('0.90')
    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_email index should not be deleted by the cron job.
        """
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()
            self.indices.append('socorro_emails')

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
Example #33
class IntegrationTestElasticsearchCleanup(IntegrationTestBase):
    def _setup_config_manager(self):
        return get_config_manager_for_crontabber(
            jobs='socorro.cron.jobs.elasticsearch_cleanup.'
                 'ElasticsearchCleanupCronApp|30d',
        )

    def __init__(self, *args, **kwargs):
        super(IntegrationTestElasticsearchCleanup,
              self).__init__(*args, **kwargs)

        storage_config = self._setup_storage_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def tearDown(self):
        # Clean up created indices.
        self.storage.es.delete_index('socorro*')
        super(IntegrationTestElasticsearchCleanup, self).tearDown()

    def _setup_storage_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager([storage_conf],
                                    values_source_list=[os.environ],
                                    argv_source=[])

    def test_right_indices_are_deleted(self):
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.storage.create_index('socorro200000', {})

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1))
            self.storage.create_index('socorro_some_aliased_index', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200142')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200000')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200201')

    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_emails index should not be deleted by the cron job.
        """
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
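
A note on the `self.storage.indices_cache = set()` line these tests keep repeating: the crash storage memoizes which indices it has already created so that saving a crash does not issue a create-index call every time, and clearing the set forces the index to be (re)created inside each test. A minimal sketch of that memoization pattern follows; SketchStorage and ensure_index are illustrative names, not Socorro's actual API.

import pyelasticsearch


class SketchStorage(object):
    def __init__(self, es):
        self.es = es
        self.indices_cache = set()  # names of indices we already created

    def ensure_index(self, index_name, mapping=None):
        # Skip the HTTP round trip if we created this index earlier.
        if index_name in self.indices_cache:
            return
        try:
            self.es.create_index(index_name, settings=mapping)
        except pyelasticsearch.exceptions.ElasticHttpError:
            pass  # most likely "index already exists"; fine for a sketch
        self.indices_cache.add(index_name)
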
Example #34
    def test_email_after_delay(self, exacttarget_mock):
        """Test that a user will receive an email if he or she sends us a new
        crash report after the delay is passed (but not before). """
        config_manager = self._setup_config_manager(
            delay_between_emails=1,
            restrict_products=['EarthRaccoon']
        )
        email = '*****@*****.**'
        list_service_mock = exacttarget_mock.return_value.list.return_value
        list_service_mock.get_subscriber.return_value = {
            'token': email
        }
        trigger_send_mock = exacttarget_mock.return_value.trigger_send
        tomorrow = utc_now() + datetime.timedelta(days=1, hours=2)
        twohourslater = utc_now() + datetime.timedelta(hours=2)

        storage_config_manager = self._setup_storage_config()
        with storage_config_manager.context() as storage_config:
            storage = ElasticSearchCrashStorage(storage_config)

        with config_manager.context() as config:
            # 1. Send an email to the user and update emailing data
            tab = crontabber.CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            exacttarget_mock.return_value.trigger_send.assert_called_with(
                'socorro_dev_test',
                {
                    'EMAIL_ADDRESS_': email,
                    'EMAIL_FORMAT_': 'H',
                    'TOKEN': email
                }
            )
            self.assertEqual(trigger_send_mock.call_count, 1)

            # 2. Test that before 'delay' is passed user doesn't receive
            # another email

            # Insert a new crash report with the same email address
            storage.save_processed({
                'uuid': '50',
                'email': email,
                'product': 'EarthRaccoon',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': utc_now() + datetime.timedelta(hours=1)
            })
            storage.es.refresh()

            # Run crontabber with time pushed by two hours
            with mock.patch('socorro.cron.crontabber.utc_now') as cronutc_mock:
                with mock.patch('socorro.cron.base.utc_now') as baseutc_mock:
                    cronutc_mock.return_value = twohourslater
                    baseutc_mock.return_value = twohourslater
                    tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            # No new email was sent
            self.assertEqual(trigger_send_mock.call_count, 1)

            # 3. Verify that, after 'delay' is passed, a new email is sent
            # to our user

            # Insert a new crash report with the same email address
            storage.save_processed({
                'uuid': '51',
                'email': email,
                'product': 'EarthRaccoon',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': utc_now() + datetime.timedelta(days=1)
            })
            storage.es.refresh()

            # Run crontabber with time pushed by a day
            with mock.patch('socorro.cron.crontabber.utc_now') as cronutc_mock:
                with mock.patch('socorro.cron.base.utc_now') as baseutc_mock:
                    cronutc_mock.return_value = tomorrow
                    baseutc_mock.return_value = tomorrow
                    tab.run_all()

            information = self._load_structure()
            assert information['automatic-emails']
            assert not information['automatic-emails']['last_error']
            assert information['automatic-emails']['last_success']

            # A new email was sent
            self.assertEqual(trigger_send_mock.call_count, 2)
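
The nested `mock.patch` calls above are the key trick: `utc_now` has to be patched once per module that imported it, because each module binds its own reference to the name, and patching only one would leave the other on the real clock. A condensed sketch of the pattern, assuming the socorro packages are importable (the fake timestamp is arbitrary):

import datetime

import mock  # the standalone mock package used throughout these tests

fake_now = datetime.datetime(2014, 1, 2, 12, 0)

# Patch the name in every module that did `from ... import utc_now`.
with mock.patch('socorro.cron.crontabber.utc_now') as cronutc_mock:
    with mock.patch('socorro.cron.base.utc_now') as baseutc_mock:
        cronutc_mock.return_value = fake_now
        baseutc_mock.return_value = fake_now
        # anything here that calls utc_now() now sees fake_now
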
Example #35
class IntegrationTestIndexCleaner(ElasticSearchTestCase):
    def __init__(self, *args, **kwargs):
        super(IntegrationTestIndexCleaner, self).__init__(*args, **kwargs)

        storage_config = self._setup_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def setUp(self):
        super(IntegrationTestIndexCleaner, self).setUp()
        self.indices = []

    def tearDown(self):
        # Clean up created indices.
        for index in self.indices:
            try:
                self.storage.es.delete_index(index)
            # "Missing" indices have already been deleted, no need to worry.
            except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
                pass

        super(IntegrationTestIndexCleaner, self).tearDown()

    def _setup_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        cleaner_conf = IndexCleaner.get_required_config()
        cleaner_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager([storage_conf, cleaner_conf],
                                    values_source_list=[environment],
                                    argv_source=[])

    @maximum_es_version('0.90')
    def test_correct_indices_are_deleted(self):
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.indices.append('socorro200142')

            self.storage.create_index('socorro200000', {})
            self.indices.append('socorro200000')

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            self.indices.append('socorro200201_20030101')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1))
            self.storage.create_index('socorro_some_aliased_index', {})
            self.indices.append('socorro_some_aliased_index')
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})
            self.indices.append(now_index)

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200142')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200000')

            assert_raises(pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                          es.status, 'socorro200201')

    @maximum_es_version('0.90')
    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_emails index should not be deleted by the cron job.
        """
        config_manager = self._setup_config()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()
            self.indices.append('socorro_emails')

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            api = IndexCleaner(config)
            api.delete_old_indices()

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
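
Names like 'socorro200142' encode a year and a week number, which is what lets the cleaner tell disposable week-based indices apart from permanent ones such as 'socorro_emails'. A rough reconstruction of that decision, inferred from what these tests assert (week_index_age_days is a hypothetical helper, not IndexCleaner's real code):

import datetime


def week_index_age_days(index_name, now=None, prefix='socorro'):
    """Return the age in days of a 'socorroYYYYWW' index, or None
    if the name is not week-based (e.g. 'socorro_emails')."""
    suffix = index_name[len(prefix):]
    if len(suffix) != 6 or not suffix.isdigit():
        return None  # non-week-based indices are never deleted
    year, week = suffix[:4], suffix[4:]
    # Monday of that week; %W counts weeks with Monday as day 1.
    monday = datetime.datetime.strptime(
        '%s %s 1' % (year, week), '%Y %W %w')
    now = now or datetime.datetime.utcnow()
    return (now - monday).days

# With a 30-day retention, 'socorro200142' is ancient and gets deleted,
# while 'socorro_emails' is not week-based and is left alone.
assert week_index_age_days('socorro_emails') is None
assert week_index_age_days('socorro200142') > 30
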
Example #36
    def setUp(self):
        super(IntegrationTestAutomaticEmails, self).setUp()
        # prep fake crash data in elasticsearch
        now = utc_now() - datetime.timedelta(minutes=30)
        last_month = now - datetime.timedelta(days=31)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)
            # clear the indices cache so the index is created on every test
            storage.indices_cache = set()

            storage.save_processed({
                'uuid': '1',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'unknown'
                    }
                }
            })
            storage.save_processed({
                'uuid': '2',
                'email': '"Quidam" <*****@*****.**>',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': None
                    }
                }
            })
            storage.save_processed({
                'uuid': '3',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now,
                'classifications': {
                    'support': {
                        'classification': 'bitguard'
                    }
                }
            })
            storage.save_processed({
                'uuid': '4',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '5',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '6',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '7',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '8',
                'email': '*****@*****.**',
                'product': 'NightlyTrain',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '9',
                'email': '*****@*****.**',
                'product': 'EarthRaccoon',
                'version': '1.0',
                'release_channel': 'Nightly',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '18',
                'email': 'z\xc3\[email protected]',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Let's insert a duplicate
            storage.save_processed({
                'uuid': '10',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # And let's insert some invalid crashes
            storage.save_processed({
                'uuid': '11',
                'email': None,
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '12',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': last_month
            })
            storage.save_processed({
                'uuid': '13',
                'email': '*****@*****.**',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '14',
                'email': '*****@*****.**',
                'product': 'WindBear',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Finally some invalid email addresses
            storage.save_processed({
                'uuid': '15',
                'email': '     ',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '16',
                'email': 'invalid@email',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })
            storage.save_processed({
                'uuid': '17',
                'email': 'i.do.not.work',
                'product': 'WaterWolf',
                'version': '20.0',
                'release_channel': 'Release',
                'date_processed': now
            })

            # Create some email addresses.
            storage.create_emails_index()
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': last_month
                },
                id='*****@*****.**',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '"Quidam" <*****@*****.**>',
                    'last_sending': last_month
                },
                id='"Quidam" <*****@*****.**>',
            )
            storage.es.index(
                index=config.elasticsearch_emails_index,
                doc_type='emails',
                doc={
                    'email': '*****@*****.**',
                    'last_sending': now
                },
                id='*****@*****.**',
            )

            # As indexing is asynchronous, we need to force elasticsearch to
            # make the newly created content searchable before we run the
            # tests.
            storage.es.refresh()
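
The closing `storage.es.refresh()` matters in every one of these tests: Elasticsearch search is near-real-time, so newly indexed documents only become searchable after a refresh, and without it the cron jobs under test would see an empty index. A bare-bones version of the index-then-refresh pattern with pyelasticsearch (assumes a local cluster on port 9200; the index and document are illustrative):

import pyelasticsearch

es = pyelasticsearch.ElasticSearch('http://localhost:9200')

# Index one document, then refresh so an immediate search can see it.
es.index('sketch_index', 'docs', {'email': '[email protected]'}, id='1')
es.refresh('sketch_index')
results = es.search({'query': {'match_all': {}}}, index='sketch_index')
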
Example #37
class IntegrationTestElasticsearchCleanup(IntegrationTestBase):

    def _setup_config_manager(self):
        return get_config_manager_for_crontabber(
            jobs='socorro.cron.jobs.elasticsearch_cleanup.'
                'ElasticsearchCleanupCronApp|30d',
        )

    def __init__(self, *args, **kwargs):
        super(
            IntegrationTestElasticsearchCleanup,
            self
        ).__init__(*args, **kwargs)

        storage_config = self._setup_storage_config()
        with storage_config.context() as config:
            self.storage = ElasticSearchCrashStorage(config)

    def tearDown(self):
        # Clean up created indices.
        self.storage.es.delete_index('socorro*')
        super(IntegrationTestElasticsearchCleanup, self).tearDown()

    def _setup_storage_config(self):
        mock_logging = mock.Mock()

        storage_conf = ElasticSearchCrashStorage.get_required_config()
        storage_conf.add_option('logger', default=mock_logging)

        return ConfigurationManager(
            [storage_conf],
            values_source_list=[os.environ],
            argv_source=[]
        )

    def test_right_indices_are_deleted(self):
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create old indices to be deleted.
            self.storage.create_index('socorro200142', {})
            self.storage.create_index('socorro200000', {})

            # Create an old aliased index.
            self.storage.create_index('socorro200201_20030101', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro200201_20030101',
                        'alias': 'socorro200201'
                    }
                }]
            })

            # Create a recent aliased index.
            last_week_index = self.storage.get_index_for_crash(
                utc_now() - datetime.timedelta(weeks=1)
            )
            self.storage.create_index('socorro_some_aliased_index', {})
            es.update_aliases({
                'actions': [{
                    'add': {
                        'index': 'socorro_some_aliased_index',
                        'alias': last_week_index
                    }
                }]
            })

            # Create a recent index that should not be deleted.
            now_index = self.storage.get_index_for_crash(utc_now())
            self.storage.create_index(now_index, {})

            # These will raise an error if an index was not correctly created.
            es.status('socorro200142')
            es.status('socorro200000')
            es.status('socorro200201')
            es.status(now_index)
            es.status(last_week_index)

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the recent index is still there.
            es.status(now_index)
            es.status(last_week_index)

            # Verify the old indices are gone.
            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200142'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200000'
            )

            assert_raises(
                pyelasticsearch.exceptions.ElasticHttpNotFoundError,
                es.status,
                'socorro200201'
            )

    def test_other_indices_are_not_deleted(self):
        """Verify that non-week-based indices are not removed. For example,
        the socorro_emails index should not be deleted by the cron job.
        """
        config_manager = self._setup_config_manager()
        with config_manager.context() as config:
            # clear the indices cache so the index is created on every test
            self.storage.indices_cache = set()

            es = self.storage.es

            # Create the socorro emails index.
            self.storage.create_emails_index()

            # This will raise an error if the index was not correctly created.
            es.status('socorro_emails')

            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['elasticsearch-cleanup']
            assert not information['elasticsearch-cleanup']['last_error']
            assert information['elasticsearch-cleanup']['last_success']

            # Verify the email index is still there. This will raise an error
            # if the index does not exist.
            es.status('socorro_emails')
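
One subtlety both cleanup classes guard against is aliases: 'socorro200201' above is only an alias for the physical index 'socorro200201_20030101', and last week's name is an alias for 'socorro_some_aliased_index'. A cleaner that looked only at physical index names would either miss the former or wrongly delete the latter, so the alias map has to be consulted as well. Roughly, with pyelasticsearch (treat the exact aliases() call and response shape as an assumption):

import pyelasticsearch

es = pyelasticsearch.ElasticSearch('http://localhost:9200')

# GET /_aliases returns {physical_index: {'aliases': {alias_name: {}}}}.
alias_map = es.aliases()
for physical, data in alias_map.items():
    for alias in data.get('aliases', {}):
        # Judge staleness by the week-based alias, not the physical name.
        print('%s is reachable as %s' % (physical, alias))
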