def configure_metrics():
    """Configure time-execution metrics with a threaded Elasticsearch backend.

    The threaded backend queues metrics and writes them to Elasticsearch from
    a worker thread, so request handling is never blocked by metric writes.
    """
    # NOTE(review): a METRICS_ENABLED feature-flag guard was present but
    # commented out; restore it here if metrics must be toggled per env.
    async_es_metrics = ThreadedBackend(
        ElasticsearchBackend,
        backend_kwargs={
            'host': 'localhost',
            'port': '9200',
            # TODO: source url_prefix / use_ssl / verify_certs / index /
            # http_auth from settings.ELASTICSEARCH_* once those exist.
        },
    )
    settings.configure(
        backends=[async_es_metrics],
        hooks=[
            status_code_hook,
        ],
        origin='inspire_next',
    )
def test_backend_args(self):
    """The wrapped backend class receives the args/kwargs configured in setUp."""
    expected_args = ('arg1', 'arg2')
    expected_kwargs = {'key1': 'kwarg1', 'key2': 'kwarg2'}
    self.MockedBackendClass.assert_called_with(*expected_args, **expected_kwargs)
    # Instantiating without backend args must call the class with none either.
    ThreadedBackend(self.MockedBackendClass)
    self.MockedBackendClass.assert_called_with()
def test_backend_args(self):
    """Configured positional and keyword args are forwarded to the backend class."""
    self.MockedBackendClass.assert_called_with(
        "arg1", "arg2", key1="kwarg1", key2="kwarg2"
    )
    # A ThreadedBackend built without extra args calls the class bare.
    ThreadedBackend(self.MockedBackendClass)
    self.MockedBackendClass.assert_called_with()
def configure_appmetrics(self, app):
    """Set up time-execution app metrics for *app* when the feature flag is on.

    Chooses the threaded (async) or plain Elasticsearch backend based on
    the APPMETRICS_THREADED_BACKEND config switch.
    """
    if not app.config.get('FEATURE_FLAG_ENABLE_APPMETRICS'):
        return

    es_hosts = app.config['APPMETRICS_ELASTICSEARCH_HOSTS']
    es_index = app.config['APPMETRICS_ELASTICSEARCH_INDEX']
    if app.config['APPMETRICS_THREADED_BACKEND']:
        # lazy_init defers the Elasticsearch connection to the first write.
        backend = ThreadedBackend(
            ElasticsearchBackend,
            backend_kwargs=dict(hosts=es_hosts, index=es_index),
            lazy_init=True,
        )
    else:
        backend = ElasticsearchBackend(hosts=es_hosts, index=es_index)

    hooks = [
        inspire_service_orcid_hooks.status_code_hook,
        inspire_service_orcid_hooks.orcid_error_code_hook,
        inspire_service_orcid_hooks.orcid_service_exception_hook,
        # Add other hooks here: exception_hook,
    ]
    origin = 'inspire_next'
    time_execution.settings.configure(
        backends=[backend],
        hooks=hooks,
        origin=origin
    )
def configure_appmetrics(self, app):
    """Configure time-execution app metrics when the feature flag is enabled."""
    if not app.config.get("FEATURE_FLAG_ENABLE_APPMETRICS"):
        return

    es_hosts = app.config["APPMETRICS_ELASTICSEARCH_HOSTS"]
    es_index = app.config["APPMETRICS_ELASTICSEARCH_INDEX"]
    if app.config["APPMETRICS_THREADED_BACKEND"]:
        # Threaded backend queues metrics and ships them from a worker thread.
        backend = ThreadedBackend(
            ElasticsearchBackend,
            backend_kwargs=dict(hosts=es_hosts, index=es_index),
        )
    else:
        backend = ElasticsearchBackend(hosts=es_hosts, index=es_index)

    hooks = [
        inspire_service_orcid_hooks.status_code_hook,
        inspire_service_orcid_hooks.orcid_error_code_hook,
        inspire_service_orcid_hooks.orcid_service_exception_hook,
    ]
    origin = "inspirehep"
    time_execution.settings.configure(
        backends=[backend], hooks=hooks, origin=origin
    )
def setUp(self):
    """Wire a ThreadedBackend around a mocked Elasticsearch backend."""
    self.qsize = 10
    self.qtimeout = 0.1
    # The mock stands in for a real ElasticsearchBackend instance; the
    # class-mock returns it so ThreadedBackend picks it up on init.
    self.mocked_backend = mock.Mock(spec=elasticsearch.ElasticsearchBackend)
    self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
    self.backend = ThreadedBackend(
        self.MockedBackendClass,
        backend_args=('arg1', 'arg2'),
        backend_kwargs={'key1': 'kwarg1', 'key2': 'kwarg2'},
        queue_maxsize=self.qsize,
        queue_timeout=self.qtimeout,
    )
    # Flush batches at half the queue size, or after two queue timeouts.
    self.backend.bulk_size = self.qsize / 2
    self.backend.bulk_timeout = self.qtimeout * 2
    settings.configure(backends=[self.backend])
def setUp(self):
    """Point a ThreadedBackend at a real Elasticsearch host and clear its index."""
    self.qtime = 0.1
    self.backend = ThreadedBackend(
        elasticsearch.ElasticsearchBackend,
        backend_args=('elasticsearch',),
        backend_kwargs={'index': 'threaded-metrics'},
        queue_timeout=self.qtime,
    )
    settings.configure(backends=[self.backend])
    # Start from an empty index so assertions only see this test's metrics.
    self._clear(self.backend.backend)
def setUp(self):
    """Build a ThreadedBackend over a mocked backend and register it globally."""
    self.qsize = 10
    self.qtimeout = 0.1
    # Instance mock mimics ElasticsearchBackend; the class mock hands it out.
    self.mocked_backend = mock.Mock(spec=elasticsearch.ElasticsearchBackend)
    self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
    self.backend = ThreadedBackend(
        self.MockedBackendClass,
        backend_args=('arg1', 'arg2'),
        backend_kwargs={'key1': 'kwarg1', 'key2': 'kwarg2'},
        queue_maxsize=self.qsize,
        queue_timeout=self.qtimeout,
    )
    # Small bulk size / short timeout keep the timing-based tests fast.
    self.backend.bulk_size = self.qsize / 2
    self.backend.bulk_timeout = self.qtimeout * 2
    settings.configure(backends=[self.backend])
#!/usr/bin/env python
"""Smoke script: start a ThreadedBackend worker around a no-op backend."""
import os
import sys

# Make sure we can import the time_execution library from the repo checkout:
# add the parent directory of this script to sys.path.
# (Was "/".join(path.split("/")[:-1]) — a non-portable reimplementation of
# os.path.dirname that breaks on Windows path separators.)
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(path))

from time_execution.backends.base import BaseMetricsBackend  # noqa isort:skip
from time_execution.backends.threaded import ThreadedBackend  # noqa isort:skip


class DummyBackend(BaseMetricsBackend):
    """Metrics backend that silently discards every metric."""

    def write(self, name, **data):
        # Intentionally a no-op: this script only exercises the worker thread.
        pass


ThreadedBackend(DummyBackend, queue_timeout=1)
class TestTimeExecution(TestBaseBackend):
    """Exercise ThreadedBackend queueing and worker behaviour against a mock."""

    def setUp(self):
        self.qsize = 10
        self.qtimeout = 0.1
        self.mocked_backend = mock.Mock(spec=elasticsearch.ElasticsearchBackend)
        self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
        self.backend = ThreadedBackend(
            self.MockedBackendClass,
            backend_args=('arg1', 'arg2'),
            backend_kwargs=dict(key1='kwarg1', key2='kwarg2'),
            queue_maxsize=self.qsize,
            queue_timeout=self.qtimeout,
        )
        # Flush at half the queue size, or after two queue timeouts.
        self.backend.bulk_size = self.qsize / 2
        self.backend.bulk_timeout = self.qtimeout * 2
        settings.configure(backends=[self.backend])

    def stop_worker(self):
        # worker_limit = 0 makes the worker loop exit; thread becomes None.
        self.backend.worker_limit = 0
        time.sleep(self.qtimeout * 2)
        self.assertEqual(self.backend.thread, None)

    def resume_worker(self, worker_limit=None, **kwargs):
        # Restart the worker, optionally overriding existing backend attrs.
        self.backend.worker_limit = worker_limit
        for key, val in kwargs.items():
            if hasattr(self.backend, key):
                setattr(self.backend, key, val)
        self.backend.start_worker()

    def test_thread_name(self):
        # FIX: was assertEquals — a deprecated alias removed in Python 3.12.
        self.assertEqual(self.backend.thread.name, "TimeExecutionThread")

    def test_backend_args(self):
        self.MockedBackendClass.assert_called_with('arg1', 'arg2', key1='kwarg1', key2='kwarg2')
        ThreadedBackend(self.MockedBackendClass)
        self.MockedBackendClass.assert_called_with()

    def test_empty_queue(self):
        time.sleep(2 * self.qtimeout)  # ensures queue.get times out
        self.assertEqual(0, self.backend.fetched_items)

    def test_decorator(self):
        with freeze_time('2016-08-01 00:00:00'):
            go()
        # ensure worker thread catches up
        time.sleep(2 * self.backend.bulk_timeout)
        mocked_write = self.mocked_backend.bulk_write
        self.assertEqual(1, self.backend.fetched_items)
        mocked_write.assert_called_with([{
            'timestamp': datetime(2016, 8, 1, 0, 0),
            'hostname': SHORT_HOSTNAME,
            'name': 'tests.conftest.go',
            'value': 0.0,
        }])

    def test_double_start(self):
        self.assertEqual(0, self.backend.fetched_items)
        go()
        time.sleep(2 * self.qtimeout)
        self.assertEqual(1, self.backend.fetched_items)
        # try to double start
        self.backend.start_worker()
        self.assertEqual(1, self.backend.fetched_items)

    def test_write_error(self):
        # A failing write must not crash the worker thread.
        self.mocked_backend.write.side_effect = RuntimeError('mocked')
        go()
        time.sleep(2 * self.qtimeout)

    def test_queue_congestion(self):
        # assure worker is stopped
        self.stop_worker()
        # fill in the queue
        for _ in range(self.qsize * 2):
            go()
        self.assertTrue(self.backend._queue.full())
        self.resume_worker(bulk_timeout=self.qtimeout)
        # wait until all metrics are picked up
        time.sleep(self.qsize * self.qtimeout)
        # check that metrics in the queue were sent with bulk_write calls
        call_args_list = self.mocked_backend.bulk_write.call_args_list
        time.sleep(2 * self.qtimeout)
        self.assertEqual(
            self.qsize,
            sum(len(args[0]) for args, _ in call_args_list)
        )

    def test_worker_sends_remainder(self):
        self.stop_worker()
        self.mocked_backend.bulk_write.side_effect = RuntimeError('mock')
        loops_count = 3
        self.assertTrue(loops_count < self.backend.bulk_size)
        for _ in range(loops_count):
            go()
        # Run the worker synchronously for exactly loops_count fetches.
        self.backend.worker_limit = loops_count
        self.backend.worker()
        self.assertEqual(loops_count, self.backend.fetched_items)
        mocked_bulk_write = self.mocked_backend.bulk_write
        mocked_bulk_write.assert_called_once()
        time.sleep(self.qtimeout * 2)
        self.assertEqual(
            loops_count,
            len(mocked_bulk_write.call_args[0][0])
        )

    def test_worker_error(self):
        self.assertFalse(self.backend.thread is None)
        # simulate TypeError in queue.get
        with mock.patch.object(self.backend._queue, 'get', side_effect=TypeError):
            # ensure worker loop repeat
            time.sleep(2 * self.qtimeout)
        # assert thread stopped
        self.assertTrue(self.backend.thread is None)
def test_backend_class(self):
    """Passing a backend class directly instantiates that class."""
    threaded = ThreadedBackend(backend=elasticsearch.ElasticsearchBackend)
    assert isinstance(threaded.backend, elasticsearch.ElasticsearchBackend)
def test_backend_importpath_wrong_path(self):
    """A dotted path that cannot be imported raises ImportError."""
    bogus_path = 'time_execution.backends.wrong_path.NewBackend'
    with pytest.raises(ImportError):
        ThreadedBackend(backend=bogus_path)
def test_backend_importpath(self, backend_string, expected_class):
    """A dotted import path resolves to an instance of the expected class."""
    threaded = ThreadedBackend(backend=backend_string)
    assert isinstance(threaded.backend, expected_class)
class TestTimeExecution(TestBaseBackend):
    """ThreadedBackend tests against a mocked Elasticsearch backend.

    Timing-sensitive: relies on short queue timeouts and sleeps to let the
    worker thread catch up between assertions.
    """

    def setUp(self):
        # Small queue and short timeout keep the timing-based tests fast.
        self.qsize = 10
        self.qtimeout = 0.1
        self.mocked_backend = mock.Mock(
            spec=elasticsearch.ElasticsearchBackend)
        self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
        self.backend = ThreadedBackend(
            self.MockedBackendClass,
            backend_args=('arg1', 'arg2'),
            backend_kwargs=dict(key1='kwarg1', key2='kwarg2'),
            queue_maxsize=self.qsize,
            queue_timeout=self.qtimeout,
        )
        # Flush at half the queue size, or after two queue timeouts.
        self.backend.bulk_size = self.qsize / 2
        self.backend.bulk_timeout = self.qtimeout * 2
        settings.configure(backends=[self.backend])

    def stop_worker(self):
        # worker_limit = 0 makes the worker loop exit; thread becomes None.
        self.backend.worker_limit = 0
        time.sleep(self.qtimeout * 2)
        self.assertEqual(self.backend.thread, None)

    def resume_worker(self, worker_limit=None, **kwargs):
        # Restart the worker, optionally overriding existing backend attrs.
        self.backend.worker_limit = worker_limit
        for key, val in kwargs.items():
            if hasattr(self.backend, key):
                setattr(self.backend, key, val)
        self.backend.start_worker()

    def test_thread_name(self):
        self.assertEqual(self.backend.thread.name, "TimeExecutionThread")

    def test_backend_args(self):
        # Configured args/kwargs are forwarded to the wrapped backend class.
        self.MockedBackendClass.assert_called_with('arg1', 'arg2', key1='kwarg1', key2='kwarg2')
        ThreadedBackend(self.MockedBackendClass)
        self.MockedBackendClass.assert_called_with()

    def test_empty_queue(self):
        time.sleep(2 * self.qtimeout)  # ensures queue.get times out
        self.assertEqual(0, self.backend.fetched_items)

    def test_decorator(self):
        # Freeze the clock so the recorded timestamp is exactly `now`.
        now = datetime.now()
        with freeze_time(now):
            go()
        # ensure worker thread catches up
        time.sleep(2 * self.backend.bulk_timeout)
        mocked_write = self.mocked_backend.bulk_write
        self.assertEqual(1, self.backend.fetched_items)
        mocked_write.assert_called_with([{
            'timestamp': now,
            'hostname': SHORT_HOSTNAME,
            'name': 'tests.conftest.go',
            'value': 0.0,
        }])

    def test_double_start(self):
        self.assertEqual(0, self.backend.fetched_items)
        go()
        time.sleep(2 * self.qtimeout)
        self.assertEqual(1, self.backend.fetched_items)
        # try to double start
        self.backend.start_worker()
        self.assertEqual(1, self.backend.fetched_items)

    def test_write_error(self):
        # A failing write must not crash the worker thread.
        self.mocked_backend.write.side_effect = RuntimeError('mocked')
        go()
        time.sleep(2 * self.qtimeout)

    def test_queue_congestion(self):
        # assure worker is stopped
        self.stop_worker()
        # fill in the queue
        for _ in range(self.qsize * 2):
            go()
        self.assertTrue(self.backend._queue.full())
        self.resume_worker(bulk_timeout=self.qtimeout)
        # wait until all metrics are picked up
        time.sleep(self.qsize * self.qtimeout)
        # check that metrics in the queue were sent with bulk_write calls
        call_args_list = self.mocked_backend.bulk_write.call_args_list
        time.sleep(2 * self.qtimeout)
        self.assertEqual(self.qsize, sum(len(args[0]) for args, _ in call_args_list))

    def test_worker_sends_remainder(self):
        self.stop_worker()
        self.mocked_backend.bulk_write.side_effect = RuntimeError('mock')
        loops_count = 3
        self.assertTrue(loops_count < self.backend.bulk_size)
        for _ in range(loops_count):
            go()
        # Run the worker synchronously for exactly loops_count fetches.
        self.backend.worker_limit = loops_count
        self.backend.worker()
        self.assertEqual(loops_count, self.backend.fetched_items)
        mocked_bulk_write = self.mocked_backend.bulk_write
        mocked_bulk_write.assert_called_once()
        time.sleep(self.qtimeout * 2)
        self.assertEqual(loops_count, len(mocked_bulk_write.call_args[0][0]))

    def test_worker_error(self):
        self.assertFalse(self.backend.thread is None)
        # simulate TypeError in queue.get
        with mock.patch.object(self.backend._queue, 'get', side_effect=TypeError):
            # ensure worker loop repeat
            time.sleep(2 * self.qtimeout)
        # assert thread stopped
        self.assertTrue(self.backend.thread is None)

    def test_producer_in_another_process(self):
        # assure worker is stopped
        self.stop_worker()
        # fill in the queue
        process = Process(target=go)
        process.start()
        process.join()
        # check the queue contains the item
        self.assertEqual(self.backend._queue.qsize(), 1)

    def test_flush_metrics_when_parent_process_not_alive(self):
        self.stop_worker()
        loops = 3
        # Simulate a dead parent thread: the worker should flush what it has
        # fetched before exiting.
        with mock.patch.object(self.backend, 'parent_thread', spec=Thread) as parent_thread:
            parent_thread.is_alive.return_value = False
            for _ in range(loops):
                go()
            #: do not allow flush metrics before checking if parent_thread is alive
            self.backend.worker_limit = loops + 1
            self.backend.worker()
        mocked_bulk_write = self.mocked_backend.bulk_write
        mocked_bulk_write.assert_called_once()
        self.assertEqual(loops, len(mocked_bulk_write.call_args[0][0]))
class TestTimeExecution(TestBaseBackend):
    """ThreadedBackend tests against a mocked Elasticsearch backend.

    Timing-sensitive: uses short queue timeouts plus sleeps so the worker
    thread can catch up between assertions.
    """

    def setUp(self):
        # Small queue and short timeout keep the timing-based tests fast.
        self.qsize = 10
        self.qtimeout = 0.1
        self.mocked_backend = mock.Mock(
            spec=elasticsearch.ElasticsearchBackend)
        self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
        self.backend = ThreadedBackend(
            self.MockedBackendClass,
            backend_args=('arg1', 'arg2'),
            backend_kwargs=dict(key1='kwarg1', key2='kwarg2'),
            queue_maxsize=self.qsize,
            queue_timeout=self.qtimeout,
        )
        # Flush at half the queue size, or after two queue timeouts.
        self.backend.bulk_size = self.qsize / 2
        self.backend.bulk_timeout = self.qtimeout * 2
        settings.configure(backends=[self.backend])

    def stop_worker(self):
        # worker_limit = 0 makes the worker loop exit; thread becomes None.
        self.backend.worker_limit = 0
        time.sleep(self.qtimeout * 2)
        self.assertEqual(self.backend.thread, None)

    def resume_worker(self, worker_limit=None, **kwargs):
        # Restart the worker, optionally overriding existing backend attrs.
        self.backend.worker_limit = worker_limit
        for key, val in kwargs.items():
            if hasattr(self.backend, key):
                setattr(self.backend, key, val)
        self.backend.start_worker()

    def test_backend_args(self):
        # Configured args/kwargs are forwarded to the wrapped backend class.
        self.MockedBackendClass.assert_called_with('arg1', 'arg2', key1='kwarg1', key2='kwarg2')
        ThreadedBackend(self.MockedBackendClass)
        self.MockedBackendClass.assert_called_with()

    def test_empty_queue(self):
        time.sleep(2 * self.qtimeout)  # ensures queue.get times out
        self.assertEqual(0, self.backend.fetched_items)

    def test_decorator(self):
        # Freeze the clock so the recorded timestamp is deterministic.
        with freeze_time('2016-08-01 00:00:00'):
            go()
        # ensure worker thread catches up
        time.sleep(2 * self.backend.bulk_timeout)
        mocked_write = self.mocked_backend.bulk_write
        self.assertEqual(1, self.backend.fetched_items)
        mocked_write.assert_called_with([{
            'timestamp': datetime(2016, 8, 1, 0, 0),
            'hostname': SHORT_HOSTNAME,
            'name': 'tests.conftest.go',
            'value': 0.0,
        }])

    def test_double_start(self):
        self.assertEqual(0, self.backend.fetched_items)
        go()
        time.sleep(2 * self.qtimeout)
        self.assertEqual(1, self.backend.fetched_items)
        # try to double start
        self.backend.start_worker()
        self.assertEqual(1, self.backend.fetched_items)

    def test_write_error(self):
        # A failing write must not crash the worker thread.
        self.mocked_backend.write.side_effect = RuntimeError('mocked')
        go()
        time.sleep(2 * self.qtimeout)

    def test_queue_congestion(self):
        # assure worker is stopped
        self.stop_worker()
        # fill in the queue
        for _ in range(self.qsize * 2):
            go()
        self.assertTrue(self.backend._queue.full())
        self.resume_worker(bulk_timeout=self.qtimeout)
        # wait until all metrics are picked up
        time.sleep(self.qsize * self.qtimeout)
        # check that metrics in the queue were sent with bulk_write calls
        call_args_list = self.mocked_backend.bulk_write.call_args_list
        self.assertEqual(self.qsize, sum(len(args[0]) for args, _ in call_args_list))

    def test_worker_sends_remainder(self):
        self.stop_worker()
        self.mocked_backend.bulk_write.side_effect = RuntimeError('mock')
        loops_count = 3
        self.assertTrue(loops_count < self.backend.bulk_size)
        for _ in range(loops_count):
            go()
        # Run the worker synchronously for exactly loops_count fetches.
        self.backend.worker_limit = loops_count
        self.backend.worker()
        self.assertEqual(loops_count, self.backend.fetched_items)
        mocked_bulk_write = self.mocked_backend.bulk_write
        mocked_bulk_write.assert_called_once()
        self.assertEqual(loops_count, len(mocked_bulk_write.call_args[0][0]))

    def test_worker_error(self):
        self.assertFalse(self.backend.thread is None)
        # simulate TypeError in queue.get
        with mock.patch.object(self.backend._queue, 'get', side_effect=TypeError):
            # ensure worker loop repeat
            time.sleep(2 * self.qtimeout)
        # assert thread stopped
        self.assertTrue(self.backend.thread is None)
def test_backend_importpath(self):
    """The Elasticsearch backend can be resolved from its dotted import path."""
    dotted_path = "time_execution.backends.elasticsearch.ElasticsearchBackend"
    threaded = ThreadedBackend(backend=dotted_path)
    assert isinstance(threaded.backend, elasticsearch.ElasticsearchBackend)