def es_conn():
    """Create an Elasticsearch ConnectionContext and clean up indices afterwards.

    This uses defaults and configuration from the environment.

    """
    manager = ConfigurationManager(
        ESConnectionContext.get_required_config(), values_source_list=[environment]
    )
    conn = ESConnectionContext(manager.get_config())

    # Create two indexes--this week and last week
    template = conn.config.elasticsearch_index
    conn.create_index(utc_now().strftime(template))
    conn.create_index((utc_now() - datetime.timedelta(weeks=1)).strftime(template))

    conn.health_check()
    yield conn

    for index in conn.get_indices():
        conn.delete_index(index)
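# Usage sketch (assumption, not part of the original module): es_conn is written in
# pytest-fixture style--setup before the yield, cleanup after--so a conftest.py could
# wrap it as below. The fixture name, test name, and assertion are hypothetical.

import pytest


@pytest.fixture
def elasticsearch_conn():
    # Delegate to the es_conn generator above so setup and cleanup stay in one place.
    yield from es_conn()


def test_weekly_indices_created(elasticsearch_conn):
    # The helper created an index for this week and one for last week.
    assert len(list(elasticsearch_conn.get_indices())) >= 2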
class ElasticsearchTestCase(TestCaseWithConfig):
    """Base class for Elastic Search related unit tests"""

    def setup_method(self, method):
        super().setup_method(method)
        self.config = self.get_base_config()
        self.es_context = ConnectionContext(self.config)
        self.index_client = self.es_context.indices_client()
        with self.es_context() as conn:
            self.connection = conn
        self.es_context.create_index(self.es_context.get_index_template())

    def teardown_method(self, method):
        # Clear the test indices.
        self.index_client.delete(self.es_context.get_index_template())
        super().teardown_method(method)

    def health_check(self):
        self.connection.cluster.health(wait_for_status="yellow", request_timeout=5)

    def get_url(self):
        """Returns the first url in the elasticsearch_urls list"""
        return self.config.elasticsearch_urls[0]

    def get_tuned_config(self, sources, extra_values=None):
        values_source = DEFAULT_VALUES.copy()
        if extra_values:
            values_source.update(extra_values)
        return super().get_tuned_config(sources, values_source)

    def get_base_config(self, cls=ConnectionContext, es_index=None):
        extra_values = None
        if es_index:
            extra_values = {"resource.elasticsearch.elasticsearch_index": es_index}
        return self.get_tuned_config(cls, extra_values=extra_values)

    def index_crash(self, processed_crash=None, raw_crash=None, crash_id=None):
        if crash_id is None:
            crash_id = str(uuid.UUID(int=random.getrandbits(128)))

        raw_crash = raw_crash or {}
        processed_crash = processed_crash or {}
        doc = {
            "crash_id": crash_id,
            "processed_crash": processed_crash,
            "raw_crash": raw_crash,
        }
        index_name = self.es_context.get_index_template()
        res = self.connection.index(
            index=index_name,
            doc_type=self.es_context.get_doctype(),
            id=crash_id,
            body=doc,
        )
        return res["_id"]

    def index_many_crashes(
        self, number, processed_crash=None, raw_crash=None, loop_field=None
    ):
        processed_crash = processed_crash or {}
        raw_crash = raw_crash or {}

        actions = []
        for i in range(number):
            crash_id = str(uuid.UUID(int=random.getrandbits(128)))

            if loop_field is not None:
                processed_copy = processed_crash.copy()
                processed_copy[loop_field] = processed_crash[loop_field] % i
            else:
                processed_copy = processed_crash

            doc = {
                "crash_id": crash_id,
                "processed_crash": processed_copy,
                "raw_crash": raw_crash,
            }
            action = {
                "_index": self.es_context.get_index_template(),
                "_type": self.es_context.get_doctype(),
                "_id": crash_id,
                "_source": doc,
            }
            actions.append(action)

        bulk(client=self.connection, actions=actions)
        self.es_context.refresh()
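# Usage sketch (assumption, not part of the original module): a test subclassing this
# base class gets a fresh index from setup_method and can index a single document with
# index_crash, which returns the Elasticsearch document id (the crash id). The test
# class name and field values below are hypothetical.

class TestIndexCrash(ElasticsearchTestCase):
    def test_index_crash_returns_document_id(self):
        crash_id = "00000000-0000-0000-0000-000000000000"
        doc_id = self.index_crash(
            processed_crash={"signature": "OOM | small"}, crash_id=crash_id
        )
        # The id of the indexed document is the crash id we supplied.
        assert doc_id == crash_id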
class ElasticsearchTestCase(TestCaseWithConfig):
    """Base class for Elastic Search related unit tests"""

    def setup_method(self):
        super().setup_method()
        self.config = self.get_base_config()
        self.es_context = ConnectionContext(self.config)
        self.crashstorage = ESCrashStorage(config=self.get_tuned_config(ESCrashStorage))
        self.index_client = self.es_context.indices_client()
        self.conn = self.es_context.connection()

        # Delete everything there first
        for index_name in self.es_context.get_indices():
            print(f"setup: delete test index: {index_name}")
            self.es_context.delete_index(index_name)

        to_create = [
            self.es_context.get_index_for_date(utc_now()),
            self.es_context.get_index_for_date(utc_now() - timedelta(days=7)),
        ]
        for index_name in to_create:
            print(f"setup: creating index: {index_name}")
            self.es_context.create_index(index_name)

    def teardown_method(self):
        for index_name in self.es_context.get_indices():
            print(f"teardown: delete test index: {index_name}")
            self.es_context.delete_index(index_name)
        super().teardown_method()

    def health_check(self):
        self.conn.cluster.health(wait_for_status="yellow", request_timeout=5)

    def get_url(self):
        """Returns the first url in the elasticsearch_urls list"""
        return self.config.elasticsearch_urls[0]

    def get_tuned_config(self, sources, extra_values=None):
        values_source = DEFAULT_VALUES.copy()
        if extra_values:
            values_source.update(extra_values)
        return super().get_tuned_config(sources, values_source)

    def get_base_config(self, cls=ConnectionContext, es_index=None):
        extra_values = None
        if es_index:
            extra_values = {"resource.elasticsearch.elasticsearch_index": es_index}
        return self.get_tuned_config(cls, extra_values=extra_values)

    def index_crash(
        self, processed_crash=None, raw_crash=None, crash_id=None, refresh=True
    ):
        """Index a single crash and refresh"""
        if crash_id is None:
            crash_id = str(uuid.UUID(int=random.getrandbits(128)))

        raw_crash = raw_crash or {}
        processed_crash = processed_crash or {}
        raw_crash["uuid"] = crash_id
        processed_crash["crash_id"] = crash_id
        processed_crash["uuid"] = crash_id

        self.crashstorage.save_processed_crash(raw_crash, processed_crash)

        if refresh:
            self.es_context.refresh()

        return crash_id

    def index_many_crashes(
        self, number, processed_crash=None, raw_crash=None, loop_field=None
    ):
        """Index multiple crashes and refresh at the end"""
        processed_crash = processed_crash or {}
        raw_crash = raw_crash or {}

        crash_ids = []
        for i in range(number):
            if loop_field is not None:
                processed_copy = processed_crash.copy()
                processed_copy[loop_field] = processed_crash[loop_field] % i
            else:
                processed_copy = processed_crash

            crash_ids.append(
                self.index_crash(
                    raw_crash=raw_crash, processed_crash=processed_copy, refresh=False
                )
            )

        self.es_context.refresh()
        return crash_ids
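# Usage sketch (assumption, not part of the original module): with this version of the
# base class, index_many_crashes returns the generated crash ids, and loop_field treats
# the named processed-crash field as a printf-style template applied to the loop
# counter. The test class name and field values below are hypothetical.

class TestManyCrashes(ElasticsearchTestCase):
    def test_loop_field_generates_distinct_versions(self):
        crash_ids = self.index_many_crashes(
            3, processed_crash={"version": "2.0.%s"}, loop_field="version"
        )
        # One crash id per document; the indexed versions are 2.0.0, 2.0.1, and 2.0.2.
        assert len(crash_ids) == 3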