def test_not_found(self, mocked_load_search_apps):
    """
    Test that get_search_app raises KeyError when no registered search app
    matches the name passed in.
    """
    mocked_load_search_apps.return_value = {'app1': mock.Mock()}
    with pytest.raises(KeyError):
        get_search_app(mock.Mock())
def test_company_dbmodels_to_documents(self, opensearch):
    """Tests that each company db model in a queryset yields one OpenSearch document."""
    batch = CompanyFactory.create_batch(2)
    company_app = get_search_app('company')
    queryset = company_app.queryset.all()
    documents = SearchCompany.db_objects_to_documents(queryset)
    assert len(list(documents)) == len(batch)
def sync_model(search_app_name):
    """
    Task that syncs a single model to Elasticsearch.

    acks_late is set to True so that the task restarts if interrupted.
    priority is set to the lowest priority (for Redis, 0 is the highest priority).
    """
    # Resolve the registered search app by name, then resync it in full.
    sync_app(get_search_app(search_app_name))
def test_found(self, mocked_load_search_apps):
    """
    Test that get_search_app returns the search app registered under the
    name passed in.
    """
    expected_app = mock.Mock()
    mocked_load_search_apps.return_value = {
        'app1': mock.Mock(),
        'app2': expected_app,
    }
    assert get_search_app('app2') == expected_app
def sync_object_task(search_app_name, pk):
    """
    Syncs a single object to Elasticsearch.

    If an error occurs, the task will be automatically retried with an
    exponential back-off. The wait between attempts is approximately
    2 ** attempt_num seconds (with some jitter added).

    This task is named sync_object_task to avoid a conflict with sync_object.
    """
    # Imported locally (as in the original) — presumably to avoid a circular
    # import at module load time.
    from datahub.search.sync_object import sync_object

    app = get_search_app(search_app_name)
    sync_object(app, pk)
def test_company_dbmodel_to_dict(self, opensearch):
    """Tests conversion of a company db model to a dict of document fields."""
    company = CompanyFactory()
    company_app = get_search_app('company')
    db_object = company_app.queryset.get(pk=company.pk)

    converted = SearchCompany.db_object_to_dict(db_object)

    expected_keys = {
        '_document_type',
        'archived',
        'archived_by',
        'archived_on',
        'archived_reason',
        'business_type',
        'company_number',
        'created_on',
        'description',
        'employee_range',
        'export_experience_category',
        'export_to_countries',
        'future_interest_countries',
        'headquarter_type',
        'id',
        'modified_on',
        'name',
        'global_headquarters',
        'reference_code',
        'sector',
        'latest_interaction_date',
        'address',
        'registered_address',
        'one_list_group_global_account_manager',
        'trading_names',
        'turnover_range',
        'uk_based',
        'uk_region',
        'uk_address_postcode',
        'uk_registered_address_postcode',
        'vat_number',
        'duns_number',
        'website',
        'export_segment',
        'export_sub_segment',
    }
    assert set(converted.keys()) == expected_keys
def complete_model_migration(self, search_app_name, new_mapping_hash):
    """
    Completes a migration by performing a full resync, updating aliases and
    removing old indices.
    """
    search_app = get_search_app(search_app_name)
    # If the mapping hash baked into the task doesn't match the hash this app
    # version would generate, the task and the running code are from different
    # deployments — retry rather than resync against the wrong mapping.
    if search_app.es_model.get_target_mapping_hash() != new_mapping_hash:
        warning_message = f"""Unexpected target mapping hash. This indicates that the task was \
generated by either a newer or an older version of the app. This could happen during a blue-green \
deployment where a new app instance creates the task and it's picked up by an old Celery instance.
Rescheduling the {search_app_name} search app migration to attempt to resolve the conflict...
"""
        logger.warning(warning_message)
        # Re-queue the task; presumably a worker on the matching app version
        # will pick it up — retry policy is configured on the task decorator.
        raise self.retry()

    # Advisory lock ensures only one migration resync runs per search app at a
    # time; wait=False means we bail out instead of blocking if another task
    # already holds the lock.
    with advisory_lock(f'leeloo-resync_after_migrate-{search_app_name}', wait=False) as lock_held:
        if not lock_held:
            logger.warning(
                f'Another complete_model_migration task is in progress for the {search_app_name} '
                f'search app. Aborting...',
            )
            return
        resync_after_migrate(search_app)