def _record_audit(update_results, producer_task, start_time):
    """
    Record an audit log for the get_company_updates task which expresses the number
    of companies successfully updated, failures, ids of companies updated, celery
    task info and start/end times.
    """
    audit = {
        'success_count': 0,
        'failure_count': 0,
        'updated_company_ids': [],
        'producer_task_id': producer_task.request.id,
        'start_time': start_time.isoformat(),
        'end_time': now().isoformat(),
    }
    for result in update_results:
        if result.successful():
            audit['success_count'] += 1
            audit['updated_company_ids'].append(result.result)
        else:
            audit['failure_count'] += 1
    log_to_sentry('get_company_updates task completed.', extra=audit)
    success_count, failure_count = audit['success_count'], audit['failure_count']
    realtime_message = (
        f'{producer_task.name} updated: {success_count}; '
        f'failed to update: {failure_count}'
    )
    send_realtime_message(realtime_message)
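# A hedged illustration of the inputs _record_audit expects: update_results is an
# iterable of celery AsyncResult-like objects exposing successful() and .result
# (the updated company id), and producer_task is the bound task whose .request.id
# and .name are read above. The Mock-based call below is illustrative only, not
# taken from the source.
from unittest.mock import Mock

from django.utils.timezone import now


def _example_record_audit_call():
    update_results = [
        Mock(**{'successful.return_value': True, 'result': 'company-id-1'}),
        Mock(**{'successful.return_value': False}),
    ]
    producer_task = Mock()
    producer_task.request.id = 'producer-task-id'
    producer_task.name = 'get_company_updates'
    _record_audit(update_results, producer_task, start_time=now())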
def execute_search_query(query):
    """
    Executes an Elasticsearch query using the globally configured request timeout.

    (A warning is also logged if the query takes longer than a set threshold.)
    """
    try:
        response = query.params(
            request_timeout=settings.ES_SEARCH_REQUEST_TIMEOUT,
        ).execute()
    except ConnectionError:
        raise APIBadGatewayException(
            f'Upstream service unavailable: {urlparse(settings.ES_URL).netloc}',
        )

    if response.took >= settings.ES_SEARCH_REQUEST_WARNING_THRESHOLD * 1000:
        logger.warning(
            f'Elasticsearch query took a long time ({response.took / 1000:.2f}s)',
        )
        log_data = {
            'query': query.to_dict(),
            'took': response.took,
            'timed_out': response.timed_out,
        }
        log_to_sentry('Elasticsearch query took a long time', extra=log_data)

    return response
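# A usage sketch, assuming the query objects passed to execute_search_query are
# elasticsearch_dsl Search instances. The 'companies' index name and the match
# query below are illustrative, not taken from the source.
from elasticsearch_dsl import Search


def search_companies_by_name(name):
    search = Search(index='companies').query('match', name=name)
    response = execute_search_query(search)
    # response is an elasticsearch_dsl Response; hits carry the matching documents.
    return [hit.to_dict() for hit in response.hits]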
def _record_audit_log(self):
    audit = {
        'success_count': self.success_count,
        'failure_count': self.processed_count - self.success_count,
        'updated_company_ids': self.processed_ids,
        'start_time': self.start_timestamp,
        'end_time': now().isoformat(timespec='seconds'),
    }
    log_to_sentry('update_company_dnb_data command completed.', extra=audit)
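# A minimal sketch of the management command around _record_audit_log, assuming
# success_count, processed_count, processed_ids and start_timestamp are plain
# attributes maintained while iterating over companies. The Command class and its
# _get_company_ids_to_update/_update_company helpers are hypothetical; only the
# update_company_dnb_data name appears in the source above.
from django.core.management.base import BaseCommand
from django.utils.timezone import now


class Command(BaseCommand):
    help = 'Update company records with D&B data and record an audit log.'

    def handle(self, *args, **options):
        self.start_timestamp = now().isoformat(timespec='seconds')
        self.success_count = 0
        self.processed_count = 0
        self.processed_ids = []
        for company_id in self._get_company_ids_to_update():
            self.processed_count += 1
            if self._update_company(company_id):
                self.success_count += 1
                self.processed_ids.append(company_id)
        # _record_audit_log is the method shown above and would live on this class.
        self._record_audit_log()

    def _get_company_ids_to_update(self):
        # Placeholder: the real command would select the companies needing a refresh.
        return []

    def _update_company(self, company_id):
        # Placeholder: the real command would apply the D&B update here.
        return True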
def execute_search_query(query):
    """
    Executes an Elasticsearch query using the globally configured request timeout.

    (A warning is also logged if the query takes longer than a set threshold.)
    """
    response = query.params(request_timeout=settings.ES_SEARCH_REQUEST_TIMEOUT).execute()

    if response.took >= settings.ES_SEARCH_REQUEST_WARNING_THRESHOLD * 1000:
        logger.warning(
            f'Elasticsearch query took a long time ({response.took / 1000:.2f} seconds)',
        )
        log_data = {
            'query': query.to_dict(),
            'took': response.took,
            'timed_out': response.timed_out,
        }
        log_to_sentry('Elasticsearch query took a long time', extra=log_data)

    return response
def test_log_to_sentry(mocked_capture_message, mocked_push_scope, level, extra):
    """
    Test log_to_sentry utility.
    """
    kwargs = {}
    expected_extra = {}
    if extra:
        kwargs['extra'] = extra
        expected_extra = extra
    expected_level = 'info'
    if level:
        kwargs['level'] = level
        expected_level = level

    log_to_sentry('foo', **kwargs)

    mocked_capture_message.assert_called_with('foo', level=expected_level)
    mocked_scope = mocked_push_scope.return_value.__enter__.return_value
    for key, value in expected_extra.items():
        mocked_scope.set_extra.assert_any_call(key, value)
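# A sketch of the log_to_sentry utility exercised by the test above, reconstructed
# from the mocked calls (push_scope used as a context manager, scope.set_extra for
# each extra item, capture_message with a level defaulting to 'info'). Treat it as
# an assumption about the implementation, not the verbatim source.
from sentry_sdk import capture_message, push_scope


def log_to_sentry(message, extra=None, level='info'):
    extra = extra or {}
    with push_scope() as scope:
        # Attach each extra item to the Sentry scope before capturing the message.
        for key, value in extra.items():
            scope.set_extra(key, value)
        capture_message(message, level=level)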