def send_and_save_webhook_request(sentry_app, app_platform_event, url=None):
    """
    Notify a SentryApp's webhook about an incident and log response on redis.

    :param sentry_app: The SentryApp to notify via a webhook.
    :param app_platform_event: Incident data. See AppPlatformEvent.
    :param url: The URL to hit for this webhook if it is different from
                `sentry_app.webhook_url`.
    :return: Webhook response
    """
    request_buffer = SentryAppWebhookRequestsBuffer(sentry_app)

    org_id = app_platform_event.install.organization_id
    event = f"{app_platform_event.resource}.{app_platform_event.action}"
    metrics_slug = sentry_app.slug_for_metrics
    target_url = url or sentry_app.webhook_url

    try:
        resp = safe_urlopen(
            url=target_url,
            data=app_platform_event.body,
            headers=app_platform_event.headers,
            timeout=5,
        )
    except (Timeout, ConnectionError) as exc:
        err_kind = exc.__class__.__name__.lower()
        logger.info(
            "send_and_save_webhook_request.timeout",
            extra={
                "error_type": err_kind,
                "organization_id": org_id,
                "integration_slug": sentry_app.slug,
            },
        )
        track_response_code(err_kind, metrics_slug, event)
        # Response code of 0 represents timeout
        request_buffer.add_request(
            response_code=0, org_id=org_id, event=event, url=target_url
        )
        # Re-raise the exception because some of these tasks might retry on the exception
        raise

    # Record the outcome before surfacing any HTTP-level failure.
    track_response_code(resp.status_code, metrics_slug, event)
    request_buffer.add_request(
        response_code=resp.status_code,
        org_id=org_id,
        event=event,
        url=target_url,
        error_id=resp.headers.get("Sentry-Hook-Error"),
        project_id=resp.headers.get("Sentry-Hook-Project"),
    )

    # Translate gateway failures into integration exceptions, then let
    # raise_for_status cover any remaining error codes.
    if resp.status_code == 503:
        raise ApiHostError.from_request(resp.request)
    if resp.status_code == 504:
        raise ApiTimeoutError.from_request(resp.request)
    if 400 <= resp.status_code < 500:
        raise ClientError(resp.status_code, target_url, response=resp)
    resp.raise_for_status()
    return resp
def send_and_save_sentry_app_request(url, sentry_app, org_id, event, **kwargs):
    """
    Send a webhook request, and save the request into the Redis buffer for the
    app dashboard request log.

    Returns the response of the request.

    kwargs ends up being the arguments passed into safe_urlopen
    """
    request_buffer = SentryAppWebhookRequestsBuffer(sentry_app)
    metrics_slug = sentry_app.slug_for_metrics

    try:
        resp = safe_urlopen(url=url, **kwargs)
    except RequestException:
        # NOTE(review): every RequestException is tracked as "timeout" here,
        # even non-timeout failures — confirm this labeling is intentional.
        track_response_code("timeout", metrics_slug, event)
        # Response code of 0 represents timeout
        request_buffer.add_request(response_code=0, org_id=org_id, event=event, url=url)
        # Re-raise the exception because some of these tasks might retry on the exception
        raise

    track_response_code(resp.status_code, metrics_slug, event)
    request_buffer.add_request(
        response_code=resp.status_code,
        org_id=org_id,
        event=event,
        url=url,
        error_id=resp.headers.get("Sentry-Hook-Error"),
        project_id=resp.headers.get("Sentry-Hook-Project"),
    )
    return resp
def send_and_save_webhook_request(url, sentry_app, app_platform_event):
    """
    Send a webhook for the given platform event and record the attempt.

    The outcome is tracked in metrics and stored in the Redis-backed request
    buffer so it appears in the app dashboard request log.

    :param url: The URL to send the webhook to.
    :param sentry_app: The SentryApp whose webhook is being delivered.
    :param app_platform_event: Event payload. See AppPlatformEvent.
    :return: Webhook response
    """
    buffer = SentryAppWebhookRequestsBuffer(sentry_app)

    org_id = app_platform_event.install.organization_id
    # f-string for consistency with the sibling implementations in this file.
    event = f"{app_platform_event.resource}.{app_platform_event.action}"
    slug = sentry_app.slug_for_metrics

    try:
        resp = safe_urlopen(
            url=url, data=app_platform_event.body, headers=app_platform_event.headers, timeout=5
        )
    except RequestException:
        track_response_code("timeout", slug, event)
        # Response code of 0 represents timeout
        buffer.add_request(response_code=0, org_id=org_id, event=event, url=url)
        # Re-raise the exception because some of these tasks might retry on the exception
        raise

    track_response_code(resp.status_code, slug, event)
    buffer.add_request(
        response_code=resp.status_code,
        org_id=org_id,
        event=event,
        url=url,
        error_id=resp.headers.get("Sentry-Hook-Error"),
        project_id=resp.headers.get("Sentry-Hook-Project"),
    )
    return resp
def send_and_save_sentry_app_request(url, sentry_app, org_id, event, **kwargs):
    """
    Send a webhook request, and save the request into the Redis buffer for the
    app dashboard request log.

    Returns the response of the request.

    kwargs ends up being the arguments passed into safe_urlopen
    """
    request_buffer = SentryAppWebhookRequestsBuffer(sentry_app)
    metrics_slug = sentry_app.slug_for_metrics

    try:
        resp = safe_urlopen(url=url, **kwargs)
    except (Timeout, ConnectionError) as exc:
        err_kind = exc.__class__.__name__.lower()
        logger.info(
            "send_and_save_sentry_app_request.timeout",
            extra={
                "error_type": err_kind,
                "organization_id": org_id,
                "integration_slug": sentry_app.slug,
            },
        )
        track_response_code(err_kind, metrics_slug, event)
        # Response code of 0 represents timeout
        request_buffer.add_request(response_code=0, org_id=org_id, event=event, url=url)
        # Re-raise the exception because some of these tasks might retry on the exception
        raise

    # Record the outcome before surfacing any HTTP-level failure.
    track_response_code(resp.status_code, metrics_slug, event)
    request_buffer.add_request(
        response_code=resp.status_code,
        org_id=org_id,
        event=event,
        url=url,
        error_id=resp.headers.get("Sentry-Hook-Error"),
        project_id=resp.headers.get("Sentry-Hook-Project"),
    )
    resp.raise_for_status()
    return resp
def send_and_save_webhook_request(url, sentry_app, app_platform_event):
    """
    Send a webhook for the given platform event and record the attempt.

    The outcome is tracked in metrics and stored in the Redis-backed request
    buffer so it appears in the app dashboard request log. Gateway failures
    (503/504) are translated into integration exceptions; any other HTTP
    error status is raised via ``raise_for_status``.

    :param url: The URL to send the webhook to.
    :param sentry_app: The SentryApp whose webhook is being delivered.
    :param app_platform_event: Event payload. See AppPlatformEvent.
    :return: Webhook response
    """
    buffer = SentryAppWebhookRequestsBuffer(sentry_app)

    org_id = app_platform_event.install.organization_id
    # f-string for consistency with the sibling implementations in this file.
    event = f"{app_platform_event.resource}.{app_platform_event.action}"
    slug = sentry_app.slug_for_metrics

    try:
        resp = safe_urlopen(
            url=url, data=app_platform_event.body, headers=app_platform_event.headers, timeout=5
        )
    except (Timeout, ConnectionError) as e:
        track_response_code(e.__class__.__name__.lower(), slug, event)
        # Response code of 0 represents timeout
        buffer.add_request(response_code=0, org_id=org_id, event=event, url=url)
        # Re-raise the exception because some of these tasks might retry on the exception
        raise
    else:
        track_response_code(resp.status_code, slug, event)
        buffer.add_request(
            response_code=resp.status_code,
            org_id=org_id,
            event=event,
            url=url,
            error_id=resp.headers.get("Sentry-Hook-Error"),
            project_id=resp.headers.get("Sentry-Hook-Project"),
        )
        # Map gateway errors to integration-specific exceptions before the
        # generic raise_for_status catch-all.
        if resp.status_code == 503:
            raise ApiHostError.from_request(resp.request)
        elif resp.status_code == 504:
            raise ApiTimeoutError.from_request(resp.request)
        resp.raise_for_status()
        return resp