def run_test(self, incident, method):
    from sentry.rules.actions.notify_event_service import build_incident_attachment

    action = self.create_alert_rule_trigger_action(
        target_identifier=self.sentry_app.id,
        type=AlertRuleTriggerAction.Type.SENTRY_APP,
        target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
        sentry_app=self.sentry_app,
    )
    responses.add(
        method=responses.POST,
        url="https://example.com/webhook",
        status=200,
        content_type="application/json",
        body=json.dumps({"ok": "true"}),
    )
    handler = SentryAppActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        getattr(handler, method)(metric_value, IncidentStatus(incident.status))
    data = responses.calls[0].request.body
    assert (
        json.dumps(
            build_incident_attachment(incident, IncidentStatus(incident.status), metric_value)
        )
        in data
    )
def build_activity_context(activity, user):
    if activity.type == IncidentActivityType.COMMENT.value:
        action = "left a comment"
    else:
        action = "changed status from %s to %s" % (
            IncidentStatus(int(activity.previous_value)).name.lower(),
            IncidentStatus(int(activity.value)).name.lower(),
        )
    incident = activity.incident
    action = "%s on incident %s (#%s)" % (action, incident.title, incident.identifier)
    return {
        "user_name": activity.user.name if activity.user else "Sentry",
        "action": action,
        "link": absolute_uri(
            reverse(
                "sentry-incident",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        )
        + "?"
        + urlencode({"referrer": "incident_activity_email"}),
        "comment": activity.comment,
        "unsubscribe_link": generate_signed_link(
            user,
            "sentry-account-email-unsubscribe-incident",
            kwargs={"incident_id": incident.id},
        ),
    }
def run_test(self, incident, method):
    from sentry.integrations.pagerduty.utils import build_incident_attachment

    action = self.create_alert_rule_trigger_action(
        target_identifier=self.service.id,
        type=AlertRuleTriggerAction.Type.PAGERDUTY,
        target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
        integration=self.integration,
    )
    responses.add(
        method=responses.POST,
        url="https://events.pagerduty.com/v2/enqueue/",
        json={},
        status=202,
        content_type="application/json",
    )
    handler = PagerDutyActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        getattr(handler, method)(metric_value, IncidentStatus(incident.status))
    data = responses.calls[0].request.body
    assert json.loads(data) == build_incident_attachment(
        incident, self.service.integration_key, IncidentStatus(incident.status), metric_value
    )
def build_activity_context(activity, user):
    if activity.type == IncidentActivityType.COMMENT.value:
        action = "left a comment"
    else:
        action = "changed status from {} to {}".format(
            INCIDENT_STATUS[IncidentStatus(int(activity.previous_value))],
            INCIDENT_STATUS[IncidentStatus(int(activity.value))],
        )
    incident = activity.incident
    action = f"{action} on alert {incident.title} (#{incident.identifier})"
    return {
        "user_name": activity.user.name if activity.user else "Sentry",
        "action": action,
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        )
        + "?"
        + urlencode({"referrer": "incident_activity_email"}),
        "comment": activity.comment,
    }
def build_activity_context(activity, user):
    if activity.type == IncidentActivityType.COMMENT.value:
        action = 'left a comment'
    else:
        action = 'changed status from %s to %s' % (
            IncidentStatus(int(activity.previous_value)).name.lower(),
            IncidentStatus(int(activity.value)).name.lower(),
        )
    incident = activity.incident
    action = '%s on incident %s (#%s)' % (action, incident.title, incident.identifier)
    return {
        'user_name': activity.user.name if activity.user else 'Sentry',
        'action': action,
        'link': absolute_uri(reverse(
            'sentry-incident',
            kwargs={
                'organization_slug': incident.organization.slug,
                'incident_id': incident.identifier,
            },
        )) + '?' + urlencode({'referrer': 'incident_activity_email'}),
        'comment': activity.comment,
        'unsubscribe_link': generate_signed_link(
            user,
            'sentry-account-email-unsubscribe-incident',
            kwargs={'incident_id': incident.id},
        ),
    }
def test(self):
    status = TriggerStatus.ACTIVE
    incident = self.create_incident()
    action = self.create_alert_rule_trigger_action(triggered_for_incident=incident)
    aggregate = action.alert_rule_trigger.alert_rule.snuba_query.aggregate
    expected = {
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "rule_link": absolute_uri(
            reverse(
                "sentry-alert-rule",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "project_slug": self.project.slug,
                    "alert_rule_id": action.alert_rule_trigger.alert_rule_id,
                },
            )
        ),
        "incident_name": incident.title,
        "aggregate": aggregate,
        "query": action.alert_rule_trigger.alert_rule.snuba_query.query,
        "threshold": action.alert_rule_trigger.alert_threshold,
        "status": INCIDENT_STATUS[IncidentStatus(incident.status)],
        "status_key": INCIDENT_STATUS[IncidentStatus(incident.status)].lower(),
        "environment": "All",
        "is_critical": False,
        "is_warning": False,
        "threshold_direction_string": ">",
        "time_window": "10 minutes",
        "triggered_at": timezone.now(),
        "project_slug": self.project.slug,
        "unsubscribe_link": None,
    }
    assert expected == generate_incident_trigger_email_context(
        self.project, incident, action.alert_rule_trigger, status
    )
def generate_incident_trigger_email_context(project, incident, alert_rule_trigger, status):
    trigger = alert_rule_trigger
    incident_trigger = IncidentTrigger.objects.get(incident=incident, alert_rule_trigger=trigger)

    alert_rule = trigger.alert_rule
    snuba_query = alert_rule.snuba_query
    is_active = status == TriggerStatus.ACTIVE
    is_threshold_type_above = trigger.threshold_type == AlertRuleThresholdType.ABOVE.value

    # if alert threshold and threshold type is above then show '>'
    # if resolve threshold and threshold type is *BELOW* then show '>'
    # we can simplify this to be the below statement
    show_greater_than_string = is_active == is_threshold_type_above
    environment_string = snuba_query.environment.name if snuba_query.environment else "All"
    aggregate = alert_rule.snuba_query.aggregate

    return {
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "rule_link": absolute_uri(
            reverse(
                "sentry-alert-rule",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "project_slug": project.slug,
                    "alert_rule_id": trigger.alert_rule_id,
                },
            )
        ),
        "project_slug": project.slug,
        "incident_name": incident.title,
        "environment": environment_string,
        "time_window": format_duration(snuba_query.time_window / 60),
        "triggered_at": incident_trigger.date_added,
        "aggregate": aggregate,
        "query": snuba_query.query,
        "threshold": trigger.alert_threshold if is_active else trigger.resolve_threshold,
        # if alert threshold and threshold type is above then show '>'
        # if resolve threshold and threshold type is *BELOW* then show '>'
        "threshold_direction_string": ">" if show_greater_than_string else "<",
        "status": INCIDENT_STATUS[IncidentStatus(incident.status)],
        "status_key": INCIDENT_STATUS_KEY[IncidentStatus(incident.status)],
        "is_critical": incident.status == IncidentStatus.CRITICAL,
        "is_warning": incident.status == IncidentStatus.WARNING,
        "unsubscribe_link": None,
    }
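# A quick self-check (standalone sketch, not part of the handler code above) of why the
# single equality covers all four threshold cases: '>' should be shown for an active
# alert on an ABOVE-type trigger and for a resolve on a BELOW-type trigger, which is
# exactly the XNOR of the two booleans.
for is_active in (True, False):
    for is_above in (True, False):
        spelled_out = ">" if (is_active and is_above) or (not is_active and not is_above) else "<"
        simplified = ">" if is_active == is_above else "<"
        assert spelled_out == simplified  # the two formulations always agree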
def run_test(self, incident, method):
    action = self.create_alert_rule_trigger_action(
        target_identifier=str(self.user.id),
        triggered_for_incident=incident,
    )
    handler = EmailActionHandler(action, incident, self.project)
    with self.tasks():
        # dispatch on the method name, matching the other run_test helpers
        # (the original called handler.fire directly and left `method` unused)
        getattr(handler, method)(1000, IncidentStatus(incident.status))
    out = mail.outbox[0]
    assert out.to == [self.user.email]
    assert out.subject == "[{}] {} - {}".format(
        INCIDENT_STATUS[IncidentStatus(incident.status)], incident.title, self.project.slug
    )
def run_test(self, incident, method):
    from sentry.integrations.msteams.card_builder import build_incident_attachment

    integration = Integration.objects.create(
        provider="msteams",
        name="Galactic Empire",
        external_id="D4r7h_Pl4gu315_th3_w153",
        metadata={
            "service_url": "https://smba.trafficmanager.net/amer",
            "access_token": "d4rk51d3",
            "expires_at": int(time.time()) + 86400,
        },
    )
    integration.add_organization(self.organization, self.user)

    channel_id = "d_s"
    channel_name = "Death Star"
    channels = [{"id": channel_id, "name": channel_name}]

    responses.add(
        method=responses.GET,
        url="https://smba.trafficmanager.net/amer/v3/teams/D4r7h_Pl4gu315_th3_w153/conversations",
        json={"conversations": channels},
    )

    action = self.create_alert_rule_trigger_action(
        target_identifier=channel_name,
        type=AlertRuleTriggerAction.Type.MSTEAMS,
        target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
        integration=integration,
    )

    responses.add(
        method=responses.POST,
        url="https://smba.trafficmanager.net/amer/v3/conversations/d_s/activities",
        status=200,
        json={},
    )

    handler = MsTeamsActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        getattr(handler, method)(metric_value, IncidentStatus(incident.status))

    data = json.loads(responses.calls[1].request.body)
    assert data["attachments"][0]["content"] == build_incident_attachment(
        incident, IncidentStatus(incident.status), metric_value
    )
def test_build_incident_attachment(self):
    from sentry.integrations.pagerduty.utils import build_incident_attachment

    alert_rule = self.create_alert_rule()
    incident = self.create_incident(alert_rule=alert_rule)
    update_incident_status(
        incident, IncidentStatus.CRITICAL, status_method=IncidentStatusMethod.RULE_TRIGGERED
    )
    self.create_alert_rule_trigger_action(
        target_identifier=self.service.id,
        type=AlertRuleTriggerAction.Type.PAGERDUTY,
        target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
        integration=self.integration,
    )
    metric_value = 1000
    data = build_incident_attachment(
        incident, self.integration_key, IncidentStatus(incident.status), metric_value
    )

    assert data["routing_key"] == self.integration_key
    assert data["event_action"] == "trigger"
    assert data["dedup_key"] == f"incident_{incident.organization_id}_{incident.identifier}"
    assert data["payload"]["summary"] == alert_rule.name
    assert data["payload"]["severity"] == "critical"
    assert data["payload"]["source"] == str(incident.identifier)
    assert data["payload"]["custom_details"] == {
        "details": "1000 events in the last 10 minutes\nFilter: level:error"
    }
    assert data["links"][0]["text"] == f"Critical: {alert_rule.name}"
    assert data["links"][0]["href"] == "http://testserver/organizations/baz/alerts/1/"
def test_fire_metric_alert_with_missing_integration(self):
    alert_rule = self.create_alert_rule()
    incident = self.create_incident(alert_rule=alert_rule, status=IncidentStatus.CLOSED.value)
    integration = Integration.objects.create(
        external_id="1",
        provider="slack",
        metadata={
            "access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
            "installation_type": "born_as_bot",
        },
    )
    action = AlertRuleTriggerAction.objects.create(
        alert_rule_trigger=self.create_alert_rule_trigger(),
        type=AlertRuleTriggerAction.Type.SLACK.value,
        target_type=AlertRuleTriggerAction.TargetType.SPECIFIC.value,
        target_identifier="some_id",
        target_display="#hello",
        integration=integration,
        sentry_app=None,
    )
    integration.delete()
    handler = SlackActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        handler.fire(metric_value, IncidentStatus(incident.status))
def get(self, request: Request) -> Response:
    organization = Organization(slug="myorg")
    project = Project(slug="myproject", organization=organization)

    query = SnubaQuery(
        time_window=60, query="transaction:/some/transaction", aggregate="count()"
    )
    alert_rule = AlertRule(id=1, organization=organization, name="My Alert", snuba_query=query)
    incident = Incident(
        id=2,
        identifier=123,
        organization=organization,
        title="Something broke",
        alert_rule=alert_rule,
        status=IncidentStatus.CRITICAL,
    )
    trigger = AlertRuleTrigger(alert_rule=alert_rule)

    context = generate_incident_trigger_email_context(
        project, incident, trigger, TriggerStatus.ACTIVE, IncidentStatus(incident.status)
    )
    return MailPreview(
        text_template="sentry/emails/incidents/trigger.txt",
        html_template="sentry/emails/incidents/trigger.html",
        context=context,
    ).render(request)
def build_incident_attachment(
    incident: Incident,
    metric_value: Optional[int] = None,
) -> SlackBody:
    """@deprecated"""
    return SlackIncidentsMessageBuilder(
        incident, IncidentStatus(incident.status), metric_value
    ).build()
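# Usage sketch: the deprecated wrapper above only preserves the old two-argument
# signature, so new call sites can construct the builder directly (the call below is
# taken from the wrapper's own body; `incident` and `metric_value` stand in for real
# values from the caller).
attachment = SlackIncidentsMessageBuilder(
    incident, IncidentStatus(incident.status), metric_value
).build()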
def validate_status(self, value):
    try:
        value = IncidentStatus(value)
    except Exception:
        raise serializers.ValidationError(
            "Invalid value for status. Valid values: {}".format(
                [e.value for e in IncidentStatus]
            )
        )
    return value
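# Minimal sketch of how a DRF field-level validator like the one above gets wired up.
# The serializer class and field here are illustrative, not Sentry's actual code; DRF
# calls validate_<field_name> automatically during is_valid().
from rest_framework import serializers


class IncidentUpdateSerializer(serializers.Serializer):  # hypothetical serializer
    status = serializers.IntegerField()

    def validate_status(self, value):
        try:
            return IncidentStatus(value)  # coerce the raw int to the enum
        except ValueError:
            raise serializers.ValidationError(
                "Invalid value for status. Valid values: {}".format(
                    [e.value for e in IncidentStatus]
                )
            )


serializer = IncidentUpdateSerializer(data={"status": 2})
if serializer.is_valid():
    status = serializer.validated_data["status"]  # now an IncidentStatus member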
def incident_status_info(incident, metric_value, action, method):
    if action and method:
        # Get status from trigger
        incident_status = (
            IncidentStatus.CLOSED
            if method == "resolve"
            else (
                IncidentStatus.CRITICAL
                if action.alert_rule_trigger.label == CRITICAL_TRIGGER_LABEL
                else IncidentStatus.WARNING
            )
        )
    else:
        incident_status = incident.status
    return IncidentStatus(incident_status)
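# Decision-table sketch for the mapping above, using stand-in objects (SimpleNamespace
# stubs; CRITICAL_TRIGGER_LABEL is assumed to come from the surrounding module): a
# "resolve" always maps to CLOSED, otherwise the trigger label picks CRITICAL vs.
# WARNING, and with no action/method the incident's own status is used.
from types import SimpleNamespace

critical = SimpleNamespace(alert_rule_trigger=SimpleNamespace(label=CRITICAL_TRIGGER_LABEL))
warning = SimpleNamespace(alert_rule_trigger=SimpleNamespace(label="warning"))
incident = SimpleNamespace(status=IncidentStatus.CLOSED.value)

assert incident_status_info(incident, None, critical, "resolve") == IncidentStatus.CLOSED
assert incident_status_info(incident, None, critical, "fire") == IncidentStatus.CRITICAL
assert incident_status_info(incident, None, warning, "fire") == IncidentStatus.WARNING
assert incident_status_info(incident, None, None, None) == IncidentStatus.CLOSED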
def validate_status(self, attrs, source):
    value = attrs[source]
    try:
        attrs[source] = IncidentStatus(value)
    except Exception:
        raise serializers.ValidationError(
            'Invalid value for status. Valid values: {}'.format(
                [e.value for e in IncidentStatus],
            ),
        )
    return attrs
def incident_attachment_info(incident, metric_value=None):
    logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
    alert_rule = incident.alert_rule

    status = INCIDENT_STATUS[IncidentStatus(incident.status)]

    agg_text = QUERY_AGGREGATION_DISPLAY.get(
        alert_rule.snuba_query.aggregate, alert_rule.snuba_query.aggregate
    )
    if metric_value is None:
        incident_trigger = (
            IncidentTrigger.objects.filter(incident=incident).order_by("-date_modified").first()
        )
        if incident_trigger:
            alert_rule_trigger = incident_trigger.alert_rule_trigger
            # TODO: If we're relying on this and expecting possible delays between a
            # trigger fired and this function running, then this could actually be
            # incorrect if they changed the trigger's time window in this time period.
            # Should we store it?
            start = incident_trigger.date_modified - timedelta(
                seconds=alert_rule_trigger.alert_rule.snuba_query.time_window
            )
            end = incident_trigger.date_modified
        else:
            start, end = None, None
        metric_value = get_incident_aggregates(incident, start, end, use_alert_aggregate=True)[
            "count"
        ]

    time_window = alert_rule.snuba_query.time_window // 60
    text = "{} {} in the last {} minutes".format(metric_value, agg_text, time_window)
    if alert_rule.snuba_query.query != "":
        text += "\nFilter: {}".format(alert_rule.snuba_query.query)

    ts = incident.date_started

    title = "{}: {}".format(status, alert_rule.name)

    title_link = absolute_uri(
        reverse(
            "sentry-metric-alert",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )

    return {
        "title": title,
        "text": text,
        "logo_url": logo_url,
        "status": status,
        "ts": ts,
        "title_link": title_link,
    }
def run_test(self, incident, method):
    from sentry.integrations.slack.message_builder.incidents import build_incident_attachment

    token = "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
    integration = Integration.objects.create(
        external_id="1",
        provider="slack",
        metadata={"access_token": token, "installation_type": "born_as_bot"},
    )
    integration.add_organization(self.organization, self.user)

    channel_id = "some_id"
    channel_name = "#hello"
    responses.add(
        method=responses.GET,
        url="https://slack.com/api/conversations.list",
        status=200,
        content_type="application/json",
        body=json.dumps(
            {"ok": "true", "channels": [{"name": channel_name[1:], "id": channel_id}]}
        ),
    )

    action = self.create_alert_rule_trigger_action(
        target_identifier=channel_name,
        type=AlertRuleTriggerAction.Type.SLACK,
        target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
        integration=integration,
    )

    responses.add(
        method=responses.POST,
        url="https://slack.com/api/chat.postMessage",
        status=200,
        content_type="application/json",
        body='{"ok": true}',
    )

    handler = SlackActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        getattr(handler, method)(metric_value, IncidentStatus(incident.status))

    data = parse_qs(responses.calls[1].request.body)
    assert data["channel"] == [channel_id]
    assert data["token"] == [token]
    assert json.loads(data["attachments"][0])[0] == build_incident_attachment(
        incident, metric_value
    )
def test(self):
    status = TriggerStatus.ACTIVE
    action = self.create_alert_rule_trigger_action()
    incident = self.create_incident()
    handler = EmailActionHandler(action, incident, self.project)
    expected = {
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "rule_link": absolute_uri(
            reverse(
                "sentry-alert-rule",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "project_slug": self.project.slug,
                    "alert_rule_id": action.alert_rule_trigger.alert_rule_id,
                },
            )
        ),
        "incident_name": incident.title,
        "aggregate": handler.query_aggregations_display[
            QueryAggregations(action.alert_rule_trigger.alert_rule.aggregation)
        ],
        "query": action.alert_rule_trigger.alert_rule.query,
        "threshold": action.alert_rule_trigger.alert_threshold,
        "status": INCIDENT_STATUS[IncidentStatus(incident.status)],
        "environment": "All",
        "is_critical": False,
        "is_warning": False,
        "threshold_direction_string": ">",
        "time_window": "10 minutes",
        "triggered_at": timezone.now(),
        "unsubscribe_link": None,
    }
    assert expected == handler.generate_email_context(status)
def generate_email_context(self, status):
    trigger = self.action.alert_rule_trigger
    alert_rule = trigger.alert_rule
    is_active = status == TriggerStatus.ACTIVE
    is_threshold_type_above = trigger.threshold_type == AlertRuleThresholdType.ABOVE.value

    # if alert threshold and threshold type is above then show '>'
    # if resolve threshold and threshold type is *BELOW* then show '>'
    # we can simplify this to be the below statement
    show_greater_than_string = is_active == is_threshold_type_above

    environments = list(alert_rule.environment.all())
    environment_string = (
        ", ".join(sorted([env.name for env in environments])) if len(environments) else "All"
    )
    return {
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": self.incident.organization.slug,
                    "incident_id": self.incident.identifier,
                },
            )
        ),
        "rule_link": absolute_uri(
            reverse(
                "sentry-alert-rule",
                kwargs={
                    "organization_slug": self.incident.organization.slug,
                    "project_slug": self.project.slug,
                    "alert_rule_id": self.action.alert_rule_trigger.alert_rule_id,
                },
            )
        ),
        "incident_name": self.incident.title,
        "environment": environment_string,
        "time_window": format_duration(alert_rule.time_window),
        "triggered_at": trigger.date_added,
        "aggregate": self.query_aggregations_display[QueryAggregations(alert_rule.aggregation)],
        "query": alert_rule.query,
        "threshold": trigger.alert_threshold if is_active else trigger.resolve_threshold,
        # if alert threshold and threshold type is above then show '>'
        # if resolve threshold and threshold type is *BELOW* then show '>'
        "threshold_direction_string": ">" if show_greater_than_string else "<",
        "status": INCIDENT_STATUS[IncidentStatus(self.incident.status)],
        "is_critical": self.incident.status == IncidentStatus.CRITICAL,
        "is_warning": self.incident.status == IncidentStatus.WARNING,
        "unsubscribe_link": None,
    }
def send_incident_alert_notification(action, incident, metric_value=None):
    """
    When a metric alert is triggered, send incident data to the SentryApp's webhook.

    :param action: The triggered `AlertRuleTriggerAction`.
    :param incident: The `Incident` for which to build a payload.
    :param metric_value: The value of the metric that triggered this alert to
        fire. If not provided we'll attempt to calculate this ourselves.
    :return:
    """
    sentry_app = action.sentry_app
    organization = incident.organization
    metrics.incr("notifications.sent", instance=sentry_app.slug, skip_internal=False)

    try:
        install = SentryAppInstallation.objects.get(
            organization=organization.id,
            sentry_app=sentry_app,
            status=SentryAppInstallationStatus.INSTALLED,
        )
    except SentryAppInstallation.DoesNotExist:
        logger.info(
            "metric_alert_webhook.missing_installation",
            extra={
                "action": action.id,
                "incident": incident.id,
                "organization": organization.slug,
                "sentry_app_id": sentry_app.id,
            },
        )
        return

    send_and_save_webhook_request(
        sentry_app,
        AppPlatformEvent(
            resource="metric_alert",
            action=INCIDENT_STATUS[IncidentStatus(incident.status)].lower(),
            install=install,
            data=build_incident_attachment(incident, metric_value),
        ),
    )
def generate_incident_trigger_email_context(project, incident, alert_rule_trigger, status):
    trigger = alert_rule_trigger
    incident_trigger = IncidentTrigger.objects.get(incident=incident, alert_rule_trigger=trigger)

    alert_rule = trigger.alert_rule
    snuba_query = alert_rule.snuba_query
    is_active = status == TriggerStatus.ACTIVE
    is_threshold_type_above = alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value

    # if alert threshold and threshold type is above then show '>'
    # if resolve threshold and threshold type is *BELOW* then show '>'
    # we can simplify this to be the below statement
    show_greater_than_string = is_active == is_threshold_type_above
    environment_string = snuba_query.environment.name if snuba_query.environment else "All"

    aggregate = alert_rule.snuba_query.aggregate
    if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:
        aggregate = aggregate.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()

    threshold = trigger.alert_threshold if is_active else alert_rule.resolve_threshold
    if threshold is None:
        # Setting this to trigger threshold because in the case of a resolve if no resolve
        # threshold is specified this will be None. Since we add a comparison sign to the
        # string it makes sense to set this to the trigger alert threshold if no threshold
        # is specified
        threshold = trigger.alert_threshold

    return {
        "link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "rule_link": absolute_uri(
            reverse(
                "sentry-alert-rule",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "project_slug": project.slug,
                    "alert_rule_id": trigger.alert_rule_id,
                },
            )
        ),
        "project_slug": project.slug,
        "incident_name": incident.title,
        "environment": environment_string,
        "time_window": format_duration(snuba_query.time_window / 60),
        "triggered_at": incident_trigger.date_added,
        "aggregate": aggregate,
        "query": snuba_query.query,
        "threshold": threshold,
        # if alert threshold and threshold type is above then show '>'
        # if resolve threshold and threshold type is *BELOW* then show '>'
        "threshold_direction_string": ">" if show_greater_than_string else "<",
        "status": INCIDENT_STATUS[IncidentStatus(incident.status)],
        "status_key": INCIDENT_STATUS[IncidentStatus(incident.status)].lower(),
        "is_critical": incident.status == IncidentStatus.CRITICAL,
        "is_warning": incident.status == IncidentStatus.WARNING,
        "unsubscribe_link": None,
    }
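# Tiny illustration (aggregate string and alias value assumed, not taken from a real
# rule) of the alias stripping above: a crash-rate aggregate stored as
# "<expr> AS <alias>" is cut back to just the expression before it is rendered.
CRASH_RATE_ALERT_AGGREGATE_ALIAS = "_crash_rate_alert_aggregate"  # assumed alias value
aggregate = f"percentage(sessions_crashed, sessions) AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}"
if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:
    aggregate = aggregate.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()
assert aggregate == "percentage(sessions_crashed, sessions)"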
def run_test(self, incident, method):
    from sentry.rules.actions.notify_event_service import build_incident_attachment

    trigger = self.create_alert_rule_trigger(self.alert_rule, "hi", 1000)
    action = self.create_alert_rule_trigger_action(
        alert_rule_trigger=trigger,
        target_identifier=self.sentry_app.id,
        type=AlertRuleTriggerAction.Type.SENTRY_APP,
        target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
        sentry_app=self.sentry_app,
        sentry_app_config=[
            {"name": "channel", "value": "#santry"},
            {"name": "workspace_name", "value": "santrysantrysantry"},
            {"name": "tag", "value": "triage"},
            {"name": "assignee", "value": "Nisanthan Nanthakumar"},
            {"name": "teamId", "value": 1},
        ],
    )
    responses.add(
        method=responses.POST,
        url="https://example.com/webhook",
        status=200,
        content_type="application/json",
        body=json.dumps({"ok": "true"}),
    )
    handler = SentryAppActionHandler(action, incident, self.project)
    metric_value = 1000
    with self.tasks():
        getattr(handler, method)(metric_value, IncidentStatus(incident.status))

    data = responses.calls[0].request.body
    assert (
        json.dumps(
            build_incident_attachment(incident, IncidentStatus(incident.status), metric_value)
        )
        in data
    )
    # Check that the Alert Rule UI Component settings are returned
    assert json.loads(data)["data"]["metric_alert"]["alert_rule"]["triggers"][0]["actions"][0][
        "settings"
    ] == [
        {"name": "channel", "value": "#santry"},
        {"name": "workspace_name", "value": "santrysantrysantry"},
        {"name": "tag", "value": "triage"},
        {"name": "assignee", "value": "Nisanthan Nanthakumar"},
        {"name": "teamId", "value": 1},
    ]