def get(self, request: Request) -> Response:
    """Render a preview of the incident-trigger email using in-memory fixture data."""
    org = Organization(slug="myorg")
    preview_project = Project(slug="myproject", organization=org)
    snuba_query = SnubaQuery(
        time_window=60, query="transaction:/some/transaction", aggregate="count()"
    )
    rule = AlertRule(id=1, organization=org, name="My Alert", snuba_query=snuba_query)
    preview_incident = Incident(
        id=2,
        identifier=123,
        organization=org,
        title="Something broke",
        alert_rule=rule,
        status=IncidentStatus.CRITICAL,
    )
    rule_trigger = AlertRuleTrigger(alert_rule=rule)
    email_context = generate_incident_trigger_email_context(
        preview_project, preview_incident, rule_trigger, TriggerStatus.ACTIVE
    )
    preview = MailPreview(
        text_template="sentry/emails/incidents/trigger.txt",
        html_template="sentry/emails/incidents/trigger.html",
        context=email_context,
    )
    return preview.render(request)
def test(self):
    """The generated context matches the full expected payload for an active trigger."""
    trigger_status = TriggerStatus.ACTIVE
    incident = self.create_incident()
    action = self.create_alert_rule_trigger_action(triggered_for_incident=incident)
    trigger = action.alert_rule_trigger
    rule = trigger.alert_rule

    incident_link = absolute_uri(
        reverse(
            "sentry-metric-alert",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )
    rule_link = absolute_uri(
        reverse(
            "sentry-alert-rule",
            kwargs={
                "organization_slug": incident.organization.slug,
                "project_slug": self.project.slug,
                "alert_rule_id": trigger.alert_rule_id,
            },
        )
    )
    expected = {
        "link": incident_link,
        "rule_link": rule_link,
        "incident_name": incident.title,
        "aggregate": rule.snuba_query.aggregate,
        "query": rule.snuba_query.query,
        "threshold": trigger.alert_threshold,
        "status": INCIDENT_STATUS[IncidentStatus(incident.status)],
        "status_key": INCIDENT_STATUS[IncidentStatus(incident.status)].lower(),
        "environment": "All",
        "is_critical": False,
        "is_warning": False,
        "threshold_direction_string": ">",
        "time_window": "10 minutes",
        # NOTE(review): comparing against timezone.now() is only deterministic if
        # this test runs under frozen time — confirm the test class freezes time.
        "triggered_at": timezone.now(),
        "project_slug": self.project.slug,
        "unsubscribe_link": None,
    }
    assert expected == generate_incident_trigger_email_context(
        self.project, incident, trigger, trigger_status
    )
def test_resolve(self):
    """A resolved trigger reports the resolve threshold with a flipped direction."""
    incident = self.create_incident()
    action = self.create_alert_rule_trigger_action(triggered_for_incident=incident)
    context = generate_incident_trigger_email_context(
        self.project, incident, action.alert_rule_trigger, TriggerStatus.RESOLVED
    )
    assert context["threshold"] == 100
    assert context["threshold_direction_string"] == "<"
def test(self):
    """Full integration test: a subscription update flowing through the
    QuerySubscriptionConsumer fires the incidents callback, creates an
    incident, and sends exactly one trigger email matching the one built
    directly via the email action handler.
    """
    message = {
        "version": 1,
        "payload": {
            "subscription_id": self.subscription.subscription_id,
            "values": {"data": [{"some_col": self.trigger.alert_threshold + 1}]},
            "timestamp": "2020-01-01T01:23:45.1234",
        },
    }
    self.producer.produce(self.topic, json.dumps(message))
    self.producer.flush()

    def active_incident():
        # An incident counts as "active" once triggered and not yet closed.
        return Incident.objects.filter(
            type=IncidentType.ALERT_TRIGGERED.value, alert_rule=self.rule
        ).exclude(status=IncidentStatus.CLOSED.value)

    consumer = QuerySubscriptionConsumer("hi", topic=self.topic)

    original_callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

    def shutdown_callback(*args, **kwargs):
        # We want to just exit after the callback so that we can see the result of
        # processing.
        original_callback(*args, **kwargs)
        consumer.shutdown()

    # Patch the global registry only for the duration of the run and restore it
    # afterwards, so the shutdown behavior doesn't leak into other tests.
    subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = shutdown_callback
    try:
        with self.feature(["organizations:incidents", "organizations:performance-view"]):
            with self.assertChanges(
                lambda: active_incident().exists(), before=False, after=True
            ), self.tasks(), self.capture_on_commit_callbacks(execute=True):
                consumer.run()
    finally:
        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = original_callback

    assert len(mail.outbox) == 1
    handler = EmailActionHandler(self.action, active_incident().get(), self.project)
    # Build the expected email directly from the handler for comparison
    # (renamed from the original's reused `message` local for clarity).
    expected_message = handler.build_message(
        generate_incident_trigger_email_context(
            handler.project,
            handler.incident,
            handler.action.alert_rule_trigger,
            TriggerStatus.ACTIVE,
        ),
        TriggerStatus.ACTIVE,
        self.user.id,
    )
    out = mail.outbox[0]
    assert out.to == [self.user.email]
    assert out.subject == expected_message.subject
    built_message = expected_message.build(self.user.email)
    assert out.body == built_message.body
def test_environment(self):
    """When the alert rule is scoped to an environment, its name appears in the context."""
    envs = [
        self.create_environment(project=self.project, name="prod"),
        self.create_environment(project=self.project, name="dev"),
    ]
    rule = self.create_alert_rule(environment=envs[0])
    trigger = self.create_alert_rule_trigger(alert_rule=rule)
    incident = self.create_incident()
    action = self.create_alert_rule_trigger_action(
        alert_rule_trigger=trigger, triggered_for_incident=incident
    )
    context = generate_incident_trigger_email_context(
        self.project, incident, action.alert_rule_trigger, TriggerStatus.ACTIVE
    )
    assert context.get("environment") == "prod"
def test_context_for_crash_rate_alert(self):
    """The aggregate shown for crash-rate alerts must exclude the internal
    ``_crash_rate_alert_aggregate`` alias.
    """
    incident = self.create_incident()
    rule = self.create_alert_rule(
        aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
    )
    trigger = self.create_alert_rule_trigger(rule)
    action = self.create_alert_rule_trigger_action(
        alert_rule_trigger=trigger, triggered_for_incident=incident
    )
    context = generate_incident_trigger_email_context(
        self.project, incident, action.alert_rule_trigger, TriggerStatus.ACTIVE
    )
    assert context["aggregate"] == "percentage(sessions_crashed, sessions)"
def test_context_for_resolved_crash_rate_alert(self):
    """A resolved BELOW-threshold crash-rate alert strips the aggregate alias
    and reports the resolve threshold with a flipped direction string.
    """
    incident = self.create_incident()
    rule = self.create_alert_rule(
        aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
        threshold_type=AlertRuleThresholdType.BELOW,
        query="",
    )
    trigger = self.create_alert_rule_trigger(rule)
    action = self.create_alert_rule_trigger_action(
        alert_rule_trigger=trigger, triggered_for_incident=incident
    )
    context = generate_incident_trigger_email_context(
        self.project, incident, action.alert_rule_trigger, TriggerStatus.RESOLVED
    )
    assert context["aggregate"] == "percentage(sessions_crashed, sessions)"
    assert context["threshold"] == 100
    assert context["threshold_direction_string"] == ">"
def test_resolve_critical_trigger_with_warning(self):
    """Resolving the critical trigger while a warning trigger still exists
    downgrades the incident context to warning instead of fully resolving it.
    """
    rule = self.create_alert_rule()
    incident = self.create_incident(alert_rule=rule)
    critical = self.create_alert_rule_trigger(rule, CRITICAL_TRIGGER_LABEL, 100)
    self.create_alert_rule_trigger_action(critical, triggered_for_incident=incident)
    self.create_alert_rule_trigger(rule, WARNING_TRIGGER_LABEL, 50)

    context = generate_incident_trigger_email_context(
        self.project,
        incident,
        critical,
        TriggerStatus.RESOLVED,
        IncidentStatus.WARNING,
    )
    assert context["threshold"] == 100
    assert context["threshold_direction_string"] == "<"
    assert not context["is_critical"]
    assert context["is_warning"]
    assert context["status"] == "Warning"
    assert context["status_key"] == "warning"