def build_incident_attachment(incident):
    """Build a Slack attachment dict summarizing *incident* (status, event/user counts)."""
    aggregates = get_incident_aggregates(incident)
    # Anything that is not CLOSED renders as "Open".
    if incident.status == IncidentStatus.CLOSED.value:
        status = 'Closed'
    else:
        status = 'Open'
    fields = [
        {'title': field_title, 'value': field_value, 'short': True}
        for field_title, field_value in (
            ('Status', status),
            ('Events', aggregates['count']),
            ('Users', aggregates['unique_users']),
        )
    ]
    incident_label = u'{} (#{})'.format(incident.title, incident.identifier)
    incident_url = absolute_uri(
        reverse(
            'sentry-incident',
            kwargs={
                'organization_slug': incident.organization.slug,
                'incident_id': incident.identifier,
            },
        )
    )
    return {
        'fallback': incident_label,
        'title': incident_label,
        'title_link': incident_url,
        'text': ' ',
        'fields': fields,
        'mrkdwn_in': ['text'],
        'footer_icon': absolute_uri(get_asset_url('sentry', 'images/sentry-email-avatar.png')),
        'footer': 'Sentry Incident',
        'ts': to_timestamp(incident.date_started),
        'color': LEVEL_TO_COLOR['error'],
        'actions': [],
    }
def test(self):
    """Bulk incident stats should agree with the per-incident stat helpers."""
    closed_incident = create_incident(
        self.organization,
        IncidentType.CREATED,
        "Closed",
        "",
        groups=[self.group],
        date_started=timezone.now() - timedelta(days=30),
    )
    update_incident_status(closed_incident, IncidentStatus.CLOSED)
    open_incident = create_incident(
        self.organization,
        IncidentType.CREATED,
        "Open",
        "",
        groups=[self.group],
        date_started=timezone.now() - timedelta(days=30),
    )
    incidents = [closed_incident, open_incident]
    for incident, stats in zip(incidents, bulk_get_incident_stats(incidents)):
        expected_event_stats = get_incident_event_stats(incident)
        bulk_event_stats = stats["event_stats"]
        assert bulk_event_stats.data["data"] == expected_event_stats.data["data"]
        assert bulk_event_stats.start == expected_event_stats.start
        assert bulk_event_stats.end == expected_event_stats.end
        assert bulk_event_stats.rollup == expected_event_stats.rollup
        expected_aggregates = get_incident_aggregates(incident)
        assert stats["total_events"] == expected_aggregates["count"]
        assert stats["unique_users"] == expected_aggregates["unique_users"]
def test_groups(self):
    """Group-scoped aggregates count only events whose fingerprint matches the group."""
    fingerprint = 'group'
    group = self.create_event(
        self.now - timedelta(minutes=1), fingerprint=fingerprint
    ).group
    two_minutes_ago = self.now - timedelta(minutes=2)
    # Three matching events from two distinct users, plus two events in an
    # unrelated group that must be excluded from the aggregates.
    for user_id, event_fingerprint in (
        (123, fingerprint),
        (123, fingerprint),
        (123, 'other'),
        (124, fingerprint),
        (124, 'other'),
    ):
        self.create_event(
            two_minutes_ago, user={'id': user_id}, fingerprint=event_fingerprint
        )
    incident = self.create_incident(
        date_started=self.now - timedelta(minutes=5),
        query='',
        projects=[],
        groups=[group],
    )
    assert get_incident_aggregates(incident) == {'count': 4, 'unique_users': 2}
def build_incident_attachment(incident):
    """Build a Slack attachment dict for *incident*, titled "INCIDENT: ..."."""
    aggregates = get_incident_aggregates(incident)
    # Only CLOSED maps to "Closed"; every other status is shown as "Open".
    if incident.status == IncidentStatus.CLOSED.value:
        status = "Closed"
    else:
        status = "Open"
    fields = [
        {"title": name, "value": value, "short": True}
        for name, value in (
            ("Status", status),
            ("Events", aggregates["count"]),
            ("Users", aggregates["unique_users"]),
        )
    ]
    title = u"INCIDENT: {} (#{})".format(incident.title, incident.identifier)
    title_link = absolute_uri(
        reverse(
            "sentry-incident",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )
    return {
        "fallback": title,
        "title": title,
        "title_link": title_link,
        "text": " ",
        "fields": fields,
        "mrkdwn_in": ["text"],
        "footer_icon": absolute_uri(
            get_asset_url("sentry", "images/sentry-email-avatar.png")
        ),
        "footer": "Sentry Incident",
        "ts": to_timestamp(incident.date_started),
        "color": LEVEL_TO_COLOR["error"],
        "actions": [],
    }
def incident_attachment_info(incident, metric_value=None):
    """Return title/text/link metadata describing a metric-alert incident.

    If *metric_value* is not supplied, it is computed from the incident's
    latest trigger window via ``get_incident_aggregates``.
    """
    alert_rule = incident.alert_rule
    latest_trigger = (
        IncidentTrigger.objects.filter(incident=incident)
        .order_by("-date_modified")
        .first()
    )
    if latest_trigger:
        rule_trigger = latest_trigger.alert_rule_trigger
        # TODO: If we're relying on this and expecting possible delays between a
        # trigger fired and this function running, then this could actually be
        # incorrect if they changed the trigger's time window in this time
        # period. Should we store it?
        window_end = latest_trigger.date_modified
        window_start = window_end - timedelta(
            seconds=rule_trigger.alert_rule.snuba_query.time_window
        )
    else:
        window_start = window_end = None
    # NOTE(review): `status` is only bound for CLOSED/WARNING/CRITICAL; an
    # OPEN incident would raise NameError below — presumably callers never
    # pass one. Confirm before relying on this with other statuses.
    if incident.status == IncidentStatus.CLOSED.value:
        status = "Resolved"
    elif incident.status == IncidentStatus.WARNING.value:
        status = "Warning"
    elif incident.status == IncidentStatus.CRITICAL.value:
        status = "Critical"
    agg_text = QUERY_AGGREGATION_DISPLAY.get(
        alert_rule.snuba_query.aggregate, alert_rule.snuba_query.aggregate
    )
    if metric_value is None:
        metric_value = get_incident_aggregates(
            incident, window_start, window_end, use_alert_aggregate=True
        )["count"]
    time_window = alert_rule.snuba_query.time_window // 60
    text = "{} {} in the last {} minutes".format(metric_value, agg_text, time_window)
    if alert_rule.snuba_query.query != "":
        text = text + "\nFilter: {}".format(alert_rule.snuba_query.query)
    title_link = absolute_uri(
        reverse(
            "sentry-metric-alert",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )
    return {
        "title": u"{}: {}".format(status, alert_rule.name),
        "text": text,
        "logo_url": absolute_uri(
            get_asset_url("sentry", "images/sentry-email-avatar.png")
        ),
        "status": status,
        "ts": incident.date_started,
        "title_link": title_link,
    }
def incident_attachment_info(incident, metric_value=None, action=None, method=None):
    """Return title/text/link metadata describing a metric-alert incident.

    When *metric_value* is None it is derived from the incident's most
    recently modified trigger window via ``get_incident_aggregates``.
    """
    alert_rule = incident.alert_rule
    status = INCIDENT_STATUS[incident_status_info(incident, metric_value, action, method)]
    snuba_query = alert_rule.snuba_query
    agg_text = QUERY_AGGREGATION_DISPLAY.get(snuba_query.aggregate, snuba_query.aggregate)
    if metric_value is None:
        latest_trigger = (
            IncidentTrigger.objects.filter(incident=incident)
            .order_by("-date_modified")
            .first()
        )
        if latest_trigger is None:
            window_start = window_end = None
        else:
            # TODO: If we're relying on this and expecting possible delays between a
            # trigger fired and this function running, then this could actually be
            # incorrect if they changed the trigger's time window in this time period.
            # Should we store it?
            window_end = latest_trigger.date_modified
            window_start = window_end - timedelta(
                seconds=latest_trigger.alert_rule_trigger.alert_rule.snuba_query.time_window
            )
        metric_value = get_incident_aggregates(
            incident, window_start, window_end, use_alert_aggregate=True
        )["count"]
    time_window = snuba_query.time_window // 60
    text = f"{metric_value} {agg_text} in the last {time_window} minutes"
    if snuba_query.query != "":
        text += f"\nFilter: {snuba_query.query}"
    title_link = absolute_uri(
        reverse(
            "sentry-metric-alert",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )
    return {
        "title": f"{status}: {alert_rule.name}",
        "text": text,
        "logo_url": absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")),
        "status": status,
        "ts": incident.date_started,
        "title_link": title_link,
    }
def test_projects(self):
    """Project-scoped aggregates count every project event since the incident started."""
    incident = self.create_incident(
        date_started=self.now - timedelta(minutes=5),
        query='',
        projects=[self.project],
    )
    two_minutes_ago = self.now - timedelta(minutes=2)
    self.create_event(self.now - timedelta(minutes=1))
    # Duplicate user 123 must be deduplicated in unique_users.
    self.create_event(two_minutes_ago, user={'id': 123})
    self.create_event(two_minutes_ago, user={'id': 123})
    self.create_event(two_minutes_ago, user={'id': 124})
    assert get_incident_aggregates(incident) == {'count': 4, 'unique_users': 2}
def build_incident_attachment(incident):
    """Build a Slack attachment dict for a metric-alert *incident*.

    The attachment title reflects the incident status and its color tracks
    severity; the body text summarizes the aggregate that fired the alert.
    """
    logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
    alert_rule = incident.alert_rule
    aggregates = get_incident_aggregates(incident)
    # NOTE(review): `status`/`color` are only bound for CLOSED/WARNING/CRITICAL;
    # an OPEN incident would raise NameError below — presumably unreachable
    # here, but confirm against callers.
    if incident.status == IncidentStatus.CLOSED.value:
        status = "Resolved"
        color = RESOLVED_COLOR
    elif incident.status == IncidentStatus.WARNING.value:
        status = "Warning"
        color = LEVEL_TO_COLOR["warning"]
    elif incident.status == IncidentStatus.CRITICAL.value:
        status = "Critical"
        color = LEVEL_TO_COLOR["fatal"]
    agg_text = QUERY_AGGREGATION_DISPLAY[alert_rule.aggregation]
    agg_value = (
        aggregates["count"]
        if alert_rule.aggregation == QueryAggregations.TOTAL.value
        else aggregates["unique_users"]
    )
    time_window = alert_rule.time_window
    text = "{} {} in the last {} minutes".format(agg_value, agg_text, time_window)
    if alert_rule.query != "":
        # BUG FIX: was "\Filter: {}" — "\F" is not a valid escape, so the text
        # contained a literal backslash instead of a newline before "Filter:".
        # The sibling implementation uses "\nFilter: {}".
        text = text + "\nFilter: {}".format(alert_rule.query)
    ts = incident.date_started
    title = u"{}: {}".format(status, alert_rule.name)
    return {
        "fallback": title,
        "title": title,
        "title_link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "text": text,
        "fields": [],
        "mrkdwn_in": ["text"],
        "footer_icon": logo_url,
        "footer": "Sentry Incident",
        "ts": to_timestamp(ts),
        "color": color,
        "actions": [],
    }
def test_projects(self):
    """Aggregates for a project incident: 4 events total, 2 distinct users."""
    incident = self.create_incident(
        date_started=self.now - timedelta(minutes=5),
        query='',
        projects=[self.project],
    )
    self.create_event(self.now - timedelta(minutes=1))
    for user_id in (123, 123, 124):
        self.create_event(self.now - timedelta(minutes=2), user={'id': user_id})
    assert get_incident_aggregates(incident) == {'count': 4, 'unique_users': 2}
def test_windowed(self):
    """A windowed snapshot must mirror the windowed event-stat snapshot and aggregates."""
    incident = self.create_incident(self.organization)
    incident.update(status=IncidentStatus.CLOSED.value)
    snapshot = create_incident_snapshot(incident, windowed_stats=True)
    expected = create_event_stat_snapshot(incident, windowed_stats=True)
    stats_snapshot = snapshot.event_stats_snapshot
    assert stats_snapshot.start == expected.start
    assert stats_snapshot.end == expected.end
    assert stats_snapshot.values == expected.values
    assert stats_snapshot.period == expected.period
    assert stats_snapshot.date_added == expected.date_added
    aggregates = get_incident_aggregates(incident)
    assert snapshot.unique_users == aggregates["unique_users"]
    assert snapshot.total_events == aggregates["count"]
def get_attrs(self, item_list, user, **kwargs):
    """Collect per-incident serializer attributes: project slugs, event stats, aggregates."""
    # Map incident_id -> list of project slugs in a single prefetched query.
    incident_projects = defaultdict(list)
    related = IncidentProject.objects.filter(
        incident__in=item_list).select_related('project')
    for link in related:
        incident_projects[link.incident_id].append(link.project.slug)
    return {
        incident: {
            'projects': incident_projects.get(incident.id, []),
            'event_stats': get_incident_event_stats(incident),
            'aggregates': get_incident_aggregates(incident),
        }
        for incident in item_list
    }
def get_attrs(self, item_list, user, **kwargs):
    """Collect per-incident serializer attributes: project slugs, event stats, aggregates.

    Returns a dict keyed by incident with ``projects``, ``event_stats`` and
    ``aggregates`` entries for each item in *item_list*.
    """
    incident_projects = defaultdict(list)
    for incident_project in IncidentProject.objects.filter(
            incident__in=item_list).select_related('project'):
        incident_projects[incident_project.incident_id].append(incident_project.project.slug)
    results = {}
    # BUG FIX: the second loop previously wrote to `results[item]` — the stale
    # loop variable from the first loop — so every incident's stats overwrote
    # the last item's dict and all other incidents lacked stats/aggregates.
    for incident in item_list:
        results[incident] = {
            'projects': incident_projects.get(incident.id, []),
            'event_stats': get_incident_event_stats(incident),
            'aggregates': get_incident_aggregates(incident),
        }
    return results
def test_groups(self):
    """Group-scoped aggregates exclude events belonging to other fingerprints."""
    fp = 'group'
    group = self.create_event(self.now - timedelta(minutes=1), fingerprint=fp).group
    earlier = self.now - timedelta(minutes=2)
    self.create_event(earlier, user={'id': 123}, fingerprint=fp)
    self.create_event(earlier, user={'id': 123}, fingerprint=fp)
    self.create_event(earlier, user={'id': 123}, fingerprint='other')
    self.create_event(earlier, user={'id': 124}, fingerprint=fp)
    self.create_event(earlier, user={'id': 124}, fingerprint='other')
    incident = self.create_incident(
        date_started=self.now - timedelta(minutes=5),
        query='',
        projects=[],
        groups=[group],
    )
    expected = {'count': 4, 'unique_users': 2}
    assert get_incident_aggregates(incident) == expected
def test(self):
    """Snapshot of a closed incident must match a fresh event-stat snapshot and aggregates."""
    incident = self.create_incident(self.organization)
    incident.update(status=IncidentStatus.CLOSED.value)
    snapshot = create_incident_snapshot(incident)
    expected = create_event_stat_snapshot(
        incident,
        incident.date_started,
        incident.date_closed,
    )
    stats_snapshot = snapshot.event_stats_snapshot
    assert stats_snapshot.start == expected.start
    assert stats_snapshot.end == expected.end
    assert stats_snapshot.values == expected.values
    assert stats_snapshot.period == expected.period
    assert stats_snapshot.date_added == expected.date_added
    aggregates = get_incident_aggregates(incident)
    assert snapshot.unique_users == aggregates['unique_users']
    assert snapshot.total_events == aggregates['count']
def test(self):
    """Bulk stats must match per-incident helpers; only the first incident gets a prewindow."""
    closed_incident = create_incident(
        self.organization,
        IncidentType.ALERT_TRIGGERED,
        "Closed",
        "",
        QueryAggregations.TOTAL,
        groups=[self.group],
        date_started=timezone.now() - timedelta(days=30),
    )
    update_incident_status(closed_incident, IncidentStatus.CLOSED)
    open_incident = create_incident(
        self.organization,
        IncidentType.ALERT_TRIGGERED,
        "Open",
        "",
        QueryAggregations.TOTAL,
        groups=[self.group],
        date_started=timezone.now() - timedelta(days=30),
    )
    incidents = [closed_incident, open_incident]
    for index, (incident, stats) in enumerate(
            zip(incidents, bulk_get_incident_stats(incidents))):
        event_stats = get_incident_event_stats(incident)
        bulk_event_stats = stats["event_stats"]
        assert bulk_event_stats.data["data"] == event_stats.data["data"]
        expected_start = bulk_event_stats.start
        expected_end = bulk_event_stats.end
        # The prewindow adjustment is only applied on the first iteration,
        # matching the original one-shot `changed` flag.
        if index == 0:
            expected_start -= calculate_incident_prewindow(
                expected_start, expected_end, incident)
        assert event_stats.start == expected_start
        assert event_stats.end == expected_end
        assert bulk_event_stats.rollup == event_stats.rollup
        aggregates = get_incident_aggregates(incident)
        assert stats["total_events"] == aggregates["count"]
        assert stats["unique_users"] == aggregates["unique_users"]
def test_groups(self):
    """The group-scoped fixture incident aggregates to 4 events from 2 users."""
    expected = {"count": 4, "unique_users": 2}
    assert get_incident_aggregates(self.group_incident) == expected
def test_projects(self):
    """The project-scoped fixture incident aggregates to 4 events from 2 users."""
    expected = {"count": 4, "unique_users": 2}
    assert get_incident_aggregates(self.project_incident) == expected
def incident_attachment_info(incident, new_status: IncidentStatus, metric_value=None):
    """Return title/text/link metadata for a metric-alert incident notification.

    *new_status* selects the displayed status label. When *metric_value* is
    None it is computed over the latest trigger's time window; crash-rate
    aggregates are special-cased so a missing value renders as "No ...".
    """
    alert_rule = incident.alert_rule
    status = INCIDENT_STATUS[new_status]
    aggregate = alert_rule.snuba_query.aggregate
    # Crash-rate aggregates carry an "... AS <alias>" suffix that must be
    # stripped before the display-name lookup.
    agg_display_key = aggregate
    if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:
        agg_display_key = agg_display_key.split(
            f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}"
        )[0].strip()
    agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, aggregate)
    if metric_value is None:
        latest_trigger = (
            IncidentTrigger.objects.filter(incident=incident)
            .order_by("-date_modified")
            .first()
        )
        if latest_trigger is None:
            window_start = window_end = None
        else:
            # TODO: If we're relying on this and expecting possible delays between a
            # trigger fired and this function running, then this could actually be
            # incorrect if they changed the trigger's time window in this time period.
            # Should we store it?
            window_end = latest_trigger.date_modified
            window_start = window_end - timedelta(
                seconds=latest_trigger.alert_rule_trigger.alert_rule.snuba_query.time_window
            )
        metric_value = get_incident_aggregates(
            incident=incident, start=window_start, end=window_end
        ).get("count")
    time_window = alert_rule.snuba_query.time_window // 60
    # Percentage aggregates ("% ...") render the value with no space; a
    # missing value becomes e.g. "No sessions crashed".
    if agg_text.startswith("%"):
        metric_and_agg_text = (
            f"{metric_value}{agg_text}" if metric_value is not None else f"No{agg_text[1:]}"
        )
    else:
        metric_and_agg_text = f"{metric_value} {agg_text}"
    interval = "minute" if time_window == 1 else "minutes"
    text = _("%(metric_and_agg_text)s in the last %(time_window)d %(interval)s") % {
        "metric_and_agg_text": metric_and_agg_text,
        "time_window": time_window,
        "interval": interval,
    }
    if alert_rule.snuba_query.query != "":
        text += f"\nFilter: {alert_rule.snuba_query.query}"
    title_link = absolute_uri(
        reverse(
            "sentry-metric-alert",
            kwargs={
                "organization_slug": incident.organization.slug,
                "incident_id": incident.identifier,
            },
        )
    )
    return {
        "title": f"{status}: {alert_rule.name}",
        "text": text,
        "logo_url": absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")),
        "status": status,
        "ts": incident.date_started,
        "title_link": title_link,
    }
def test_projects(self):
    """The project-scoped fixture incident aggregates to 4 events from 2 users."""
    aggregates = get_incident_aggregates(self.project_incident)
    assert aggregates == {'count': 4, 'unique_users': 2}
def test_groups(self):
    """The group-scoped fixture incident aggregates to 4 events from 2 users."""
    aggregates = get_incident_aggregates(self.group_incident)
    assert aggregates == {'count': 4, 'unique_users': 2}
def build_incident_attachment(incident):
    """Build a Slack attachment dict for a metric-alert incident.

    Aggregates are computed over the latest trigger's time window when one
    exists; the attachment color tracks the incident severity.
    """
    alert_rule = incident.alert_rule
    latest_trigger = (
        IncidentTrigger.objects.filter(incident=incident)
        .order_by("-date_modified")
        .first()
    )
    if latest_trigger:
        rule_trigger = latest_trigger.alert_rule_trigger
        # TODO: If we're relying on this and expecting possible delays between a
        # trigger fired and this function running, then this could actually be
        # incorrect if they changed the trigger's time window in this time
        # period. Should we store it?
        # NOTE(review): sibling implementations pass `seconds=` here; this one
        # uses `minutes=` — presumably this schema stores time_window in
        # minutes, but verify against the alert_rule model.
        window_end = latest_trigger.date_modified
        window_start = window_end - timedelta(
            minutes=rule_trigger.alert_rule.time_window
        )
    else:
        window_start = window_end = None
    aggregates = get_incident_aggregates(incident, window_start, window_end)
    # NOTE(review): `status`/`color` are only bound for CLOSED/WARNING/CRITICAL;
    # other statuses would raise NameError below.
    if incident.status == IncidentStatus.CLOSED.value:
        status = "Resolved"
        color = RESOLVED_COLOR
    elif incident.status == IncidentStatus.WARNING.value:
        status = "Warning"
        color = LEVEL_TO_COLOR["warning"]
    elif incident.status == IncidentStatus.CRITICAL.value:
        status = "Critical"
        color = LEVEL_TO_COLOR["fatal"]
    agg_text = QUERY_AGGREGATION_DISPLAY[alert_rule.aggregation]
    if alert_rule.aggregation == QueryAggregations.TOTAL.value:
        agg_value = aggregates["count"]
    else:
        agg_value = aggregates["unique_users"]
    text = "{} {} in the last {} minutes".format(
        agg_value, agg_text, alert_rule.time_window
    )
    if alert_rule.query != "":
        text = text + "\nFilter: {}".format(alert_rule.query)
    title = u"{}: {}".format(status, alert_rule.name)
    return {
        "fallback": title,
        "title": title,
        "title_link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "text": text,
        "fields": [],
        "mrkdwn_in": ["text"],
        "footer_icon": absolute_uri(
            get_asset_url("sentry", "images/sentry-email-avatar.png")
        ),
        "footer": "Sentry Incident",
        "ts": to_timestamp(incident.date_started),
        "color": color,
        "actions": [],
    }
def build_incident_attachment(incident, metric_value=None):
    """
    Builds an incident attachment for slack unfurling
    :param incident: The `Incident` to build the attachment for
    :param metric_value: The value of the metric that triggered this alert to
        fire. If not provided we'll attempt to calculate this ourselves.
    :return: a Slack attachment dict (title, text, color, footer, ...)
    """
    logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
    alert_rule = incident.alert_rule
    incident_trigger = (
        IncidentTrigger.objects.filter(incident=incident).order_by("-date_modified").first()
    )
    if incident_trigger:
        alert_rule_trigger = incident_trigger.alert_rule_trigger
        # TODO: If we're relying on this and expecting possible delays between a
        # trigger fired and this function running, then this could actually be
        # incorrect if they changed the trigger's time window in this time
        # period. Should we store it?
        start = incident_trigger.date_modified - timedelta(
            seconds=alert_rule_trigger.alert_rule.snuba_query.time_window
        )
        end = incident_trigger.date_modified
    else:
        start, end = None, None
    # NOTE(review): `status`/`color` are only bound for CLOSED/WARNING/CRITICAL;
    # other statuses would raise NameError below.
    if incident.status == IncidentStatus.CLOSED.value:
        status = "Resolved"
        color = RESOLVED_COLOR
    elif incident.status == IncidentStatus.WARNING.value:
        status = "Warning"
        color = LEVEL_TO_COLOR["warning"]
    elif incident.status == IncidentStatus.CRITICAL.value:
        status = "Critical"
        color = LEVEL_TO_COLOR["fatal"]
    agg_text = QUERY_AGGREGATION_DISPLAY.get(
        alert_rule.snuba_query.aggregate, alert_rule.snuba_query.aggregate
    )
    if metric_value is None:
        metric_value = get_incident_aggregates(
            incident, start, end, use_alert_aggregate=True
        )["count"]
    # BUG FIX: time_window is stored in seconds; true division yields a float
    # in Python 3 (e.g. "10.0 minutes"). Use floor division like the sibling
    # `incident_attachment_info` implementations.
    time_window = alert_rule.snuba_query.time_window // 60
    text = "{} {} in the last {} minutes".format(metric_value, agg_text, time_window)
    if alert_rule.snuba_query.query != "":
        text = text + "\nFilter: {}".format(alert_rule.snuba_query.query)
    ts = incident.date_started
    title = u"{}: {}".format(status, alert_rule.name)
    return {
        "fallback": title,
        "title": title,
        "title_link": absolute_uri(
            reverse(
                "sentry-metric-alert",
                kwargs={
                    "organization_slug": incident.organization.slug,
                    "incident_id": incident.identifier,
                },
            )
        ),
        "text": text,
        "fields": [],
        "mrkdwn_in": ["text"],
        "footer_icon": logo_url,
        "footer": "Sentry Incident",
        "ts": to_timestamp(ts),
        "color": color,
        "actions": [],
    }