def to_context(report, fetch_groups=None):
    series, aggregates, issue_list, release_list = report
    series = [(timestamp, Point(*values)) for timestamp, values in series]
    return {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'comparisons': [
            ('last week', change(aggregates[-1], aggregates[-2])),
            ('last month', change(
                aggregates[-1],
                mean(aggregates) if all(v is not None for v in aggregates) else None,
            )),
        ],
        'issue_list': rewrite_issue_list(
            issue_list,
            fetch_groups,
        ),
    }
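# Illustrative sketch (assumption, not part of the original module): the
# to_context variants in this section rely on a few small helpers -- Point,
# change, and mean -- whose real definitions live elsewhere. The stand-ins
# below only show the shapes the code above depends on.
from collections import namedtuple

# Each series value is assumed to unpack into a (resolved, unresolved) pair,
# which is what makes sum(point) and point.resolved meaningful above.
Point = namedtuple('Point', ('resolved', 'unresolved'))


def change(current, previous):
    # Hypothetical helper: relative change between the latest aggregate and a
    # reference value, or None when there is nothing to compare against.
    if not previous:
        return None
    return (current - previous) / previous


def mean(values):
    # Hypothetical helper: arithmetic mean of a sequence of numbers.
    values = list(values)
    return sum(values) / len(values)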
def check_project_alerts(project_id, **kwargs):
    """
    Compares the recent rate of events for this project against its historical
    rate and, if the increase exceeds the configured threshold, creates an alert.
    """
    from sentry.app import tsdb
    from sentry.constants import DEFAULT_ALERT_PROJECT_THRESHOLD
    from sentry.models import ProjectOption, Alert

    threshold, min_events = ProjectOption.objects.get_value(
        project_id, 'alert:threshold', DEFAULT_ALERT_PROJECT_THRESHOLD)

    if not threshold and min_events:
        return

    end = datetime.now().replace(tzinfo=utc) - timedelta(seconds=10)
    start = end - timedelta(minutes=5)

    results = [v for _, v in tsdb.get_range(
        tsdb.models.project,
        [project_id],
        start=start,
        end=end,
        rollup=10,
    )[project_id]]

    half_intervals = int(len(results) / 2)
    previous_data, current_data = results[:half_intervals], results[half_intervals:]
    if not current_data:
        return

    current_avg = sum(current_data) / len(current_data)

    # if the first few points within previous data are empty, assume that the
    # project hasn't been active long enough for rates to be valid
    if not any(previous_data[:3]):
        return

    if min_events > current_avg:
        return

    mean = math.mean(previous_data)
    dev = math.mad(previous_data)
    previous_avg = (mean + dev * 2)

    pct_increase = (current_avg / previous_avg * 100) - 100

    logger.info('Rate of events for project %d changed from %.2f to %.2f',
                project_id, previous_avg, current_avg)

    if pct_increase > threshold and current_avg > previous_avg:
        Alert.maybe_alert(
            project_id=project_id,
            message='Rate of events increased from %.2f to %.2f' % (previous_avg, current_avg),
        )
def check_project_alerts(project_id, **kwargs):
    """
    Compares the recent rate of events for this project against its historical
    rate and, if the increase exceeds the configured threshold, creates an alert.
    """
    from sentry.app import tsdb
    from sentry.constants import DEFAULT_ALERT_PROJECT_THRESHOLD
    from sentry.models import ProjectOption, Alert

    threshold, min_events = ProjectOption.objects.get_value(
        project_id, 'alert:threshold', DEFAULT_ALERT_PROJECT_THRESHOLD)

    if not threshold and min_events:
        return

    end = datetime.now().replace(tzinfo=utc) - timedelta(seconds=10)
    start = end - timedelta(minutes=5)

    results = [
        v for _, v in tsdb.get_range(
            tsdb.models.project,
            [project_id],
            start=start,
            end=end,
            rollup=10,
        )[project_id]
    ]

    half_intervals = int(len(results) / 2)
    previous_data, current_data = results[:half_intervals], results[half_intervals:]
    # guard against an empty range, which would otherwise divide by zero below
    if not current_data:
        return

    current_avg = sum(current_data) / len(current_data)

    # if the first few points within previous data are empty, assume that the
    # project hasn't been active long enough for rates to be valid
    if not any(previous_data[:3]):
        return

    if min_events > current_avg:
        return

    mean = math.mean(previous_data)
    dev = math.mad(previous_data)
    previous_avg = (mean + dev * 2)

    pct_increase = (current_avg / previous_avg * 100) - 100

    logger.info('Rate of events for project %d changed from %.2f to %.2f',
                project_id, previous_avg, current_avg)

    if pct_increase > threshold and current_avg > previous_avg:
        Alert.maybe_alert(
            project_id=project_id,
            message='Rate of events increased from %.2f to %.2f' % (previous_avg, current_avg),
        )
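# Illustrative, self-contained sketch of the baseline math used above. It
# assumes math.mean/math.mad behave like the plain statistics functions below;
# the data and threshold are made up for the example.
from statistics import mean as _mean, median as _median


def _mad(values):
    # median absolute deviation: the median distance from the median
    center = _median(values)
    return _median(abs(v - center) for v in values)


previous_data = [10, 12, 11, 13, 12, 11]
current_avg = 30.0

# the baseline is the historical mean plus twice the spread; only averages
# that clear it (and the configured percentage threshold) should alert
previous_avg = _mean(previous_data) + _mad(previous_data) * 2
pct_increase = (current_avg / previous_avg * 100) - 100
# previous_avg == 12.5 and pct_increase == 140.0, so a threshold of 100
# (i.e. a 100% increase) would trigger an alert here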
def to_context(organization, interval, reports):
    report = reduce(merge_reports, reports.values())
    error_series = [
        # Drop the transaction count from each series entry
        (to_datetime(timestamp), Point(*values[:2]))
        for timestamp, values in report.series
    ]
    return {
        # This "error_series" can be removed for new email template
        "error_series": {
            "points": error_series,
            "maximum": max(sum(point) for timestamp, point in error_series),
            "all": sum(sum(point) for timestamp, point in error_series),
            "resolved": sum(point.resolved for timestamp, point in error_series),
        },
        "distribution": {
            "types": list(
                zip(
                    (
                        DistributionType("New", "#DF5120"),
                        DistributionType("Reopened", "#FF7738"),
                        DistributionType("Existing", "#F9C7B9"),
                    ),
                    report.issue_summaries,
                )
            ),
            "total": sum(report.issue_summaries),
        },
        "comparisons": [
            ("last week", change(report.aggregates[-1], report.aggregates[-2])),
            (
                "four week average",
                change(
                    report.aggregates[-1],
                    mean(report.aggregates)
                    if all(v is not None for v in report.aggregates)
                    else None,
                ),
            ),
        ],
        "projects": {"series": build_project_breakdown_series(reports)},
        "calendar": to_calendar(organization, interval, report.calendar_series),
        "key_errors": build_key_errors_ctx(report.key_events, organization),
        "key_transactions": build_key_transactions_ctx(
            report.key_transactions, organization, reports.keys()
        ),
    }
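# merge_reports is defined elsewhere in the module. As a rough, hypothetical
# illustration of what reducing per-project reports into a single report means
# for the series data, two series covering the same buckets could be merged by
# summing their values position by position.
def merge_series_sketch(left, right):
    # assumes both series cover the same timestamps in the same order
    merged = []
    for (ts_a, values_a), (ts_b, values_b) in zip(left, right):
        assert ts_a == ts_b, "series must be aligned on the same buckets"
        merged.append((ts_a, tuple(a + b for a, b in zip(values_a, values_b))))
    return merged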
def check_project_alerts(project_id, when, count, **kwargs):
    """
    Given 'when' and 'count', which signify a recent time window and event
    count, compare them to historical data for this project and, if the
    increase exceeds the configured threshold, create an alert.
    """
    from sentry.conf import settings
    from sentry.models import ProjectCountByMinute, ProjectOption, Alert

    # TODO: make this use the cache
    try:
        threshold, min_events = ProjectOption.objects.get(
            project=project_id,
            key='alert:threshold',
        ).value
    except ProjectOption.DoesNotExist:
        threshold, min_events = settings.DEFAULT_ALERT_PROJECT_THRESHOLD

    if not threshold and min_events:
        return

    if min_events > count:
        return

    # number of 15 minute intervals to capture
    intervals = 8

    max_date = when - timedelta(minutes=MINUTE_NORMALIZATION)
    min_date = max_date - timedelta(minutes=(intervals * MINUTE_NORMALIZATION))

    # get historical data
    data = list(
        ProjectCountByMinute.objects.filter(
            project=project_id,
            date__lte=max_date,
            date__gt=min_date,
        ).values_list('times_seen', flat=True))

    # Bail if we don't have enough data points
    if len(data) != intervals:
        return

    mean = math.mean(data)
    dev = math.mad(data)
    previous = (mean + dev * 2) / MINUTE_NORMALIZATION

    pct_increase = count / previous * 100

    if pct_increase > threshold:
        Alert.maybe_alert(
            project_id=project_id,
            message='Rate of events per minute increased from %d to %d (+%d%%)' % (previous, count, pct_increase),
        )
def check_project_alerts(project_id, when, count, **kwargs):
    """
    Given 'when' and 'count', which signify a recent time window and event
    count, compare them to historical data for this project and, if the
    increase exceeds the configured threshold, create an alert.
    """
    from sentry.conf import settings
    from sentry.models import ProjectCountByMinute, ProjectOption, Alert

    # TODO: make this use the cache
    try:
        threshold, min_events = ProjectOption.objects.get(
            project=project_id,
            key='alert:threshold',
        ).value
    except ProjectOption.DoesNotExist:
        threshold, min_events = settings.DEFAULT_ALERT_PROJECT_THRESHOLD

    if not threshold and min_events:
        return

    if min_events > count:
        return

    # number of 15 minute intervals to capture
    intervals = 8

    max_date = when - timedelta(minutes=MINUTE_NORMALIZATION)
    min_date = max_date - timedelta(minutes=(intervals * MINUTE_NORMALIZATION))

    # get historical data
    data = list(ProjectCountByMinute.objects.filter(
        project=project_id,
        date__lte=max_date,
        date__gt=min_date,
    ).values_list('times_seen', flat=True))

    # Bail if we don't have enough data points
    if len(data) != intervals:
        return

    mean = math.mean(data)
    dev = math.mad(data)
    previous = (mean + dev * 2) / MINUTE_NORMALIZATION

    pct_increase = count / previous * 100

    if pct_increase > threshold:
        Alert.maybe_alert(
            project_id=project_id,
            message='Rate of events per minute increased from %d to %d (+%d%%)' % (previous, count, pct_increase),
        )
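# Worked example of the per-minute math above, with made-up numbers: suppose
# math.mean(data) returned 51.25 and math.mad(data) returned 3.5 for the eight
# 15-minute buckets, and the most recent minute saw 20 events.
MINUTE_NORMALIZATION = 15  # assumed bucket size, defined at module level

previous = (51.25 + 3.5 * 2) / MINUTE_NORMALIZATION  # ~3.88 events per minute
pct_increase = 20 / previous * 100                   # ~515% of the baseline
# with a threshold expressed as a percentage (e.g. 200), this would fire an
# alert reading "increased from 3 to 20 (+515%)"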
def to_context(organization, interval, reports):
    report = reduce(merge_reports, reports.values())
    series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in report.series]
    context = {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'distribution': {
            'types': list(
                zip(
                    (
                        DistributionType('New', '#8477e0'),
                        DistributionType('Reopened', '#6C5FC7'),
                        DistributionType('Existing', '#534a92'),
                    ),
                    report.issue_summaries,
                ),
            ),
            'total': sum(report.issue_summaries),
        },
        'comparisons': [
            ('last week', change(report.aggregates[-1], report.aggregates[-2])),
            ('four week average', change(
                report.aggregates[-1],
                mean(report.aggregates) if all(v is not None for v in report.aggregates) else None,
            )),
        ],
        'projects': {
            'series': build_project_breakdown_series(reports),
        },
    }

    if features.has('organizations:reports:calendar', organization):
        context['calendar'] = to_calendar(
            interval,
            report.calendar_series,
        )

    return context
def to_context(organization, interval, reports):
    report = reduce(merge_reports, reports.values())
    series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in report.series]
    return {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'distribution': {
            'types': list(
                zip(
                    (
                        DistributionType('New', '#8477e0'),
                        DistributionType('Reopened', '#6C5FC7'),
                        DistributionType('Existing', '#534a92'),
                    ),
                    report.issue_summaries,
                ),
            ),
            'total': sum(report.issue_summaries),
        },
        'comparisons': [
            ('last week', change(report.aggregates[-1], report.aggregates[-2])),
            (
                'four week average',
                change(
                    report.aggregates[-1],
                    mean(report.aggregates) if all(v is not None for v in report.aggregates) else None,
                )
            ),
        ],
        'projects': {
            'series': build_project_breakdown_series(reports),
        },
        'calendar': to_calendar(
            interval,
            report.calendar_series,
        ),
    }
def to_context(organization, interval, reports):
    report = reduce(merge_reports, reports.values())
    series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in report.series]
    return {
        "series": {
            "points": series,
            "maximum": max(sum(point) for timestamp, point in series),
            "all": sum([sum(point) for timestamp, point in series]),
            "resolved": sum([point.resolved for timestamp, point in series]),
        },
        "distribution": {
            "types": list(
                zip(
                    (
                        DistributionType("New", "#8477e0"),
                        DistributionType("Reopened", "#6C5FC7"),
                        DistributionType("Existing", "#534a92"),
                    ),
                    report.issue_summaries,
                )
            ),
            "total": sum(report.issue_summaries),
        },
        "comparisons": [
            ("last week", change(report.aggregates[-1], report.aggregates[-2])),
            (
                "four week average",
                change(
                    report.aggregates[-1],
                    mean(report.aggregates) if all(v is not None for v in report.aggregates) else None,
                ),
            ),
        ],
        "projects": {"series": build_project_breakdown_series(reports)},
        "calendar": to_calendar(interval, report.calendar_series),
    }
def to_context(reports):
    series, aggregates, issue_summaries, release_list, usage_summary = reduce(
        merge_reports,
        reports.values(),
    )
    series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in series]
    return {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'distribution': {
            'types': list(
                zip(
                    (
                        DistributionType('New', '#8477e0'),
                        DistributionType('Reopened', '#6C5FC7'),
                        DistributionType('Existing', '#534a92'),
                    ),
                    issue_summaries,
                ),
            ),
            'total': sum(issue_summaries),
        },
        'comparisons': [
            ('last week', change(aggregates[-1], aggregates[-2])),
            ('four week average', change(
                aggregates[-1],
                mean(aggregates) if all(v is not None for v in aggregates) else None,
            )),
        ],
        'projects': {
            'series': build_project_breakdown_series(reports),
        },
    }
def to_context(report, fetch_groups=None):
    series, aggregates, issue_list = report
    series = [(timestamp, Point(*values)) for timestamp, values in series]
    return {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'comparisons': [
            ('last week', change(aggregates[-1], aggregates[-2])),
            ('last month', change(
                aggregates[-1],
                mean(aggregates) if all(v is not None for v in aggregates) else None,
            )),
        ],
        'issue_list': rewrite_issue_list(
            issue_list,
            fetch_groups,
        ),
    }
def to_context(report, fetch_groups=None):
    series, aggregates, issue_list, release_list = report
    series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in series]
    return {
        'series': {
            'points': series,
            'maximum': max(sum(point) for timestamp, point in series),
            'all': sum([sum(point) for timestamp, point in series]),
            'resolved': sum([point.resolved for timestamp, point in series]),
        },
        'distribution': {
            'types': list(
                zip(
                    (
                        DistributionType('New', '#c9c2e1'),
                        DistributionType('Reopened', '#9990ab'),
                        DistributionType('Existing', '#675f76'),
                    ),
                    issue_list[0],
                ),
            ),
            'total': sum(issue_list[0]),
        },
        'comparisons': [
            ('last week', change(aggregates[-1], aggregates[-2])),
            ('last month', change(
                aggregates[-1],
                mean(aggregates) if all(v is not None for v in aggregates) else None,
            )),
        ],
        'issue_list': rewrite_issue_list(
            issue_list,
            fetch_groups,
        ),
    }
def get_timeseries_snuba_filter(selected_columns, query, params):
    snuba_filter = get_filter(query, params)
    if not snuba_filter.start and not snuba_filter.end:
        raise InvalidSearchQuery("Cannot get timeseries result without a start and end.")

    columns = []
    equations = []

    for column in selected_columns:
        if is_equation(column):
            equations.append(strip_equation(column))
        else:
            columns.append(column)

    if len(equations) > 0:
        resolved_equations, updated_columns = resolve_equation_list(
            equations, columns, aggregates_only=True, auto_add=True
        )
    else:
        resolved_equations = []
        updated_columns = columns

    # For the new apdex, we need to add project threshold config as a selected
    # column which means the group by for the time series won't work.
    # As a temporary solution, we will calculate the mean of all the project
    # level thresholds in the request and use the legacy apdex, user_misery
    # or count_miserable calculation.
    # TODO(snql): Alias the project_threshold_config column so it doesn't
    # have to be in the SELECT statement and group by to be able to use new apdex,
    # user_misery and count_miserable.
    threshold = None
    for agg in CONFIGURABLE_AGGREGATES:
        if agg not in updated_columns:
            continue

        if threshold is None:
            project_ids = params.get("project_id")
            threshold_configs = list(
                ProjectTransactionThreshold.objects.filter(
                    organization_id=params["organization_id"],
                    project_id__in=project_ids,
                ).values_list("threshold", flat=True)
            )

            projects_without_threshold = len(project_ids) - len(threshold_configs)
            threshold_configs.extend([DEFAULT_PROJECT_THRESHOLD] * projects_without_threshold)
            threshold = int(mean(threshold_configs))

        updated_columns.remove(agg)
        updated_columns.append(CONFIGURABLE_AGGREGATES[agg].format(threshold=threshold))

    snuba_filter.update_with(
        resolve_field_list(
            updated_columns, snuba_filter, auto_fields=False, resolved_equations=resolved_equations
        )
    )

    # Resolve the public aliases into the discover dataset names.
    snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter)

    if not snuba_filter.aggregations:
        raise InvalidSearchQuery("Cannot get timeseries result with no aggregation.")

    return snuba_filter, translated_columns
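# Self-contained sketch of the threshold-averaging step above: projects with no
# ProjectTransactionThreshold row are padded with the default before the mean
# is taken. The numbers are illustrative, and DEFAULT_PROJECT_THRESHOLD is
# assumed here to be the module-level default (300).
from statistics import mean

DEFAULT_PROJECT_THRESHOLD = 300

project_ids = [1, 2, 3, 4]
threshold_configs = [250, 500]  # say only projects 1 and 2 have stored thresholds

projects_without_threshold = len(project_ids) - len(threshold_configs)
threshold_configs.extend([DEFAULT_PROJECT_THRESHOLD] * projects_without_threshold)
threshold = int(mean(threshold_configs))  # int((250 + 500 + 300 + 300) / 4) == 337

# the averaged value is then substituted into a legacy aggregate template,
# e.g. a template shaped like "apdex({threshold})"
print("apdex({threshold})".format(threshold=threshold))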