def test_environment(self):
    """Swapping the environment on an existing query preserves its event types."""
    snuba_query = create_snuba_query(
        QueryDatasets.EVENTS,
        "hello",
        "count_unique(tags[sentry:user])",
        timedelta(minutes=100),
        timedelta(minutes=2),
        self.environment,
    )
    replacement_env = self.create_environment()
    new_dataset = QueryDatasets.TRANSACTIONS
    new_query = "level:error"
    new_aggregate = "count()"
    new_window = timedelta(minutes=10)
    new_resolution = timedelta(minutes=1)
    # Capture the event types before the update so we can verify they survive.
    original_event_types = snuba_query.event_types
    update_snuba_query(
        snuba_query,
        new_dataset,
        new_query,
        new_aggregate,
        new_window,
        new_resolution,
        replacement_env,
        None,
    )
    assert snuba_query.dataset == new_dataset.value
    assert snuba_query.query == new_query
    assert snuba_query.aggregate == new_aggregate
    assert snuba_query.time_window == int(new_window.total_seconds())
    assert snuba_query.resolution == int(new_resolution.total_seconds())
    assert snuba_query.environment == replacement_env
    assert set(snuba_query.event_types) == set(original_event_types)
def test(self):
    """Event types on a query can be replaced, then replaced again."""
    snuba_query = create_snuba_query(
        QueryDatasets.EVENTS,
        "hello",
        "count_unique(tags[sentry:user])",
        timedelta(minutes=100),
        timedelta(minutes=2),
        self.environment,
        [SnubaQueryEventType.EventType.ERROR],
    )
    new_dataset = QueryDatasets.TRANSACTIONS
    new_query = "level:error"
    new_aggregate = "count()"
    new_window = timedelta(minutes=10)
    new_resolution = timedelta(minutes=1)
    first_event_types = [
        SnubaQueryEventType.EventType.ERROR,
        SnubaQueryEventType.EventType.DEFAULT,
    ]
    update_snuba_query(
        snuba_query,
        new_dataset,
        new_query,
        new_aggregate,
        new_window,
        new_resolution,
        None,
        first_event_types,
    )
    assert snuba_query.dataset == new_dataset.value
    assert snuba_query.query == new_query
    assert snuba_query.aggregate == new_aggregate
    assert snuba_query.time_window == int(new_window.total_seconds())
    assert snuba_query.resolution == int(new_resolution.total_seconds())
    assert snuba_query.environment is None
    assert set(snuba_query.event_types) == set(first_event_types)
    # Shrinking back down to a single event type should also stick.
    second_event_types = [SnubaQueryEventType.EventType.DEFAULT]
    update_snuba_query(
        snuba_query,
        new_dataset,
        new_query,
        new_aggregate,
        new_window,
        new_resolution,
        None,
        second_event_types,
    )
    assert set(snuba_query.event_types) == set(second_event_types)
def test_subscriptions(self):
    """Updating a query flips its attached subscriptions into the UPDATING state."""
    dataset = QueryDatasets.EVENTS
    snuba_query = create_snuba_query(
        dataset,
        "hello",
        "count_unique(tags[sentry:user])",
        timedelta(minutes=100),
        timedelta(minutes=2),
        self.environment,
    )
    sub = create_snuba_subscription(self.project, "hi", snuba_query)
    replacement_env = self.create_environment()
    update_snuba_query(
        snuba_query,
        "level:error",
        "count()",
        timedelta(minutes=10),
        timedelta(minutes=1),
        replacement_env,
    )
    # Reload from the database to observe the state transition.
    sub.refresh_from_db()
    assert sub.snuba_query == snuba_query
    assert sub.status == QuerySubscription.Status.UPDATING.value
def test(self):
    """All scalar fields of a query can be rewritten in a single update call."""
    dataset = QueryDatasets.EVENTS
    snuba_query = create_snuba_query(
        dataset,
        "hello",
        "count_unique(tags[sentry:user])",
        timedelta(minutes=100),
        timedelta(minutes=2),
        self.environment,
    )
    updates = {
        "query": "level:error",
        "aggregate": "count()",
        "time_window": timedelta(minutes=10),
        "resolution": timedelta(minutes=1),
    }
    update_snuba_query(
        snuba_query,
        updates["query"],
        updates["aggregate"],
        updates["time_window"],
        updates["resolution"],
        None,
    )
    assert snuba_query.dataset == dataset.value
    assert snuba_query.query == updates["query"]
    assert snuba_query.aggregate == updates["aggregate"]
    assert snuba_query.time_window == int(updates["time_window"].total_seconds())
    assert snuba_query.resolution == int(updates["resolution"].total_seconds())
    assert snuba_query.environment is None
def test(self):
    """Updating with an aggregation enum stores the translated aggregate string."""
    dataset = QueryDatasets.EVENTS
    snuba_query = create_snuba_query(
        dataset,
        "hello",
        QueryAggregations.UNIQUE_USERS,
        timedelta(minutes=100),
        timedelta(minutes=2),
        self.environment,
    )
    new_query = "level:error"
    new_aggregation = QueryAggregations.TOTAL
    new_window = timedelta(minutes=10)
    new_resolution = timedelta(minutes=1)
    update_snuba_query(
        snuba_query, new_query, new_aggregation, new_window, new_resolution, None
    )
    assert snuba_query.dataset == dataset.value
    assert snuba_query.query == new_query
    # The enum should have been translated to its string aggregate form.
    assert snuba_query.aggregate == translate_aggregation(new_aggregation)
    assert snuba_query.time_window == int(new_window.total_seconds())
    assert snuba_query.resolution == int(new_resolution.total_seconds())
    assert snuba_query.environment is None
def update_alert_rule(
    alert_rule,
    projects=None,
    name=None,
    query=None,
    aggregation=None,
    time_window=None,
    environment=None,
    threshold_period=None,
    include_all_projects=None,
    excluded_projects=None,
):
    """
    Updates an alert rule.

    :param alert_rule: The alert rule to update
    :param excluded_projects: List of projects to subscribe to the rule. Ignored if
    `include_all_projects` is True
    :param name: Name for the alert rule. This will be used as part of the
    incident name, and must be unique per project.
    :param query: An event search query to subscribe to and monitor for alerts
    :param aggregation: An AlertRuleAggregation that we want to fetch for this alert rule
    :param time_window: Time period to aggregate over, in minutes.
    :param environment: An optional environment that this rule applies to
    :param threshold_period: How many update periods the value of the
    subscription needs to exceed the threshold before triggering
    :param include_all_projects: Whether to include all current and future projects
    from this organization
    :param excluded_projects: List of projects to exclude if we're using
    `include_all_projects`. Ignored otherwise.
    :return: The updated `AlertRule`
    """
    # Renaming must not collide with another rule in the same organization.
    if (
        name
        and alert_rule.name != name
        and AlertRule.objects.filter(organization=alert_rule.organization, name=name).exists()
    ):
        raise AlertRuleNameAlreadyUsedError()

    # Split incoming changes into fields stored on the AlertRule itself vs.
    # fields that belong to the underlying snuba query.
    updated_fields = {}
    updated_query_fields = {}
    if name:
        updated_fields["name"] = name
    if query is not None:
        validate_alert_rule_query(query)
        updated_query_fields["query"] = query
    if aggregation is not None:
        updated_query_fields["aggregation"] = aggregation
    if time_window:
        # `time_window` arrives in minutes; the query layer works with timedeltas.
        updated_query_fields["time_window"] = timedelta(minutes=time_window)
    if threshold_period:
        updated_fields["threshold_period"] = threshold_period
    if include_all_projects is not None:
        updated_fields["include_all_projects"] = include_all_projects

    with transaction.atomic():
        # Snapshot the rule before mutating it if any incident references it,
        # so existing incidents keep pointing at the pre-update configuration.
        incidents = Incident.objects.filter(alert_rule=alert_rule).exists()
        if incidents:
            snapshot_alert_rule(alert_rule)
        alert_rule.update(**updated_fields)

        # NOTE(review): because `environment` defaults to None, calling this
        # function without an environment on a rule that has one will clear it
        # (None != current environment triggers this branch) — confirm callers
        # always pass the current environment when they don't intend a change.
        if updated_query_fields or environment != alert_rule.snuba_query.environment:
            snuba_query = alert_rule.snuba_query
            # Backfill any query fields the caller didn't supply from the
            # current query so update_snuba_query receives a full set.
            updated_query_fields.setdefault("query", snuba_query.query)
            # XXX: We use the alert rule aggregation here since currently we're
            # expecting the enum value to be passed.
            updated_query_fields.setdefault(
                "aggregation", aggregate_to_query_aggregation[snuba_query.aggregate]
            )
            # Stored time_window is in seconds; convert back to a timedelta.
            updated_query_fields.setdefault(
                "time_window", timedelta(seconds=snuba_query.time_window)
            )
            update_snuba_query(
                alert_rule.snuba_query,
                resolution=timedelta(minutes=DEFAULT_ALERT_RULE_RESOLUTION),
                environment=environment,
                **updated_query_fields
            )

        # Only fetch existing subscriptions when something changed that could
        # affect project membership.
        existing_subs = []
        if (
            query is not None
            or aggregation is not None
            or time_window is not None
            or projects is not None
            or include_all_projects is not None
            or excluded_projects is not None
        ):
            existing_subs = alert_rule.snuba_query.subscriptions.all().select_related("project")

        new_projects = []
        deleted_subs = []

        if not alert_rule.include_all_projects:
            # We don't want to have any exclusion rows present if we're not in
            # `include_all_projects` mode
            get_excluded_projects_for_alert_rule(alert_rule).delete()

        if alert_rule.include_all_projects:
            if include_all_projects or excluded_projects is not None:
                # If we're in `include_all_projects` mode, we want to just fetch
                # projects that aren't already subscribed, and haven't been excluded so
                # we can add them.
                excluded_project_ids = (
                    {p.id for p in excluded_projects} if excluded_projects else set()
                )
                project_exclusions = get_excluded_projects_for_alert_rule(alert_rule)
                # Drop exclusions that are no longer requested; the queryset is
                # re-evaluated below, so the set comprehension only sees survivors.
                project_exclusions.exclude(project_id__in=excluded_project_ids).delete()
                existing_excluded_project_ids = {pe.project_id for pe in project_exclusions}
                new_exclusions = [
                    AlertRuleExcludedProjects(alert_rule=alert_rule, project_id=project_id)
                    for project_id in excluded_project_ids
                    if project_id not in existing_excluded_project_ids
                ]
                AlertRuleExcludedProjects.objects.bulk_create(new_exclusions)

                new_projects = Project.objects.filter(
                    organization=alert_rule.organization
                ).exclude(
                    id__in=set([sub.project_id for sub in existing_subs]) | excluded_project_ids
                )
                # If we're subscribed to any of the excluded projects then we want to
                # remove those subscriptions
                deleted_subs = [
                    sub for sub in existing_subs if sub.project_id in excluded_project_ids
                ]
        elif projects is not None:
            existing_project_slugs = {sub.project.slug for sub in existing_subs}
            # Determine whether we've added any new projects as part of this update
            new_projects = [
                project for project in projects if project.slug not in existing_project_slugs
            ]
            updated_project_slugs = {project.slug for project in projects}
            # Find any subscriptions that were removed as part of this update
            deleted_subs = [
                sub for sub in existing_subs if sub.project.slug not in updated_project_slugs
            ]

        if new_projects:
            subscribe_projects_to_alert_rule(alert_rule, new_projects)

        if deleted_subs:
            bulk_delete_snuba_subscriptions(deleted_subs)

    return alert_rule