Example #1
def bulk_get_incident_event_stats(incidents,
                                  query_params_list,
                                  data_points=50):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[(
                query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][0],
                query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][1],
                "count",
            )],
            orderby="time",
            groupby=["time"],
            rollup=max(int(incident.duration.total_seconds() / data_points), 1),
            limit=10000,
            **query_param)
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list,
                             referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end,
                      snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
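
The `rollup` argument above converts an incident's duration into a time-series bucket width. A standalone sketch of that computation (`compute_rollup` is a hypothetical helper, not part of the original code):

from datetime import timedelta

def compute_rollup(duration, data_points=50):
    # Split the duration into roughly `data_points` buckets; clamp to
    # 1 second so very short incidents still yield a valid rollup.
    return max(int(duration.total_seconds() / data_points), 1)

assert compute_rollup(timedelta(hours=2)) == 144  # 7200s / 50 buckets
assert compute_rollup(timedelta(seconds=10)) == 1  # clamped up from 0
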
Example #2
def bulk_get_incident_event_stats(incidents, query_params_list):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[(
                query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][0],
                query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][1],
                "count",
            )],
            orderby="time",
            groupby=["time"],
            rollup=(incident.alert_rule.time_window * 60
                    if incident.alert_rule is not None
                    else 60),  # TODO: When time_window is persisted, switch to using that instead of alert_rule.time_window.
            limit=10000,
            **query_param)
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list,
                             referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end,
                      snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
Example #3
    def build_subscription_update(self,
                                  subscription,
                                  time_delta=None,
                                  value=None):
        if time_delta is not None:
            timestamp = timezone.now() + time_delta
        else:
            timestamp = timezone.now()
        timestamp = timestamp.replace(tzinfo=pytz.utc, microsecond=0)

        data = {}

        if subscription:
            aggregation_type = query_aggregation_to_snuba[QueryAggregations(
                subscription.aggregation)]
            value = randint(0, 100) if value is None else value
            data = {aggregation_type[2]: value}
        values = {"data": [data]}
        return {
            "subscription_id": (
                subscription.subscription_id if subscription else uuid4().hex
            ),
            "values": values,
            "timestamp": timestamp,
            "interval": 1,
            "partition": 1,
            "offset": 1,
        }
Example #4
    def build_subscription_update(self,
                                  subscription,
                                  time_delta=None,
                                  value=None):
        if time_delta is not None:
            timestamp = int(to_timestamp(timezone.now() + time_delta))
        else:
            timestamp = int(time())

        values = {}

        if subscription:
            aggregation_type = query_aggregation_to_snuba[QueryAggregations(
                subscription.aggregation)]
            value = randint(0, 100) if value is None else value
            values = {aggregation_type[2]: value}
        return {
            "subscription_id": (
                subscription.subscription_id if subscription else uuid4().hex
            ),
            "values": values,
            "timestamp": timestamp,
            "interval": 1,
            "partition": 1,
            "offset": 1,
        }
Example #5
    def test(self):
        # Full integration test to ensure that when a subscription receives an update
        # the `QuerySubscriptionConsumer` successfully retrieves the subscription and
        # calls the correct callback, which should result in an incident being created.

        callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

        def exception_callback(*args, **kwargs):
            # We want to just error after the callback so that we can see the result of
            # processing. This means the offset won't be committed, but that's fine, we
            # can still check the results.
            callback(*args, **kwargs)
            raise KeyboardInterrupt()

        value_name = query_aggregation_to_snuba[QueryAggregations(
            self.subscription.aggregation)][2]

        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = exception_callback
        message = {
            "version": 1,
            "payload": {
                "subscription_id": self.subscription.subscription_id,
                "values": {
                    value_name: self.trigger.alert_threshold + 1
                },
                "timestamp": 1235,
                "interval": 5,
                "partition": 50,
                "offset": 10,
            },
        }
        self.producer.produce(self.topic, json.dumps(message))
        self.producer.flush()

        def active_incident():
            return Incident.objects.filter(
                type=IncidentType.ALERT_TRIGGERED.value,
                status=IncidentStatus.OPEN.value,
                alert_rule=self.rule,
            )

        consumer = QuerySubscriptionConsumer("hi", topic=self.topic)
        with self.assertChanges(lambda: active_incident().exists(),
                                before=False,
                                after=True), self.tasks():
            consumer.run()

        assert len(mail.outbox) == 1
        handler = EmailActionHandler(self.action,
                                     active_incident().get(), self.project)
        message = handler.build_message(
            handler.generate_email_context(TriggerStatus.ACTIVE),
            TriggerStatus.ACTIVE, self.user.id)

        out = mail.outbox[0]
        assert out.to == [self.user.email]
        assert out.subject == message.subject
        built_message = message.build(self.user.email)
        assert out.body == built_message.body
Example #6
    def validate_aggregation(self, aggregation):
        try:
            return QueryAggregations(aggregation)
        except ValueError:
            raise serializers.ValidationError(
                "Invalid aggregation, valid values are %s"
                % [item.value for item in QueryAggregations])
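
Example #6 relies on the fact that calling an `Enum` class with a value that is not a member raises `ValueError`, which the serializer turns into a validation error. A minimal, dependency-free sketch of the same pattern (the enum members are illustrative stand-ins, not the real `QueryAggregations` values):

from enum import Enum

class QueryAggregations(Enum):
    # Stand-in members for illustration only.
    TOTAL = 0
    UNIQUE_USERS = 1

def validate_aggregation(aggregation):
    try:
        # Enum construction is the validation step: unknown values raise.
        return QueryAggregations(aggregation)
    except ValueError:
        raise ValueError(
            "Invalid aggregation, valid values are %s"
            % [item.value for item in QueryAggregations]
        )

assert validate_aggregation(0) is QueryAggregations.TOTAL
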
Example #7
def _create_in_snuba(subscription):
    conditions = resolve_discover_aliases(
        {"conditions": get_filter(subscription.query).conditions}
    )[0]["conditions"]
    environments = list(subscription.environments.all())
    if environments:
        conditions.append(["environment", "IN", [env.name for env in environments]])
    response = _snuba_pool.urlopen(
        "POST",
        "/%s/subscriptions" % (subscription.dataset,),
        body=json.dumps({
            "project_id": subscription.project_id,
            "dataset": subscription.dataset,
            # We only care about conditions here. Filter keys only matter for
            # filtering to project and groups. Projects are handled with an
            # explicit param, and groups can't be queried here.
            "conditions": conditions,
            "aggregations": [
                query_aggregation_to_snuba[QueryAggregations(subscription.aggregation)]
            ],
            "time_window": subscription.time_window,
            "resolution": subscription.resolution,
        }),
    )
    if response.status != 202:
        raise SnubaError("HTTP %s response from Snuba!" % response.status)
    return json.loads(response.data)["subscription_id"]
Example #8
def _create_in_snuba(subscription):
    conditions = resolve_discover_aliases(get_filter(subscription.query))[0].conditions
    try:
        environment = subscription.environments.all()[:1].get()
    except Environment.DoesNotExist:
        environment = None

    if environment:
        conditions.append(["environment", "=", environment.name])
    conditions = apply_dataset_conditions(QueryDatasets(subscription.dataset), conditions)
    response = _snuba_pool.urlopen(
        "POST",
        "/%s/subscriptions" % (subscription.dataset,),
        body=json.dumps(
            {
                "project_id": subscription.project_id,
                "dataset": subscription.dataset,
                # We only care about conditions here. Filter keys only matter for
                # filtering to project and groups. Projects are handled with an
                # explicit param, and groups can't be queried here.
                "conditions": conditions,
                "aggregations": [
                    query_aggregation_to_snuba[QueryAggregations(subscription.aggregation)]
                ],
                "time_window": subscription.time_window,
                "resolution": subscription.resolution,
            }
        ),
    )
    if response.status != 202:
        raise SnubaError("HTTP %s response from Snuba!" % response.status)
    return json.loads(response.data)["subscription_id"]
Example #9
def subscribe_projects_to_alert_rule(alert_rule, projects):
    """
    Subscribes a list of projects to an alert rule
    :return: The list of created subscriptions
    """
    try:
        environment = alert_rule.environment.all()[:1].get()
    except Environment.DoesNotExist:
        environment = None

    subscriptions = bulk_create_snuba_subscriptions(
        projects,
        tasks.INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
        QueryDatasets(alert_rule.dataset),
        alert_rule.query,
        QueryAggregations(alert_rule.aggregation),
        timedelta(minutes=alert_rule.time_window),
        timedelta(minutes=alert_rule.resolution),
        environment,
    )
    subscription_links = [
        AlertRuleQuerySubscription(query_subscription=subscription,
                                   alert_rule=alert_rule)
        for subscription in subscriptions
    ]
    AlertRuleQuerySubscription.objects.bulk_create(subscription_links)
    return subscriptions
Example #10
    def validate_aggregations(self, aggregations):
        # TODO: Remove this once FE transitions
        try:
            return [QueryAggregations(agg) for agg in aggregations]
        except ValueError:
            raise serializers.ValidationError(
                "Invalid aggregation, valid values are %s"
                % [item.value for item in QueryAggregations])
Example #11
    def process_update(self, subscription_update):
        if not hasattr(self, "alert_rule"):
            # If the alert rule has been removed then just skip
            metrics.incr("incidents.alert_rules.no_alert_rule_for_subscription")
            logger.error(
                "Received an update for a subscription, but no associated alert rule exists"
            )
            # TODO: Delete subscription here.
            return

        if subscription_update["timestamp"] <= self.last_update:
            metrics.incr("incidents.alert_rules.skipping_already_processed_update")
            return

        self.last_update = subscription_update["timestamp"]

        aggregation = QueryAggregations(self.alert_rule.aggregation)
        aggregation_name = query_aggregation_to_snuba[aggregation][2]
        if len(subscription_update["values"]["data"]) > 1:
            logger.warning(
                "Subscription returned more than 1 row of data",
                extra={
                    "subscription_id": self.subscription.id,
                    "dataset": self.subscription.dataset,
                    "snuba_subscription_id": self.subscription.subscription_id,
                    "result": subscription_update,
                },
            )
        aggregation_value = subscription_update["values"]["data"][0][aggregation_name]

        for trigger in self.triggers:
            alert_operator, resolve_operator = self.THRESHOLD_TYPE_OPERATORS[
                AlertRuleThresholdType(trigger.threshold_type)]

            if alert_operator(
                aggregation_value, trigger.alert_threshold
            ) and not self.check_trigger_status(trigger, TriggerStatus.ACTIVE):
                with transaction.atomic():
                    self.trigger_alert_threshold(trigger)
            elif (
                trigger.resolve_threshold is not None
                and resolve_operator(aggregation_value, trigger.resolve_threshold)
                and self.check_trigger_status(trigger, TriggerStatus.ACTIVE)
            ):
                with transaction.atomic():
                    self.trigger_resolve_threshold(trigger)
            else:
                self.trigger_alert_counts[trigger.id] = 0
                self.trigger_resolve_counts[trigger.id] = 0

        # We update the rule stats here after we commit the transaction. This guarantees
        # that we'll never miss an update, since we'll never roll back if the process
        # is killed here. The trade-off is that we might process an update twice. Mostly
        # this will have no effect, but if someone manages to close a triggered incident
        # before the next one then we might alert twice.
        self.update_alert_rule_stats()
Example #12
    def trigger_alert_threshold(self, trigger):
        """
        Called when a subscription update exceeds the value defined in the
        `trigger.alert_threshold`, and the trigger hasn't already been activated.
        Increments the count of how many times we've consecutively exceeded the threshold, and if
        above the `threshold_period` defined in the alert rule then mark the trigger as
        activated, and create an incident if there isn't already one.
        :return:
        """
        self.trigger_alert_counts[trigger.id] += 1
        if self.trigger_alert_counts[trigger.id] >= self.alert_rule.threshold_period:
            metrics.incr("incidents.alert_rules.trigger", tags={"type": "fire"})
            # Only create a new incident if we don't already have an active one
            if not self.active_incident:
                detected_at = self.last_update
                self.active_incident = create_incident(
                    self.alert_rule.organization,
                    IncidentType.ALERT_TRIGGERED,
                    # TODO: Include more info in name?
                    self.alert_rule.name,
                    alert_rule=self.alert_rule,
                    query=self.subscription.query,
                    aggregation=QueryAggregations(self.alert_rule.aggregation),
                    date_started=detected_at,
                    date_detected=detected_at,
                    projects=[self.subscription.project],
                )
            # Now create (or update if it already exists) the incident trigger so that
            # we have a record of this trigger firing for this incident
            incident_trigger = self.incident_triggers.get(trigger.id)
            if incident_trigger:
                incident_trigger.status = TriggerStatus.ACTIVE.value
                incident_trigger.save()
            else:
                incident_trigger = IncidentTrigger.objects.create(
                    incident=self.active_incident,
                    alert_rule_trigger=trigger,
                    status=TriggerStatus.ACTIVE.value,
                )
            self.handle_incident_severity_update()
            self.handle_trigger_actions(incident_trigger)
            self.incident_triggers[trigger.id] = incident_trigger

            # TODO: We should create an audit log, and maybe something that keeps
            # all of the details available for showing on the incident. Might be a json
            # blob or w/e? Or might be able to use the audit log

            # We now set this threshold to 0. We don't need to count it anymore
            # once we've triggered an incident.
            self.trigger_alert_counts[trigger.id] = 0
Example #13
    def process_update(self, subscription_update):
        if not hasattr(self, "alert_rule"):
            # If the alert rule has been removed then just skip
            metrics.incr("incidents.alert_rules.no_alert_rule_for_subscription")
            logger.error(
                "Received an update for a subscription, but no associated alert rule exists"
            )
            # TODO: Delete subscription here.
            return

        if subscription_update["timestamp"] <= self.last_update:
            metrics.incr("incidents.alert_rules.skipping_already_processed_update")
            return

        self.last_update = subscription_update["timestamp"]

        aggregation = QueryAggregations(self.alert_rule.aggregation)
        aggregation_name = query_aggregation_to_snuba[aggregation][2]
        aggregation_value = subscription_update["values"][aggregation_name]

        alert_operator, resolve_operator = self.THRESHOLD_TYPE_OPERATORS[
            AlertRuleThresholdType(self.alert_rule.threshold_type)
        ]

        if (
            alert_operator(aggregation_value, self.alert_rule.alert_threshold)
            and not self.active_incident
        ):
            with transaction.atomic():
                self.trigger_alert_threshold()
        elif (
            # TODO: Need to make `resolve_threshold` nullable so that it can be
            # optional
            self.alert_rule.resolve_threshold is not None
            and resolve_operator(aggregation_value, self.alert_rule.resolve_threshold)
            and self.active_incident
        ):
            with transaction.atomic():
                self.trigger_resolve_threshold()
        else:
            self.alert_triggers = 0
            self.resolve_triggers = 0

        # We update the rule stats here after we commit the transaction. This guarantees
        # that we'll never miss an update, since we'll never roll back if the process
        # is killed here. The trade-off is that we might process an update twice. Mostly
        # this will have no effect, but if someone manages to close a triggered incident
        # before the next one then we might alert twice.
        self.update_alert_rule_stats()
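
Both versions of `process_update` dispatch through `THRESHOLD_TYPE_OPERATORS`, a mapping from threshold type to an `(alert_operator, resolve_operator)` pair, which keeps the comparison logic direction-agnostic. A sketch of what such a mapping could look like (the member names and operator pairing are assumptions, not taken from the examples):

import operator
from enum import Enum

class AlertRuleThresholdType(Enum):
    # Hypothetical stand-in for the real enum.
    ABOVE = 0
    BELOW = 1

# Assumed pairing: an "above" rule alerts when value > threshold and
# resolves when value < threshold; a "below" rule is the mirror image.
THRESHOLD_TYPE_OPERATORS = {
    AlertRuleThresholdType.ABOVE: (operator.gt, operator.lt),
    AlertRuleThresholdType.BELOW: (operator.lt, operator.gt),
}

alert_op, resolve_op = THRESHOLD_TYPE_OPERATORS[AlertRuleThresholdType.ABOVE]
assert alert_op(101, 100)   # fires: 101 > 100
assert resolve_op(99, 100)  # resolves: 99 < 100
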
Example #14
    def test(self):
        # Full integration test to ensure that when a subscription receives an update
        # the `QuerySubscriptionConsumer` successfully retrieves the subscription and
        # calls the correct callback, which should result in an incident being created.

        callback = subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE]

        def exception_callback(*args, **kwargs):
            # We want to just error after the callback so that we can see the result of
            # processing. This means the offset won't be committed, but that's fine, we
            # can still check the results.
            callback(*args, **kwargs)
            raise KeyboardInterrupt()

        value_name = query_aggregation_to_snuba[QueryAggregations(
            self.subscription.aggregation)][2]

        subscriber_registry[INCIDENTS_SNUBA_SUBSCRIPTION_TYPE] = exception_callback
        message = {
            "version": 1,
            "payload": {
                "subscription_id": self.subscription.subscription_id,
                "values": {
                    value_name: self.trigger.alert_threshold + 1
                },
                "timestamp": 1235,
                "interval": 5,
                "partition": 50,
                "offset": 10,
            },
        }
        self.producer.produce(self.topic, json.dumps(message))
        self.producer.flush()

        def active_incident_exists():
            return Incident.objects.filter(
                type=IncidentType.ALERT_TRIGGERED.value,
                status=IncidentStatus.OPEN.value,
                alert_rule=self.rule,
            ).exists()

        consumer = QuerySubscriptionConsumer("hi", topic=self.topic)
        with self.assertChanges(active_incident_exists,
                                before=False,
                                after=True), self.tasks():
            # TODO: Need to check that the email gets sent once we hook that up
            consumer.run()
Example #15
def convert_alert_rule_to_snuba_query(alert_rule):
    """
    Temporary method to convert existing alert rules to have a snuba query
    """
    if alert_rule.snuba_query:
        return

    with transaction.atomic():
        snuba_query = create_snuba_query(
            QueryDatasets(alert_rule.dataset),
            alert_rule.query,
            QueryAggregations(alert_rule.aggregation),
            timedelta(minutes=alert_rule.time_window),
            timedelta(minutes=alert_rule.resolution),
            alert_rule.environment,
        )
        alert_rule.update(snuba_query=snuba_query)
        alert_rule.query_subscriptions.all().update(snuba_query=snuba_query)
Example #16
def subscribe_projects_to_alert_rule(alert_rule, projects):
    """
    Subscribes a list of projects to an alert rule
    :return: The list of created subscriptions
    """
    subscriptions = bulk_create_snuba_subscriptions(
        projects,
        tasks.INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
        alert_rule.snuba_query,
        QueryAggregations(alert_rule.aggregation),
    )
    subscription_links = [
        AlertRuleQuerySubscription(query_subscription=subscription,
                                   alert_rule=alert_rule)
        for subscription in subscriptions
    ]
    AlertRuleQuerySubscription.objects.bulk_create(subscription_links)
    return subscriptions
Example #17
def update_alert_rule(
    alert_rule,
    projects=None,
    name=None,
    query=None,
    aggregation=None,
    time_window=None,
    environment=None,
    threshold_period=None,
    include_all_projects=None,
    excluded_projects=None,
):
    """
    Updates an alert rule.

    :param alert_rule: The alert rule to update
    :param projects: List of projects to subscribe to the rule. Ignored if
    `include_all_projects` is True
    :param name: Name for the alert rule. This will be used as part of the
    incident name, and must be unique per project.
    :param query: An event search query to subscribe to and monitor for alerts
    :param aggregation: An AlertRuleAggregation that we want to fetch for this alert rule
    :param time_window: Time period to aggregate over, in minutes.
    :param environment: List of environments that this rule applies to
    :param threshold_period: How many update periods the value of the
    subscription needs to exceed the threshold before triggering
    :param include_all_projects: Whether to include all current and future projects
    from this organization
    :param excluded_projects: List of projects to exclude if we're using
    `include_all_projects`. Ignored otherwise.
    :return: The updated `AlertRule`
    """
    if (name and alert_rule.name != name
            and AlertRule.objects.filter(organization=alert_rule.organization,
                                         name=name).exists()):
        raise AlertRuleNameAlreadyUsedError()

    updated_fields = {}
    if name:
        updated_fields["name"] = name
    if query is not None:
        validate_alert_rule_query(query)
        updated_fields["query"] = query
    if aggregation is not None:
        updated_fields["aggregation"] = aggregation.value
    if time_window:
        updated_fields["time_window"] = time_window
    if threshold_period:
        updated_fields["threshold_period"] = threshold_period
    if include_all_projects is not None:
        updated_fields["include_all_projects"] = include_all_projects

    with transaction.atomic():
        incidents = Incident.objects.filter(alert_rule=alert_rule).exists()
        if incidents:
            snapshot_alert_rule(alert_rule)
        alert_rule.update(**updated_fields)

        existing_subs = []
        if (query is not None or aggregation is not None
                or time_window is not None or projects is not None
                or include_all_projects is not None
                or excluded_projects is not None):
            existing_subs = alert_rule.query_subscriptions.all().select_related("project")

        new_projects = []
        deleted_subs = []

        if not alert_rule.include_all_projects:
            # We don't want to have any exclusion rows present if we're not in
            # `include_all_projects` mode
            get_excluded_projects_for_alert_rule(alert_rule).delete()

        if alert_rule.include_all_projects:
            if include_all_projects or excluded_projects is not None:
                # If we're in `include_all_projects` mode, we want to just fetch
                # projects that aren't already subscribed, and haven't been excluded so
                # we can add them.
                excluded_project_ids = (
                    {p.id for p in excluded_projects} if excluded_projects else set()
                )
                project_exclusions = get_excluded_projects_for_alert_rule(alert_rule)
                project_exclusions.exclude(project_id__in=excluded_project_ids).delete()
                existing_excluded_project_ids = {
                    pe.project_id for pe in project_exclusions
                }
                new_exclusions = [
                    AlertRuleExcludedProjects(alert_rule=alert_rule,
                                              project_id=project_id)
                    for project_id in excluded_project_ids
                    if project_id not in existing_excluded_project_ids
                ]
                AlertRuleExcludedProjects.objects.bulk_create(new_exclusions)

                new_projects = Project.objects.filter(
                    organization=alert_rule.organization
                ).exclude(
                    id__in={sub.project_id for sub in existing_subs} | excluded_project_ids
                )
                # If we're subscribed to any of the excluded projects then we want to
                # remove those subscriptions
                deleted_subs = [
                    sub for sub in existing_subs
                    if sub.project_id in excluded_project_ids
                ]
        elif projects is not None:
            existing_project_slugs = {sub.project.slug for sub in existing_subs}
            # Determine whether we've added any new projects as part of this update
            new_projects = [
                project for project in projects
                if project.slug not in existing_project_slugs
            ]
            updated_project_slugs = {project.slug for project in projects}
            # Find any subscriptions that were removed as part of this update
            deleted_subs = [
                sub for sub in existing_subs
                if sub.project.slug not in updated_project_slugs
            ]

        if new_projects:
            subscribe_projects_to_alert_rule(alert_rule, new_projects)

        if deleted_subs:
            bulk_delete_snuba_subscriptions(deleted_subs)
            # Remove any deleted subscriptions from `existing_subscriptions`, so that
            # if we need to update any subscriptions we don't end up doing it twice. We
            # don't add new subscriptions here since they'll already have the updated
            # values
            existing_subs = [sub for sub in existing_subs if sub.id]

        if environment:
            # Delete rows we don't have present in the updated data.
            AlertRuleEnvironment.objects.filter(alert_rule=alert_rule).exclude(
                environment__in=environment).delete()
            for e in environment:
                AlertRuleEnvironment.objects.get_or_create(
                    alert_rule=alert_rule, environment=e)
        else:
            AlertRuleEnvironment.objects.filter(alert_rule=alert_rule).delete()

        if existing_subs and (query is not None or aggregation is not None
                              or time_window is not None):
            # If updating any subscription details, update related Snuba subscriptions
            # too
            bulk_update_snuba_subscriptions(
                existing_subs,
                alert_rule.query,
                QueryAggregations(alert_rule.aggregation),
                timedelta(minutes=alert_rule.time_window),
                timedelta(minutes=DEFAULT_ALERT_RULE_RESOLUTION),
                list(alert_rule.environment.all()),
            )

    return alert_rule
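
The project-diffing step in Example #17 reduces to set arithmetic over project slugs: anything passed in but not already subscribed is new, and anything subscribed but absent from the update is deleted. A minimal sketch with plain strings standing in for Django model instances:

existing_project_slugs = {"backend", "frontend"}  # slugs from existing_subs
updated_projects = ["frontend", "mobile"]         # the `projects` argument

new_projects = [p for p in updated_projects if p not in existing_project_slugs]
deleted_subs = [s for s in existing_project_slugs if s not in set(updated_projects)]

assert new_projects == ["mobile"]
assert set(deleted_subs) == {"backend"}
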
Example #18
def update_alert_rule(
    alert_rule,
    name=None,
    threshold_type=None,
    query=None,
    aggregation=None,
    time_window=None,
    alert_threshold=None,
    resolve_threshold=None,
    threshold_period=None,
):
    """
    Updates an alert rule.

    :param alert_rule: The alert rule to update
    :param name: Name for the alert rule. This will be used as part of the
    incident name, and must be unique per project.
    :param threshold_type: An AlertRuleThresholdType
    :param query: An event search query to subscribe to and monitor for alerts
    :param aggregation: An AlertRuleAggregation that we want to fetch for this alert rule
    :param time_window: Time period to aggregate over, in minutes.
    :param alert_threshold: Value that the subscription needs to reach to
    trigger the alert
    :param resolve_threshold: Value that the subscription needs to reach to
    resolve the alert
    :param threshold_period: How many update periods the value of the
    subscription needs to exceed the threshold before triggering
    :return: The updated `AlertRule`
    """
    if (name and alert_rule.name != name and AlertRule.objects.filter(
            project=alert_rule.project, name=name).exists()):
        raise AlertRuleNameAlreadyUsedError()

    updated_fields = {}
    if name:
        updated_fields["name"] = name
    if threshold_type:
        updated_fields["threshold_type"] = threshold_type.value
    if query is not None:
        validate_alert_rule_query(query)
        updated_fields["query"] = query
    if aggregation is not None:
        updated_fields["aggregation"] = aggregation.value
    if time_window:
        updated_fields["time_window"] = time_window
    if alert_threshold is not None:
        updated_fields["alert_threshold"] = alert_threshold
    if resolve_threshold is not None:
        updated_fields["resolve_threshold"] = resolve_threshold
    if threshold_period:
        updated_fields["threshold_period"] = threshold_period

    with transaction.atomic():
        if query is not None or aggregation is not None or time_window is not None:
            # TODO: We're assuming only one subscription for the moment
            subscription = (
                AlertRuleQuerySubscription.objects.select_related("query_subscription")
                .get(alert_rule=alert_rule)
                .query_subscription
            )
            # If updating any details of the query, update the Snuba subscription
            update_snuba_subscription(
                subscription,
                query if query is not None else alert_rule.query,
                (aggregation if aggregation is not None
                 else QueryAggregations(alert_rule.aggregation)),
                time_window if time_window else alert_rule.time_window,
                DEFAULT_ALERT_RULE_RESOLUTION,
            )
        alert_rule.update(**updated_fields)

    return alert_rule
Example #19
def update_alert_rule(
    alert_rule,
    projects=None,
    name=None,
    threshold_type=None,
    query=None,
    aggregation=None,
    time_window=None,
    alert_threshold=None,
    resolve_threshold=None,
    threshold_period=None,
):
    """
    Updates an alert rule.

    :param alert_rule: The alert rule to update
    :param name: Name for the alert rule. This will be used as part of the
    incident name, and must be unique per project.
    :param threshold_type: An AlertRuleThresholdType
    :param query: An event search query to subscribe to and monitor for alerts
    :param aggregation: An AlertRuleAggregation that we want to fetch for this alert rule
    :param time_window: Time period to aggregate over, in minutes.
    :param alert_threshold: Value that the subscription needs to reach to
    trigger the alert
    :param resolve_threshold: Value that the subscription needs to reach to
    resolve the alert
    :param threshold_period: How many update periods the value of the
    subscription needs to exceed the threshold before triggering
    :return: The updated `AlertRule`
    """
    if (name and alert_rule.name != name
            and AlertRule.objects.filter(organization=alert_rule.organization,
                                         name=name).exists()):
        raise AlertRuleNameAlreadyUsedError()

    updated_fields = {}
    if name:
        updated_fields["name"] = name
    if threshold_type:
        updated_fields["threshold_type"] = threshold_type.value
    if query is not None:
        validate_alert_rule_query(query)
        updated_fields["query"] = query
    if aggregation is not None:
        updated_fields["aggregation"] = aggregation.value
    if time_window:
        updated_fields["time_window"] = time_window
    if alert_threshold is not None:
        updated_fields["alert_threshold"] = alert_threshold
    if resolve_threshold is not None:
        updated_fields["resolve_threshold"] = resolve_threshold
    if threshold_period:
        updated_fields["threshold_period"] = threshold_period

    with transaction.atomic():
        alert_rule.update(**updated_fields)
        existing_subs = []
        if (query is not None or aggregation is not None
                or time_window is not None or projects is not None):
            existing_subs = alert_rule.query_subscriptions.all().select_related("project")

        if projects is not None:
            existing_project_slugs = {sub.project.slug for sub in existing_subs}
            # Determine whether we've added any new projects as part of this update
            new_projects = [
                project for project in projects
                if project.slug not in existing_project_slugs
            ]
            updated_project_slugs = {project.slug for project in projects}
            # Find any subscriptions that were removed as part of this update
            deleted_subs = [
                sub for sub in existing_subs
                if sub.project.slug not in updated_project_slugs
            ]
            if new_projects:
                new_subscriptions = bulk_create_snuba_subscriptions(
                    new_projects,
                    tasks.INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
                    QueryDatasets(alert_rule.dataset),
                    alert_rule.query,
                    QueryAggregations(alert_rule.aggregation),
                    alert_rule.time_window,
                    DEFAULT_ALERT_RULE_RESOLUTION,
                )
                subscription_links = [
                    AlertRuleQuerySubscription(query_subscription=subscription,
                                               alert_rule=alert_rule)
                    for subscription in new_subscriptions
                ]
                AlertRuleQuerySubscription.objects.bulk_create(subscription_links)

            if deleted_subs:
                bulk_delete_snuba_subscriptions(deleted_subs)

            # Remove any deleted subscriptions from `existing_subscriptions`, so that
            # if we need to update any subscriptions we don't end up doing it twice. We
            # don't add new subscriptions here since they'll already have the updated
            # values
            existing_subs = [sub for sub in existing_subs if sub.id]

        if existing_subs and (query is not None or aggregation is not None
                              or time_window is not None):
            # If updating any subscription details, update related Snuba subscriptions
            # too
            bulk_update_snuba_subscriptions(
                existing_subs,
                alert_rule.query,
                QueryAggregations(alert_rule.aggregation),
                alert_rule.time_window,
                DEFAULT_ALERT_RULE_RESOLUTION,
            )

    return alert_rule