Code example #1
File: logic.py  Project: yujiaqi/sentry
def build_incident_query_params(incident, start=None, end=None, windowed_stats=False):
    params = {}
    params["start"], params["end"] = calculate_incident_time_range(
        incident, start, end, windowed_stats=windowed_stats
    )

    project_ids = list(
        IncidentProject.objects.filter(incident=incident).values_list("project_id", flat=True)
    )
    if project_ids:
        params["project_id"] = project_ids

    snuba_query = incident.alert_rule.snuba_query
    snuba_filter = build_snuba_filter(
        QueryDatasets(snuba_query.dataset),
        snuba_query.query,
        snuba_query.aggregate,
        snuba_query.environment,
        params=params,
    )

    return {
        "dataset": Dataset(snuba_query.dataset),
        "start": snuba_filter.start,
        "end": snuba_filter.end,
        "conditions": snuba_filter.conditions,
        "filter_keys": snuba_filter.filter_keys,
        "having": [],
        "aggregations": snuba_filter.aggregations,
    }
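Note: the dictionary returned above lines up key-for-key with the keyword arguments of Snuba's raw_query helper used in the later examples, so callers can unpack it directly. A minimal usage sketch, assuming an incident object is already at hand; the limit and referrer values are illustrative only:

# Hypothetical usage sketch: the returned dict unpacks straight into raw_query.
query_params = build_incident_query_params(incident, windowed_stats=True)
results = raw_query(
    limit=1,
    referrer="incidents.example",  # illustrative referrer string
    **query_params,
)
rows = results["data"]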
Code example #2
    def get_comparison_aggregation_value(self, subscription_update,
                                         aggregation_value):
        # For comparison alerts run a query over the comparison period and use it to calculate the
        # % change.
        delta = timedelta(seconds=self.alert_rule.comparison_delta)
        end = subscription_update["timestamp"] - delta
        snuba_query = self.subscription.snuba_query
        start = end - timedelta(seconds=snuba_query.time_window)

        entity_subscription = get_entity_subscription_for_dataset(
            dataset=QueryDatasets(snuba_query.dataset),
            aggregate=snuba_query.aggregate,
            time_window=snuba_query.time_window,
            extra_fields={
                "org_id": self.subscription.project.organization,
                "event_types": snuba_query.event_types,
            },
        )
        try:
            snuba_filter = build_snuba_filter(
                entity_subscription,
                snuba_query.query,
                snuba_query.environment,
                params={
                    "project_id": [self.subscription.project_id],
                    "start": start,
                    "end": end,
                },
            )
            results = raw_query(
                aggregations=snuba_filter.aggregations,
                start=snuba_filter.start,
                end=snuba_filter.end,
                conditions=snuba_filter.conditions,
                filter_keys=snuba_filter.filter_keys,
                having=snuba_filter.having,
                dataset=Dataset(snuba_query.dataset),
                limit=1,
                referrer="subscription_processor.comparison_query",
            )
            comparison_aggregate = list(results["data"][0].values())[0]
        except Exception:
            logger.exception("Failed to run comparison query")
            return

        if not comparison_aggregate:
            metrics.incr(
                "incidents.alert_rules.skipping_update_comparison_value_invalid"
            )
            return

        return (aggregation_value / comparison_aggregate) * 100
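The return value is the current aggregation expressed as a percentage of the comparison-period value, not a signed delta. A quick worked example with invented numbers:

# Invented numbers: 150 events in the current window vs. 120 events in the
# window ending comparison_delta seconds earlier.
aggregation_value = 150
comparison_aggregate = 120
print((aggregation_value / comparison_aggregate) * 100)  # 125.0
# The current window sits at 125% of the comparison window, so a comparison
# alert with a critical trigger of "above 110" would fire.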
Code example #3
    def get_aggregation_value(self, subscription_update):
        is_sessions_dataset = Dataset(self.subscription.snuba_query.dataset) == Dataset.Sessions
        if is_sessions_dataset:
            aggregation_value = self.get_crash_rate_alert_aggregation_value(subscription_update)
        else:
            aggregation_value = list(subscription_update["values"]["data"][0].values())[0]
            # In some cases Snuba can return a None value for an aggregation. This means
            # there were no rows present when we made the query for certain types of aggregations
            # like avg. Defaulting this to 0 for now. It might turn out that we'd prefer to skip
            # the update in the future.
            if aggregation_value is None:
                aggregation_value = 0

            if self.alert_rule.comparison_delta:
                aggregation_value = self.get_comparison_aggregation_value(
                    subscription_update, aggregation_value
                )
        return aggregation_value
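The non-sessions branch assumes a specific payload shape: the subscription update carries the query result as a single row under values -> data, and the first column of that row is the aggregate. A sketch of that assumed shape with invented values:

from datetime import datetime

# Invented payload showing the shape the non-sessions branch expects:
# one result row whose first value is the aggregate.
subscription_update = {
    "timestamp": datetime(2021, 9, 1, 12, 0),
    "values": {"data": [{"count": 42}]},
}
value = list(subscription_update["values"]["data"][0].values())[0]  # 42
# A row like {"count": None} would be coerced to 0 by the None check above.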
Code example #4
File: serializers.py  Project: aleonidex/sentry
    def validate(self, data):
        """Performs validation on an alert rule's data
        This includes ensuring there is either 1 or 2 triggers, which each have actions, and have proper thresholds set.
        The critical trigger should both alert and resolve 'after' the warning trigger (whether that means > or < the value depends on threshold type).
        """
        data.setdefault("dataset", QueryDatasets.EVENTS)
        project_id = data.get("projects")
        if not project_id:
            # We just need a valid project id from the org so that we can verify
            # the query. We don't use the returned data anywhere, so it doesn't
            # matter which.
            project_id = list(
                self.context["organization"].project_set.all()[:1])
        try:
            snuba_filter = build_snuba_filter(
                data["dataset"],
                data["query"],
                data["aggregate"],
                data.get("environment"),
                params={
                    "project_id": [p.id for p in project_id],
                    "start": timezone.now() - timedelta(minutes=10),
                    "end": timezone.now(),
                },
            )
        except (InvalidSearchQuery, ValueError) as e:
            raise serializers.ValidationError(
                "Invalid Query or Metric: {}".format(e.message))
        else:
            if not snuba_filter.aggregations:
                raise serializers.ValidationError(
                    "Invalid Metric: Please pass a valid function for aggregation"
                )

            try:
                raw_query(
                    aggregations=snuba_filter.aggregations,
                    start=snuba_filter.start,
                    end=snuba_filter.end,
                    conditions=snuba_filter.conditions,
                    filter_keys=snuba_filter.filter_keys,
                    having=snuba_filter.having,
                    dataset=Dataset(data["dataset"].value),
                    limit=1,
                    referrer="alertruleserializer.test_query",
                )
            except Exception:
                logger.exception(
                    "Error while validating snuba alert rule query")
                raise serializers.ValidationError(
                    "Invalid Query or Metric: An error occurred while attempting "
                    "to run the query")

        triggers = data.get("triggers", [])
        if not triggers:
            raise serializers.ValidationError(
                "Must include at least one trigger")
        if len(triggers) > 2:
            raise serializers.ValidationError(
                "Must send 1 or 2 triggers - A critical trigger, and an optional warning trigger"
            )

        for i, (trigger, expected_label) in enumerate(
                zip(triggers,
                    (CRITICAL_TRIGGER_LABEL, WARNING_TRIGGER_LABEL))):
            if trigger.get("label", None) != expected_label:
                raise serializers.ValidationError(
                    'Trigger {} must be labeled "{}"'.format(
                        i + 1, expected_label))
        critical = triggers[0]
        if "threshold_type" in data:
            threshold_type = data["threshold_type"]
            for trigger in triggers:
                trigger["threshold_type"] = threshold_type.value
        else:
            data["threshold_type"] = threshold_type = AlertRuleThresholdType(
                critical.get("threshold_type",
                             AlertRuleThresholdType.ABOVE.value))

        if "resolve_threshold" in data:
            for trigger in triggers:
                trigger["resolve_threshold"] = data["resolve_threshold"]
        else:
            trigger_resolve_thresholds = [
                trigger["resolve_threshold"] for trigger in triggers
                if trigger.get("resolve_threshold")
            ]
            if trigger_resolve_thresholds:
                data["resolve_threshold"] = (
                    min(trigger_resolve_thresholds)
                    if threshold_type == AlertRuleThresholdType.ABOVE else
                    max(trigger_resolve_thresholds))
            else:
                data["resolve_threshold"] = None

        self._validate_trigger_thresholds(threshold_type, critical,
                                          data.get("resolve_threshold"))

        if len(triggers) == 2:
            warning = triggers[1]
            if critical["threshold_type"] != warning["threshold_type"]:
                raise serializers.ValidationError(
                    "Must have matching threshold types (i.e. critical and warning "
                    "triggers must both be an upper or lower bound)")
            self._validate_trigger_thresholds(threshold_type, warning,
                                              data.get("resolve_threshold"))
            self._validate_critical_warning_triggers(threshold_type, critical,
                                                     warning)

        # Triggers have passed checks. Check that all triggers have at least one action now.
        for trigger in triggers:
            actions = trigger.get("actions")
            if not actions:
                raise serializers.ValidationError(
                    '"' + trigger["label"] + '" trigger must have an action.')

        return data
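The min/max choice in the resolve_threshold branch follows from the threshold direction: an ABOVE rule should not resolve until the value has dropped below every trigger's resolve threshold, so the lowest one is binding; a BELOW rule is symmetric, so the highest wins. A standalone sketch with invented values:

# Invented trigger values: critical resolves at 100, warning at 50.
trigger_resolve_thresholds = [100, 50]

print(min(trigger_resolve_thresholds))  # 50: an ABOVE rule resolves only below both
print(max(trigger_resolve_thresholds))  # 100: a BELOW rule resolves only above both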
Code example #5
    def validate(self, data):
        """
        Performs validation on an alert rule's data.
        This includes ensuring there are either 1 or 2 triggers, each of which
        has actions and proper thresholds set. The critical trigger should
        both alert and resolve 'after' the warning trigger (whether that means
        > or < the value depends on threshold type).
        """
        data.setdefault("dataset", QueryDatasets.EVENTS)
        project_id = data.get("projects")
        if not project_id:
            # We just need a valid project id from the org so that we can verify
            # the query. We don't use the returned data anywhere, so it doesn't
            # matter which.
            project_id = list(self.context["organization"].project_set.all()[:1])
        try:
            snuba_filter = build_snuba_filter(
                data["dataset"],
                data["query"],
                data["aggregate"],
                data.get("environment"),
                data.get("event_types"),
                params={
                    "project_id": [p.id for p in project_id],
                    "start": timezone.now() - timedelta(minutes=10),
                    "end": timezone.now(),
                },
            )
        except (InvalidSearchQuery, ValueError) as e:
            raise serializers.ValidationError("Invalid Query or Metric: {}".format(force_text(e)))
        else:
            if not snuba_filter.aggregations:
                raise serializers.ValidationError(
                    "Invalid Metric: Please pass a valid function for aggregation"
                )

            try:
                raw_query(
                    aggregations=snuba_filter.aggregations,
                    start=snuba_filter.start,
                    end=snuba_filter.end,
                    conditions=snuba_filter.conditions,
                    filter_keys=snuba_filter.filter_keys,
                    having=snuba_filter.having,
                    dataset=Dataset(data["dataset"].value),
                    limit=1,
                    referrer="alertruleserializer.test_query",
                )
            except Exception:
                logger.exception("Error while validating snuba alert rule query")
                raise serializers.ValidationError(
                    "Invalid Query or Metric: An error occurred while attempting "
                    "to run the query"
                )

        triggers = data.get("triggers", [])
        if not triggers:
            raise serializers.ValidationError("Must include at least one trigger")
        if len(triggers) > 2:
            raise serializers.ValidationError(
                "Must send 1 or 2 triggers - A critical trigger, and an optional warning trigger"
            )

        event_types = data.get("event_types")

        valid_event_types = dataset_valid_event_types[data["dataset"]]
        if event_types and set(event_types) - valid_event_types:
            raise serializers.ValidationError(
                "Invalid event types for this dataset. Valid event types are %s"
                % sorted([et.name.lower() for et in valid_event_types])
            )

        for i, (trigger, expected_label) in enumerate(
            zip(triggers, (CRITICAL_TRIGGER_LABEL, WARNING_TRIGGER_LABEL))
        ):
            if trigger.get("label", None) != expected_label:
                raise serializers.ValidationError(
                    'Trigger {} must be labeled "{}"'.format(i + 1, expected_label)
                )
        critical = triggers[0]
        threshold_type = data["threshold_type"]

        self._validate_trigger_thresholds(threshold_type, critical, data.get("resolve_threshold"))

        if len(triggers) == 2:
            warning = triggers[1]
            self._validate_trigger_thresholds(
                threshold_type, warning, data.get("resolve_threshold")
            )
            self._validate_critical_warning_triggers(threshold_type, critical, warning)

        return data
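The event_types check above is plain set subtraction: any requested type missing from the dataset's whitelist leaves the difference non-empty and fails validation. A minimal sketch, with hypothetical strings standing in for the real event-type enum members:

# Hypothetical whitelist; the real mapping is dataset_valid_event_types.
valid_event_types = {"error", "default"}
event_types = ["error", "transaction"]

invalid = set(event_types) - valid_event_types
if invalid:
    print("Invalid event types:", sorted(invalid))  # ['transaction']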
Code example #6
    def validate(self, data):
        """
        Performs validation on an alert rule's data.
        This includes ensuring there are either 1 or 2 triggers, each of which
        has actions and proper thresholds set. The critical trigger should
        both alert and resolve 'after' the warning trigger (whether that means
        > or < the value depends on threshold type).
        """
        data.setdefault("dataset", QueryDatasets.EVENTS)
        project_id = data.get("projects")
        if not project_id:
            # We just need a valid project id from the org so that we can verify
            # the query. We don't use the returned data anywhere, so it doesn't
            # matter which.
            project_id = list(
                self.context["organization"].project_set.all()[:1])

        try:
            entity_subscription = get_entity_subscription_for_dataset(
                dataset=QueryDatasets(data["dataset"]),
                aggregate=data["aggregate"],
                time_window=int(
                    timedelta(minutes=data["time_window"]).total_seconds()),
                extra_fields={
                    "org_id": project_id[0].organization_id,
                    "event_types": data.get("event_types"),
                },
            )
        except UnsupportedQuerySubscription as e:
            raise serializers.ValidationError(f"{e}")

        try:
            snuba_filter = build_snuba_filter(
                entity_subscription,
                data["query"],
                data.get("environment"),
                params={
                    "project_id": [p.id for p in project_id],
                    "start": timezone.now() - timedelta(minutes=10),
                    "end": timezone.now(),
                },
            )
            if any(cond[0] == "project_id"
                   for cond in snuba_filter.conditions):
                raise serializers.ValidationError(
                    {"query": "Project is an invalid search term"})
        except (InvalidSearchQuery, ValueError) as e:
            raise serializers.ValidationError(f"Invalid Query or Metric: {e}")
        else:
            if not snuba_filter.aggregations:
                raise serializers.ValidationError(
                    "Invalid Metric: Please pass a valid function for aggregation"
                )

            dataset = Dataset(data["dataset"].value)
            self._validate_time_window(dataset, data.get("time_window"))

            conditions = copy(snuba_filter.conditions)
            time_col = entity_subscription.time_col
            conditions += [
                [time_col, ">=", snuba_filter.start],
                [time_col, "<", snuba_filter.end],
            ]

            body = {
                "project": project_id[0].id,
                "project_id": project_id[0].id,
                "aggregations": snuba_filter.aggregations,
                "conditions": conditions,
                "filter_keys": snuba_filter.filter_keys,
                "having": snuba_filter.having,
                "dataset": dataset.value,
                "limit": 1,
                **entity_subscription.get_entity_extra_params(),
            }

            try:
                snql_query = json_to_snql(body,
                                          entity_subscription.entity_key.value)
                snql_query.validate()
            except Exception as e:
                raise serializers.ValidationError(
                    str(e),
                    params={"params": json.dumps(body),
                            "dataset": data["dataset"].value})

            try:
                raw_snql_query(snql_query,
                               referrer="alertruleserializer.test_query")
            except Exception:
                logger.exception(
                    "Error while validating snuba alert rule query")
                raise serializers.ValidationError(
                    "Invalid Query or Metric: An error occurred while attempting "
                    "to run the query")

        triggers = data.get("triggers", [])
        if not triggers:
            raise serializers.ValidationError(
                "Must include at least one trigger")
        if len(triggers) > 2:
            raise serializers.ValidationError(
                "Must send 1 or 2 triggers - A critical trigger, and an optional warning trigger"
            )

        event_types = data.get("event_types")

        valid_event_types = dataset_valid_event_types.get(
            data["dataset"], set())
        if event_types and set(event_types) - valid_event_types:
            raise serializers.ValidationError(
                "Invalid event types for this dataset. Valid event types are %s"
                % sorted(et.name.lower() for et in valid_event_types))

        for i, (trigger, expected_label) in enumerate(
                zip(triggers,
                    (CRITICAL_TRIGGER_LABEL, WARNING_TRIGGER_LABEL))):
            if trigger.get("label", None) != expected_label:
                raise serializers.ValidationError(
                    f'Trigger {i + 1} must be labeled "{expected_label}"')
        threshold_type = data["threshold_type"]
        self._translate_thresholds(threshold_type,
                                   data.get("comparison_delta"), triggers,
                                   data)

        critical = triggers[0]

        self._validate_trigger_thresholds(threshold_type, critical,
                                          data.get("resolve_threshold"))

        if len(triggers) == 2:
            warning = triggers[1]
            self._validate_trigger_thresholds(threshold_type, warning,
                                              data.get("resolve_threshold"))
            self._validate_critical_warning_triggers(threshold_type, critical,
                                                     warning)

        return data
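Compared with example #5, this version routes the test query through snuba-sdk: json_to_snql converts the legacy dict-style body into a SnQL Query object, and validate() raises before anything is sent to Snuba. A minimal round trip under those assumptions; the body below and the "events" entity name are invented for illustration:

from snuba_sdk.legacy import json_to_snql

# Invented legacy-style body; the "events" entity name is an assumption.
body = {
    "project": 1,
    "aggregations": [["count()", "", "count"]],
    "conditions": [["project_id", "IN", [1]]],
    "limit": 1,
}
snql_query = json_to_snql(body, "events")
snql_query.validate()  # raises on a malformed query before it reaches Snuba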