Code example #1
File: logic.py  Project: js-shanyang/sentry
def bulk_get_incident_event_stats(incidents, query_params_list, data_points=50):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[
                (
                    query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][0],
                    query_aggregation_to_snuba[QueryAggregations(incident.aggregation)][1],
                    "count",
                )
            ],
            orderby="time",
            groupby=["time"],
            rollup=max(int(incident.duration.total_seconds() / data_points), 1),
            limit=10000,
            **query_param
        )
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list, referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end, snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
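
In this variant the rollup (bucket width in seconds) is derived from the incident's duration, so the query returns roughly data_points buckets no matter how long the incident ran. A minimal sketch of that arithmetic, using assumed values rather than a real incident:

from datetime import timedelta

# Assumed, illustrative values; the real duration comes from incident.duration.
duration = timedelta(hours=2)
data_points = 50
rollup = max(int(duration.total_seconds() / data_points), 1)
print(rollup)  # 144 -> one time bucket roughly every 144 seconds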
Code example #2
def bulk_get_incident_event_stats(incidents, query_params_list):
    snuba_params_list = [
        SnubaQueryParams(
            aggregations=[
                (
                    query_aggregation_to_snuba[
                        aggregate_to_query_aggregation[incident.alert_rule.snuba_query.aggregate]
                    ][0],
                    query_aggregation_to_snuba[
                        aggregate_to_query_aggregation[incident.alert_rule.snuba_query.aggregate]
                    ][1],
                    "count",
                )
            ],
            orderby="time",
            groupby=["time"],
            rollup=incident.alert_rule.snuba_query.time_window,
            limit=10000,
            **query_param
        )
        for incident, query_param in zip(incidents, query_params_list)
    ]
    results = bulk_raw_query(snuba_params_list, referrer="incidents.get_incident_event_stats")
    return [
        SnubaTSResult(result, snuba_params.start, snuba_params.end, snuba_params.rollup)
        for snuba_params, result in zip(snuba_params_list, results)
    ]
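
This later variant takes its rollup straight from the alert rule's snuba_query.time_window and resolves the aggregation in two steps: the aggregate string stored on the snuba query is mapped to a QueryAggregations value, which in turn is mapped to a (function, column, alias) tuple for Snuba. A rough sketch of that double lookup; the table contents below are illustrative assumptions, not the project's actual mappings:

# Illustrative stand-ins for the real lookup tables (assumption, not the actual Sentry mappings).
aggregate_to_query_aggregation = {"count()": "total"}
query_aggregation_to_snuba = {"total": ("count()", "", "count()")}

aggregate = "count()"  # e.g. incident.alert_rule.snuba_query.aggregate
snuba_function, snuba_column, _alias = query_aggregation_to_snuba[
    aggregate_to_query_aggregation[aggregate]
]
aggregation = (snuba_function, snuba_column, "count")
print(aggregation)  # ('count()', '', 'count')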
Code example #3
    def test_cache(self, _bulk_snuba_query):
        one_min_ago = iso_format(before_now(minutes=1))
        event_1 = self.store_event(
            data={
                "fingerprint": ["group-1"],
                "message": "hello",
                "timestamp": one_min_ago
            },
            project_id=self.project.id,
        )
        event_2 = self.store_event(
            data={
                "fingerprint": ["group-2"],
                "message": "hello",
                "timestamp": one_min_ago
            },
            project_id=self.project.id,
        )
        params = [
            snuba.SnubaQueryParams(
                start=timezone.now() - timedelta(days=1),
                end=timezone.now(),
                selected_columns=["event_id", "group_id", "timestamp"],
                filter_keys={
                    "project_id": [self.project.id],
                    "group_id": [event_1.group.id]
                },
            ),
            snuba.SnubaQueryParams(
                start=timezone.now() - timedelta(days=1),
                end=timezone.now(),
                selected_columns=["event_id", "group_id", "timestamp"],
                filter_keys={
                    "project_id": [self.project.id],
                    "group_id": [event_2.group.id]
                },
            ),
        ]

        results = snuba.bulk_raw_query(
            copy.deepcopy(params),
            use_cache=True,
        )
        assert [{(item["group_id"], item["event_id"])
                 for item in r["data"]} for r in results] == [
                     {(event_1.group.id, event_1.event_id)},
                     {(event_2.group.id, event_2.event_id)},
                 ]
        assert _bulk_snuba_query.call_count == 1
        _bulk_snuba_query.reset_mock()

        # Make sure this doesn't appear in the cached results
        self.store_event(
            data={
                "fingerprint": ["group-2"],
                "message": "hello there",
                "timestamp": one_min_ago
            },
            project_id=self.project.id,
        )

        results = snuba.bulk_raw_query(
            copy.deepcopy(params),
            use_cache=True,
        )
        assert [{(item["group_id"], item["event_id"])
                 for item in r["data"]} for r in results] == [
                     {(event_1.group.id, event_1.event_id)},
                     {(event_2.group.id, event_2.event_id)},
                 ]
        assert _bulk_snuba_query.call_count == 0
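
The _bulk_snuba_query argument shows that this is a test-class method wrapped by a mock patch, which lets the test count how many Snuba round trips happen while still executing them, so the second bulk_raw_query call with use_cache=True can be shown to hit the cache (call_count stays at 0). The class and decorator are not included in the snippet; they presumably look roughly like the sketch below, where the patch target, side_effect wiring, and class name are assumptions:

# Imports used by the test body above, plus a presumed test-class wrapper.
import copy
from datetime import timedelta
from unittest import mock

from django.utils import timezone

from sentry.testutils import TestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.utils import snuba


class BulkRawQueryTest(TestCase, SnubaTestCase):  # class name is an assumption
    # Patch the private Snuba helper but still call through to it, so real data is returned
    # while call_count tracks how many network round trips were made.
    @mock.patch("sentry.utils.snuba._bulk_snuba_query", side_effect=snuba._bulk_snuba_query)
    def test_cache(self, _bulk_snuba_query):
        ...  # body as shown above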
Code example #4
File: logic.py  Project: wenlaizhou/sentry
def get_incident_event_stats(incident,
                             start=None,
                             end=None,
                             windowed_stats=False):
    """
    Gets event stats for an incident. If start/end are provided, uses that time
    period, otherwise uses the incident start/current_end.
    """
    query_params = build_incident_query_params(incident,
                                               start=start,
                                               end=end,
                                               windowed_stats=windowed_stats)
    time_window = incident.alert_rule.snuba_query.time_window
    aggregations = query_params.pop("aggregations")[0]
    snuba_params = [
        SnubaQueryParams(aggregations=[(aggregations[0], aggregations[1],
                                        "count")],
                         orderby="time",
                         groupby=["time"],
                         rollup=time_window,
                         limit=10000,
                         **query_params)
    ]

    # We make extra queries to fetch these buckets
    def build_extra_query_params(bucket_start):
        extra_bucket_query_params = build_incident_query_params(
            incident,
            start=bucket_start,
            end=bucket_start + timedelta(seconds=time_window))
        aggregations = extra_bucket_query_params.pop("aggregations")[0]
        return SnubaQueryParams(aggregations=[(aggregations[0],
                                               aggregations[1], "count")],
                                limit=1,
                                **extra_bucket_query_params)

    # We want to include the specific buckets for the incident start and closed times,
    # so that there's no need to interpolate to show them on the frontend. If they're
    # cleanly divisible by the `time_window` then there's no need to fetch, since
    # they'll be included in the standard results anyway.
    start_query_params = None
    extra_buckets = []
    if int(to_timestamp(incident.date_started)) % time_window:
        start_query_params = build_extra_query_params(incident.date_started)
        snuba_params.append(start_query_params)
        extra_buckets.append(incident.date_started)

    if incident.date_closed:
        date_closed = incident.date_closed.replace(second=0, microsecond=0)
        if int(to_timestamp(date_closed)) % time_window:
            snuba_params.append(build_extra_query_params(date_closed))
            extra_buckets.append(date_closed)

    results = bulk_raw_query(snuba_params,
                             referrer="incidents.get_incident_event_stats")
    # Once we receive the results, if we requested extra buckets we now need to label
    # them with timestamp data, since the query we ran only returns the count.
    for extra_start, result in zip(extra_buckets, results[1:]):
        result["data"][0]["time"] = int(to_timestamp(extra_start))
    merged_data = list(chain(*[r["data"] for r in results]))
    merged_data.sort(key=lambda row: row["time"])
    results[0]["data"] = merged_data
    # When an incident has just been created it's possible for the actual incident start
    # date to be greater than the latest bucket for the query. Get the actual end date
    # here.
    end_date = snuba_params[0].end
    if start_query_params:
        end_date = max(end_date, start_query_params.end)

    return SnubaTSResult(results[0], snuba_params[0].start, end_date,
                         snuba_params[0].rollup)
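
The extra per-bucket queries are only issued when the incident start or close time is not aligned to a bucket boundary, which is what the modulo check against time_window decides. A small sketch of that check with assumed values:

# Assumed, illustrative values; the real ones come from the incident and its alert rule.
time_window = 60  # seconds
date_started_ts = 1600000030  # int(to_timestamp(incident.date_started))

# A non-zero remainder means the start falls inside a bucket, so an extra one-row query is added.
needs_extra_start_bucket = bool(date_started_ts % time_window)
print(needs_extra_start_bucket)  # True (1600000030 % 60 == 10)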
Code example #5
def query_trace_data(
    trace_id: str, params: Mapping[str, str]
) -> Tuple[Sequence[SnubaTransaction], Sequence[SnubaError]]:
    transaction_query = discover.prepare_discover_query(
        selected_columns=[
            "id",
            "transaction.status",
            "transaction.op",
            "transaction.duration",
            "transaction",
            "timestamp",
            # project gets the slug, and project.id gets added automatically
            "project",
            "trace.span",
            "trace.parent_span",
            'to_other(trace.parent_span, "", 0, 1) AS root',
        ],
        # We want to guarantee at least getting the root, and hopefully events near it with timestamp
        # id is just for consistent results
        orderby=["-root", "timestamp", "id"],
        params=params,
        query=f"event.type:transaction trace:{trace_id}",
    )
    error_query = discover.prepare_discover_query(
        selected_columns=[
            "id",
            "project",
            "timestamp",
            "trace.span",
            "transaction",
            "issue",
            "title",
            "tags[level]",
        ],
        # Don't add timestamp to this orderby as snuba will have to split the time range up and make multiple queries
        orderby=["id"],
        params=params,
        query=f"!event.type:transaction trace:{trace_id}",
        auto_fields=False,
    )
    snuba_params = [
        SnubaQueryParams(
            dataset=Dataset.Discover,
            start=snuba_filter.start,
            end=snuba_filter.end,
            groupby=snuba_filter.groupby,
            conditions=snuba_filter.conditions,
            filter_keys=snuba_filter.filter_keys,
            aggregations=snuba_filter.aggregations,
            selected_columns=snuba_filter.selected_columns,
            having=snuba_filter.having,
            orderby=snuba_filter.orderby,
            limit=MAX_TRACE_SIZE,
        )
        for snuba_filter in [transaction_query.filter, error_query.filter]
    ]
    results = bulk_raw_query(
        snuba_params,
        referrer="api.trace-view.get-events",
    )
    transformed_results = [
        discover.transform_results(result, query.fields["functions"], query.columns, query.filter)[
            "data"
        ]
        for result, query in zip(results, [transaction_query, error_query])
    ]
    return cast(Sequence[SnubaTransaction], transformed_results[0]), cast(
        Sequence[SnubaError], transformed_results[1]
    )
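
A hypothetical call site for query_trace_data. The params shape below is an assumption about what the Discover query helpers expect (a start/end range plus project and organization ids), and the trace id is a made-up 32-character hex string:

from datetime import datetime, timedelta, timezone

params = {
    "start": datetime.now(timezone.utc) - timedelta(hours=1),
    "end": datetime.now(timezone.utc),
    "project_id": [1],       # assumed project id
    "organization_id": 1,    # assumed organization id
}
transactions, errors = query_trace_data("771b5e9ba8c84d5a9c5d3e1f2a0b4c6d", params)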