Example #1
def test_parse_query(mock_indexer, query_string, expected):
    local_indexer = MockIndexer()
    for s in ("", "[email protected]", "transaction", "/bar/:orgId/"):
        local_indexer.record(s)
    mock_indexer.resolve = local_indexer.resolve
    parsed = _resolve_tags(parse_query(query_string))
    assert parsed == expected
Example #2
def test_parse_query(monkeypatch, query_string, expected):
    local_indexer = MockIndexer()
    for s in ("", "[email protected]", "transaction", "/bar/:orgId/"):
        local_indexer.record(1, s)
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve",
                        local_indexer.resolve)
    parsed = resolve_tags(parse_query(query_string))
    assert parsed == expected
Example #3
    def test_sessions_metrics_equal_num_keys(self):
        """
        Tests whether the number of keys in the metrics implementation of
        sessions data is the same as in the sessions implementation.

        Runs twice: first against the sessions implementation to populate
        the cache, then against the metrics implementation, comparing with
        the cached results.
        """
        interval_days = "1d"
        groupbys = _session_groupby_powerset()

        for groupby in groupbys:
            with patch(
                "sentry.api.endpoints.organization_sessions.release_health",
                SessionsReleaseHealthBackend(),
            ):
                sessions_data = result_sorted(self.get_sessions_data(groupby, interval_days))

            with patch(
                "sentry.release_health.metrics_sessions_v2.indexer.resolve", MockIndexer().resolve
            ), patch(
                "sentry.api.endpoints.organization_sessions.release_health",
                MetricsReleaseHealthBackend(),
            ):
                metrics_data = result_sorted(self.get_sessions_data(groupby, interval_days))

            errors = compare_results(
                sessions=sessions_data,
                metrics=metrics_data,
                rollup=24 * 60 * 60,  # the "1d" interval, in seconds
            )
            assert len(errors) == 0
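
The `_session_groupby_powerset()` helper is referenced but not shown; a minimal sketch, assuming it enumerates every combination of the session group-by keys (the key names here are illustrative, not necessarily Sentry's exact set):

from itertools import chain, combinations


def _session_groupby_powerset():
    keys = ["project", "release", "environment", "session.status"]
    # Every subset of the keys, from the empty group-by up to all of them.
    return chain.from_iterable(
        combinations(keys, size) for size in range(len(keys) + 1)
    )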
Example #4
def test_translate_results_missing_slots(_1, _2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.reverse_resolve",
                        MockIndexer().reverse_resolve)
    query_params = MultiValueDict({
        "field": [
            "sum(sentry.sessions.session)",
        ],
        "interval": ["1d"],
        "statsPeriod": ["3d"],
    })
    query_definition = QueryDefinition(query_params)

    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "sum(sentry.sessions.session)": 400,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": "2021-08-23T00:00Z",
                        "sum(sentry.sessions.session)": 100,
                    },
                    # no data for 2021-08-24
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 300,
                    },
                ],
            },
        },
    }

    intervals = list(get_intervals(query_definition))
    assert SnubaResultConverter(1, query_definition, intervals,
                                results).translate_results() == [
                                    {
                                        "by": {},
                                        "totals": {
                                            "sum(sentry.sessions.session)":
                                            400,
                                        },
                                        "series": {
                                            # No data for 2021-08-24
                                            "sum(sentry.sessions.session)":
                                            [100, 0, 300],
                                        },
                                    },
                                ]
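
The missing 2021-08-24 bucket becomes a 0 in the expected series above. A sketch of that zero-filling step (the helper name is hypothetical), assuming intervals carry the same ISO timestamps that appear in bucketed_time:

def zero_fill(intervals, rows, field):
    # Map each returned bucket to its value, then emit one slot per interval,
    # defaulting to 0 where Snuba returned no row.
    by_time = {row["bucketed_time"]: row[field] for row in rows}
    return [by_time.get(ts, 0) for ts in intervals]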
Example #5
def __translated_payload(
    payload,
) -> Dict[str, Union[str, int, List[int], MutableMapping[int, int]]]:
    """
    Translates strings to ints using the MockIndexer
    in addition to adding the retention_days

    """
    indexer = MockIndexer()
    payload = payload.copy()

    new_tags = {
        indexer.resolve(k): indexer.resolve(v)
        for k, v in payload["tags"].items()
    }
    payload["metric_id"] = indexer.resolve(payload["name"])
    payload["retention_days"] = 90
    payload["tags"] = new_tags

    del payload["name"]
    return payload
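
A hypothetical input/output pair for __translated_payload (metric_id 9 and tag key 2 follow the comments in the other examples; the tag value ID is an assumption about MockIndexer's table):

# {"name": "sentry.sessions.session", "tags": {"environment": "production"}}
# -> {"metric_id": 9, "tags": {2: <id of "production">}, "retention_days": 90}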
Example #6
def test_build_snuba_query_orderby(mock_now, mock_now2, mock_indexer):

    mock_indexer.resolve = MockIndexer().resolve
    query_params = MultiValueDict({
        "query":
        ["release:staging"
         ],  # weird release but we need a string exising in mock indexer
        "groupBy": ["session.status", "environment"],
        "field": [
            "sum(sentry.sessions.session)",
        ],
        "orderBy": ["-sum(sentry.sessions.session)"],
        "limit": [3],
    })
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder([PseudoProject(1, 1)],
                                      query_definition).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries
    assert counter_queries["series"] is None  # No series because of orderBy

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[Function("sum", [Column("value")], "value")],
        groupby=[
            Column("metric_id"),
            Column("tags[8]"),  # session.status
            Column("tags[2]"),  # environment
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("metric_id"), Op.IN, [9]),
            Condition(Column("timestamp"), Op.GTE,
                      datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT,
                      datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
        ],
        orderby=[OrderBy(Column("value"), Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
Example #7
    new_tags = {
        indexer.resolve(k): indexer.resolve(v)
        for k, v in payload["tags"].items()
    }
    payload["metric_id"] = indexer.resolve(payload["name"])
    payload["retention_days"] = 90
    payload["tags"] = new_tags

    del payload["name"]
    return payload


@patch("sentry.sentry_metrics.indexer.tasks.process_indexed_metrics")
@patch("sentry.sentry_metrics.multiprocess.get_indexer",
       return_value=MockIndexer())
def test_process_messages(mock_indexer, mock_task) -> None:
    message_payloads = [counter_payload, distribution_payload, set_payload]
    message_batch = [
        Message(
            Partition(Topic("topic"), 0),
            i + 1,
            KafkaPayload(None,
                         json.dumps(payload).encode("utf-8"), []),
            datetime.now(),
        ) for i, payload in enumerate(message_payloads)
    ]
    # the outer message uses the last message's partition, offset, and timestamp
    last = message_batch[-1]
    outer_message = Message(last.partition, last.offset, message_batch,
                            last.timestamp)
Example #8
from sentry.models import Organization
from sentry.sentry_metrics.indexer.mock import MockIndexer

INDEXER = MockIndexer()


def test_resolve():
    mock_org_id = Organization().id
    assert INDEXER.resolve(mock_org_id, "what") is None
    assert INDEXER.resolve(mock_org_id, "user") == 11


def test_reverse_resolve():
    mock_org_id = Organization().id
    assert INDEXER.reverse_resolve(mock_org_id, 666) is None
    assert INDEXER.reverse_resolve(mock_org_id, 11) == "user"
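
A minimal indexer with the same contract as the assertions above; the class is a sketch (only the "user" -> 11 mapping is taken from the test, the rest is assumed):

from typing import Optional


class SimpleMockIndexer:
    def __init__(self) -> None:
        self._strings = {"user": 11}  # string -> id
        self._ids = {v: k for k, v in self._strings.items()}  # id -> string

    def record(self, org_id: int, string: str) -> int:
        # Assign the next free ID to unseen strings.
        if string not in self._strings:
            new_id = max(self._ids, default=0) + 1
            self._strings[string] = new_id
            self._ids[new_id] = string
        return self._strings[string]

    def resolve(self, org_id: int, string: str) -> Optional[int]:
        return self._strings.get(string)

    def reverse_resolve(self, org_id: int, id_: int) -> Optional[str]:
        return self._ids.get(id_)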
Example #9
def test_translate_results(_1, _2, mock_indexer):
    mock_indexer.reverse_resolve = MockIndexer().reverse_resolve

    query_params = MultiValueDict({
        "groupBy": ["session.status"],
        "field": [
            "sum(sentry.sessions.session)",
            "max(sentry.sessions.session.duration)",
            "p50(sentry.sessions.session.duration)",
            "p95(sentry.sessions.session.duration)",
        ],
        "interval": ["1d"],
        "statsPeriod": ["2d"],
    })
    query_definition = QueryDefinition(query_params)

    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,  # session.status:healthy
                        "value": 300,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,  # session.status:abnormal
                        "value": 330,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "value": 100,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "value": 110,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "value": 200,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "value": 220,
                    },
                ],
            },
        },
        "metrics_distributions": {
            "totals": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "max": 123.4,
                        "percentiles": [1, 2, 3, 4, 5],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "max": 456.7,
                        "percentiles": [1.5, 2.5, 3.5, 4.5, 5.5],
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max": 10.1,
                        "percentiles": [1.1, 2.1, 3.1, 4.1, 5.1],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max": 20.2,
                        "percentiles": [1.2, 2.2, 3.2, 4.2, 5.2],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max": 30.3,
                        "percentiles": [1.3, 2.3, 3.3, 4.3, 5.3],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max": 40.4,
                        "percentiles": [1.4, 2.4, 3.4, 4.4, 5.4],
                    },
                ],
            },
        },
    }

    assert SnubaResultConverter(
        1, query_definition, intervals, results).translate_results() == [
            {
                "by": {
                    "session.status": "healthy"
                },
                "totals": {
                    "sum(sentry.sessions.session)": 300,
                    "max(sentry.sessions.session.duration)": 123.4,
                    "p50(sentry.sessions.session.duration)": 1,
                    "p95(sentry.sessions.session.duration)": 4,
                },
                "series": {
                    "sum(sentry.sessions.session)": [100, 200],
                    "max(sentry.sessions.session.duration)": [10.1, 30.3],
                    "p50(sentry.sessions.session.duration)": [1.1, 1.3],
                    "p95(sentry.sessions.session.duration)": [4.1, 4.3],
                },
            },
            {
                "by": {
                    "session.status": "abnormal"
                },
                "totals": {
                    "sum(sentry.sessions.session)": 330,
                    "max(sentry.sessions.session.duration)": 456.7,
                    "p50(sentry.sessions.session.duration)": 1.5,
                    "p95(sentry.sessions.session.duration)": 4.5,
                },
                "series": {
                    "sum(sentry.sessions.session)": [110, 220],
                    "max(sentry.sessions.session.duration)": [20.2, 40.4],
                    "p50(sentry.sessions.session.duration)": [1.2, 1.4],
                    "p95(sentry.sessions.session.duration)": [4.2, 4.4],
                },
            },
        ]
Example #10
def test_build_snuba_query(mock_now, mock_now2, mock_indexer):

    mock_indexer.resolve = MockIndexer().resolve
    # Your typical release health query querying everything
    query_params = MultiValueDict({
        "query":
        ["release:staging"
         ],  # weird release but we need a string exising in mock indexer
        "groupBy": ["session.status", "environment"],
        "field": [
            "sum(sentry.sessions.session)",
            "count_unique(sentry.sessions.user)",
            "p95(sentry.sessions.session.duration)",
        ],
    })
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder([PseudoProject(1, 1)],
                                      query_definition).get_snuba_queries()

    def expected_query(match, select, extra_groupby):
        function, column, alias = select
        return Query(
            dataset="metrics",
            match=Entity(match),
            select=[Function(function, [Column(column)], alias)],
            groupby=[
                Column("metric_id"),
                Column("tags[8]"),  # session.status
                Column("tags[2]"),  # environment
            ] + extra_groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("metric_id"), Op.IN, [9, 11, 7]),
                Condition(Column("timestamp"), Op.GTE,
                          datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT,
                          datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("tags[6]"), Op.IN, [10]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )

    assert snuba_queries["metrics_counters"]["totals"] == expected_query(
        "metrics_counters", ("sum", "value", "value"), [])

    expected_percentile_select = ("quantiles(0.5,0.75,0.9,0.95,0.99)", "value",
                                  "percentiles")
    assert snuba_queries == {
        "metrics_counters": {
            "totals":
            expected_query("metrics_counters", ("sum", "value", "value"), []),
            "series":
            expected_query("metrics_counters", ("sum", "value", "value"),
                           [Column("bucketed_time")]),
        },
        "metrics_sets": {
            "totals":
            expected_query("metrics_sets", ("uniq", "value", "value"), []),
            "series":
            expected_query("metrics_sets", ("uniq", "value", "value"),
                           [Column("bucketed_time")]),
        },
        "metrics_distributions": {
            "totals":
            expected_query("metrics_distributions", expected_percentile_select,
                           []),
            "series":
            expected_query(
                "metrics_distributions",
                expected_percentile_select,
                [Column("bucketed_time")],
            ),
        },
    }
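
The expected queries above imply an op -> Snuba aggregate mapping per entity; a sketch of that table (only the entries exercised in this test, as an assumption about the real OP_TO_SNUBA_FUNCTION):

OP_TO_SNUBA_FUNCTION_SKETCH = {
    "metrics_counters": {"sum": "sum"},
    "metrics_sets": {"count_unique": "uniq"},
    # All percentile ops share one quantiles() select aliased "percentiles".
    "metrics_distributions": {
        "p50": "quantiles(0.5,0.75,0.9,0.95,0.99)",
        "p95": "quantiles(0.5,0.75,0.9,0.95,0.99)",
    },
}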
Example #11
def test_translate_results_derived_metrics(_1, _2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.reverse_resolve",
                        MockIndexer().reverse_resolve)

    query_params = MultiValueDict({
        "groupBy": [],
        "field": [
            "session.errored",
            "session.crash_free_rate",
            "session.all",
        ],
        "interval": ["1d"],
        "statsPeriod": ["2d"],
    })
    query_definition = QueryDefinition(query_params)
    fields_in_entities = {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }

    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [{
                    "session.crash_free_rate": 0.5,
                    "session.all": 8.0,
                    "session.errored_preaggregated": 3,
                }],
            },
            "series": {
                "data": [
                    {
                        "bucketed_time": "2021-08-24T00:00Z",
                        "session.crash_free_rate": 0.5,
                        "session.all": 4,
                        "session.errored_preaggregated": 1,
                    },
                    {
                        "bucketed_time": "2021-08-25T00:00Z",
                        "session.crash_free_rate": 0.5,
                        "session.all": 4,
                        "session.errored_preaggregated": 2,
                    },
                ],
            },
        },
        "metrics_sets": {
            "totals": {
                "data": [
                    {
                        "session.errored_set": 3,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "bucketed_time": "2021-08-24T00:00Z",
                        "session.errored_set": 2
                    },
                    {
                        "bucketed_time": "2021-08-25T00:00Z",
                        "session.errored_set": 1
                    },
                ],
            },
        },
    }

    assert SnubaResultConverter(1, query_definition, fields_in_entities,
                                intervals, results).translate_results() == [
                                    {
                                        "by": {},
                                        "totals": {
                                            "session.all": 8,
                                            "session.crash_free_rate": 0.5,
                                            "session.errored": 6,
                                        },
                                        "series": {
                                            "session.all": [4, 4],
                                            "session.crash_free_rate":
                                            [0.5, 0.5],
                                            "session.errored": [3, 3],
                                        },
                                    },
                                ]
Example #12
def test_translate_results(_1, _2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.reverse_resolve",
                        MockIndexer().reverse_resolve)

    query_params = MultiValueDict({
        "groupBy": ["session.status"],
        "field": [
            "sum(sentry.sessions.session)",
            "max(sentry.sessions.session.duration)",
            "p50(sentry.sessions.session.duration)",
            "p95(sentry.sessions.session.duration)",
        ],
        "interval": ["1d"],
        "statsPeriod": ["2d"],
    })
    query_definition = QueryDefinition(query_params)
    fields_in_entities = {
        "metrics_counters": [("sum", "sentry.sessions.session")],
        "metrics_distributions": [
            ("max", "sentry.sessions.session.duration"),
            ("p50", "sentry.sessions.session.duration"),
            ("p95", "sentry.sessions.session.duration"),
        ],
    }

    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,  # session.status:healthy
                        "sum(sentry.sessions.session)": 300,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,  # session.status:abnormal
                        "sum(sentry.sessions.session)": 330,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "sum(sentry.sessions.session)": 100,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "sum(sentry.sessions.session)": 110,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 200,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 220,
                    },
                ],
            },
        },
        "metrics_distributions": {
            "totals": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "max(sentry.sessions.session.duration)": 123.4,
                        "p50(sentry.sessions.session.duration)": [1],
                        "p95(sentry.sessions.session.duration)": [4],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "max(sentry.sessions.session.duration)": 456.7,
                        "p50(sentry.sessions.session.duration)": [1.5],
                        "p95(sentry.sessions.session.duration)": [4.5],
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max(sentry.sessions.session.duration)": 10.1,
                        "p50(sentry.sessions.session.duration)": [1.1],
                        "p95(sentry.sessions.session.duration)": [4.1],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max(sentry.sessions.session.duration)": 20.2,
                        "p50(sentry.sessions.session.duration)": [1.2],
                        "p95(sentry.sessions.session.duration)": [4.2],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max(sentry.sessions.session.duration)": 30.3,
                        "p50(sentry.sessions.session.duration)": [1.3],
                        "p95(sentry.sessions.session.duration)": [4.3],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max(sentry.sessions.session.duration)": 40.4,
                        "p50(sentry.sessions.session.duration)": [1.4],
                        "p95(sentry.sessions.session.duration)": [4.4],
                    },
                ],
            },
        },
    }

    assert SnubaResultConverter(
        1, query_definition, fields_in_entities, intervals,
        results).translate_results() == [
            {
                "by": {
                    "session.status": "healthy"
                },
                "totals": {
                    "sum(sentry.sessions.session)": 300,
                    "max(sentry.sessions.session.duration)": 123.4,
                    "p50(sentry.sessions.session.duration)": 1,
                    "p95(sentry.sessions.session.duration)": 4,
                },
                "series": {
                    "sum(sentry.sessions.session)": [100, 200],
                    "max(sentry.sessions.session.duration)": [10.1, 30.3],
                    "p50(sentry.sessions.session.duration)": [1.1, 1.3],
                    "p95(sentry.sessions.session.duration)": [4.1, 4.3],
                },
            },
            {
                "by": {
                    "session.status": "abnormal"
                },
                "totals": {
                    "sum(sentry.sessions.session)": 330,
                    "max(sentry.sessions.session.duration)": 456.7,
                    "p50(sentry.sessions.session.duration)": 1.5,
                    "p95(sentry.sessions.session.duration)": 4.5,
                },
                "series": {
                    "sum(sentry.sessions.session)": [110, 220],
                    "max(sentry.sessions.session.duration)": [20.2, 40.4],
                    "p50(sentry.sessions.session.duration)": [1.2, 1.4],
                    "p95(sentry.sessions.session.duration)": [4.2, 4.4],
                },
            },
        ]
Example #13
def test_build_snuba_query_orderby(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve",
                        MockIndexer().resolve)
    query_params = MultiValueDict({
        "query":
        ["release:staging"
         ],  # weird release but we need a string exising in mock indexer
        "groupBy": ["session.status", "environment"],
        "field": [
            "sum(sentry.sessions.session)",
        ],
        "orderBy": ["-sum(sentry.sessions.session)"],
    })
    query_definition = QueryDefinition(query_params,
                                       paginator_kwargs={"limit": 3})
    snuba_queries, _ = SnubaQueryBuilder([PseudoProject(1, 1)],
                                         query_definition).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries

    op = "sum"
    metric_name = "sentry.sessions.session"
    select = Function(
        OP_TO_SNUBA_FUNCTION["metrics_counters"]["sum"],
        [
            Column("value"),
            Function("equals",
                     [Column("metric_id"),
                      resolve_weak(metric_name)])
        ],
        alias=f"{op}({metric_name})",
    )

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE,
                      datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT,
                      datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
    assert counter_queries["series"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
            Column("bucketed_time"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE,
                      datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT,
                      datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(6480),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
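
resolve_weak is used where the resolved ID must be embeddable directly in a query expression; a sketch of its contract (the helper name, the table argument, and the -1 sentinel are assumptions), assuming it returns a sentinel integer rather than None for unknown strings:

STRING_NOT_FOUND = -1  # sentinel; the exact value is an assumption


def resolve_weak_sketch(table: dict, string: str) -> int:
    # Unlike resolve(), this never returns None, so the result can be placed
    # straight into a Snuba Function/Condition argument.
    resolved = table.get(string)
    return resolved if resolved is not None else STRING_NOT_FOUND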
Example #14
def test_build_snuba_query_derived_metrics(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve",
                        MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict({
        "groupBy": [],
        "field": [
            "session.errored",
            "session.crash_free_rate",
            "session.all",
        ],
        "interval": ["1d"],
        "statsPeriod": ["2d"],
    })
    query_definition = QueryDefinition(query_params)
    query_builder = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition)
    snuba_queries, fields_in_entities = query_builder.get_snuba_queries()
    assert fields_in_entities == {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }
    for key in ("totals", "series"):
        groupby = [] if key == "totals" else [Column("bucketed_time")]
        assert snuba_queries["metrics_counters"][key] == (Query(
            dataset="metrics",
            match=Entity("metrics_counters"),
            select=[
                errored_preaggr_sessions(
                    metric_ids=[resolve_weak("sentry.sessions.session")],
                    alias="session.errored_preaggregated",
                ),
                percentage(
                    crashed_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.crashed",
                    ),
                    all_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.all",
                    ),
                    alias="session.crash_free_rate",
                ),
                all_sessions(
                    metric_ids=[resolve_weak("sentry.sessions.session")],
                    alias="session.all"),
            ],
            groupby=groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE,
                          datetime(2021, 8, 24, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT,
                          datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("metric_id"), Op.IN,
                          [resolve_weak("sentry.sessions.session")]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        ))
        assert snuba_queries["metrics_sets"][key] == (Query(
            dataset="metrics",
            match=Entity("metrics_sets"),
            select=[
                sessions_errored_set(
                    metric_ids=[resolve_weak("sentry.sessions.session.error")],
                    alias="session.errored_set",
                ),
            ],
            groupby=groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE,
                          datetime(2021, 8, 24, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT,
                          datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("metric_id"), Op.IN,
                          [resolve_weak("sentry.sessions.session.error")]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        ))
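
The derived-metric helpers above (errored_preaggr_sessions, crashed_sessions, all_sessions, percentage) each return an aliased Snuba expression. A sketch of what percentage() might build for session.crash_free_rate, assuming a 1 - crashed/all formula (the exact expression is an assumption):

from snuba_sdk import Function


def percentage_sketch(numerator, denominator, alias=None):
    # For crash_free_rate the numerator would be session.crashed and the
    # denominator session.all; both arrive as already-aliased selects.
    return Function(
        "minus", [1.0, Function("divide", [numerator, denominator])], alias
    )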