# Imports consolidated for the snippets below. The third-party imports are
# standard; the sentry-internal module paths are best-guess assumptions.
from datetime import datetime

import pytz
from django.utils.datastructures import MultiValueDict
from snuba_sdk import (
    Column,
    Condition,
    Direction,
    Entity,
    Function,
    Granularity,
    Limit,
    Offset,
    Op,
    OrderBy,
    Query,
)

from sentry.sentry_metrics.indexer.mock import MockIndexer
from sentry.snuba.metrics import (  # assumed location of the metrics query layer
    MAX_POINTS,
    OP_TO_SNUBA_FUNCTION,
    QueryDefinition,
    SnubaQueryBuilder,
    SnubaResultConverter,
    get_intervals,
    resolve_weak,
)
from sentry.snuba.metrics.fields.snql import (  # assumed path for the derived-metric SnQL helpers
    all_sessions,
    crashed_sessions,
    errored_preaggr_sessions,
    percentage,
    sessions_errored_set,
)

# The endpoint snippets further below additionally rely on rest_framework's
# Request, Response, and ParseError, plus sentry.features and the series
# helpers (get_series, get_datasource, InvalidField, InvalidParams,
# DerivedMetricParseException) from their own modules.


def test_translate_results_missing_slots(_1, _2):
    query_params = MultiValueDict(
        {
            "field": [
                "sum(session)",
            ],
            "interval": ["1d"],
            "statsPeriod": ["3d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "value": 400,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": datetime(2021, 8, 23, tzinfo=pytz.utc),
                        "value": 100,
                    },
                    # no data for 2021-08-24
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "value": 300,
                    },
                ],
            },
        },
    }
    intervals = list(query_definition.get_intervals())
    assert SnubaResultConverter(1, query_definition, intervals, results).translate_results() == [
        {
            "by": {},
            "totals": {
                "sum(session)": 400,
            },
            "series": {
                # No data for 2021-08-24
                "sum(session)": [100, 0, 300],
            },
        },
    ]
def test_build_snuba_query(mock_now, mock_now2):
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(session)",
                "count_unique(user)",
                "p95(session.duration)",
            ],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder(PseudoProject(1, 1), query_definition).get_snuba_queries()

    def expected_query(match, select, extra_groupby):
        return Query(
            dataset="metrics",
            match=Entity(match),
            select=[Column(select)],
            groupby=[Column("metric_id"), Column("tags[8]"), Column("tags[2]")] + extra_groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.EQ, 1),
                Condition(Column("metric_id"), Op.IN, [9, 11, 7]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("tags[6]"), Op.EQ, 10),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )

    assert snuba_queries["metrics_counters"]["totals"] == expected_query(
        "metrics_counters", "value", []
    )

    assert snuba_queries == {
        "metrics_counters": {
            "totals": expected_query("metrics_counters", "value", []),
            "series": expected_query("metrics_counters", "value", [Column("bucketed_time")]),
        },
        "metrics_sets": {
            "totals": expected_query("metrics_sets", "value", []),
            "series": expected_query("metrics_sets", "value", [Column("bucketed_time")]),
        },
        "metrics_distributions": {
            "totals": expected_query("metrics_distributions", "percentiles", []),
            "series": expected_query(
                "metrics_distributions", "percentiles", [Column("bucketed_time")]
            ),
        },
    }
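# Reference sheet for the integer ids hard-coded in the fixtures above and
# below, compiled from the inline comments and where-conditions; a sketch for
# the reader, not the MockIndexer source:
ASSUMED_INDEXER_IDS = {
    "session": 9,           # metric_id
    "user": 11,             # metric_id
    "session.duration": 7,  # metric_id
    "session.status": 8,    # groupBy -> Column("tags[8]")
    "environment": 2,       # groupBy -> Column("tags[2]")
    "release": 6,           # release:staging -> condition on tags[6]...
    "staging": 10,          # ...with value 10
}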
def test_translate_results_missing_slots(_1, _2, monkeypatch):
    monkeypatch.setattr(
        "sentry.sentry_metrics.indexer.reverse_resolve", MockIndexer().reverse_resolve
    )
    query_params = MultiValueDict(
        {
            "field": [
                "sum(sentry.sessions.session)",
            ],
            "interval": ["1d"],
            "statsPeriod": ["3d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "sum(sentry.sessions.session)": 400,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": "2021-08-23T00:00Z",
                        "sum(sentry.sessions.session)": 100,
                    },
                    # no data for 2021-08-24
                    {
                        "metric_id": 9,  # session
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 300,
                    },
                ],
            },
        },
    }
    intervals = list(get_intervals(query_definition))
    assert SnubaResultConverter(1, query_definition, intervals, results).translate_results() == [
        {
            "by": {},
            "totals": {
                "sum(sentry.sessions.session)": 400,
            },
            "series": {
                # No data for 2021-08-24
                "sum(sentry.sessions.session)": [100, 0, 300],
            },
        },
    ]
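# get_intervals yields one timestamp per rollup bucket of the stats period
# (three daily buckets for statsPeriod=3d / interval=1d), and the converter
# zero-fills buckets with no data, hence [100, 0, 300] above. A minimal sketch
# of that zero-filling (zero_fill is a hypothetical helper, not the
# converter's real internals):
def zero_fill(intervals, points_by_timestamp):
    return [points_by_timestamp.get(ts, 0) for ts in intervals]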
def data_fn(offset: int, limit: int):
    try:
        query = QueryDefinition(request.GET, paginator_kwargs={"limit": limit, "offset": offset})
        data = get_series(projects, query)
    except (InvalidField, InvalidParams, DerivedMetricParseException) as exc:
        raise ParseError(detail=str(exc))

    return data
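# data_fn is shaped for Sentry's offset pagination: the paginator invokes it
# as data_fn(offset, limit) and forwards the result. A sketch of the wiring
# inside the endpoint's get(), assuming GenericOffsetPaginator from
# sentry.api.paginator (the per-page numbers are illustrative):
#
#     return self.paginate(
#         request,
#         paginator=GenericOffsetPaginator(data_fn=data_fn),
#         default_per_page=50,
#         max_per_page=100,
#     )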
def get(self, request: Request, organization) -> Response:
    if not features.has("organizations:metrics", organization, actor=request.user):
        return Response(status=404)

    projects = self.get_projects(request, organization)
    try:
        query = QueryDefinition(request.GET)
        data = get_datasource(request).get_series(projects, query)
    except (InvalidField, InvalidParams) as exc:
        raise ParseError(detail=str(exc))

    return Response(data, status=200)
def get(self, request, project):
    if not features.has("organizations:metrics", project.organization, actor=request.user):
        return Response(status=404)

    try:
        query = QueryDefinition(request.GET, allow_minute_resolution=False)
        data = get_datasource(request).get_series(project, query)
    except (InvalidField, InvalidParams) as exc:
        raise ParseError(detail=str(exc))

    return Response(data, status=200)
def test_build_snuba_query_orderby(mock_now, mock_now2, mock_indexer):
    mock_indexer.resolve = MockIndexer().resolve
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
            ],
            "orderBy": ["-sum(sentry.sessions.session)"],
            "limit": [3],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries
    assert counter_queries["series"] is None  # No series because of orderBy

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[Function("sum", [Column("value")], "value")],
        groupby=[
            Column("metric_id"),
            Column("tags[8]"),
            Column("tags[2]"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("metric_id"), Op.IN, [9]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
        ],
        orderby=[OrderBy(Column("value"), Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
def test_translate_results(_1, _2):
    query_params = MultiValueDict(
        {
            "groupBy": ["session.status"],
            "field": [
                "sum(session)",
                "max(session.duration)",
                "p50(session.duration)",
                "p95(session.duration)",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,  # session.status:healthy
                        "value": 300,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,  # session.status:abnormal
                        "value": 330,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "value": 100,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "value": 110,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "value": 200,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "value": 220,
                    },
                ],
            },
        },
        "metrics_distributions": {
            "totals": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "max": 123.4,
                        "percentiles": [1, 2, 3, 4, 5],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "max": 456.7,
                        "percentiles": [1.5, 2.5, 3.5, 4.5, 5.5],
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "max": 10.1,
                        "percentiles": [1.1, 2.1, 3.1, 4.1, 5.1],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "max": 20.2,
                        "percentiles": [1.2, 2.2, 3.2, 4.2, 5.2],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "max": 30.3,
                        "percentiles": [1.3, 2.3, 3.3, 4.3, 5.3],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "max": 40.4,
                        "percentiles": [1.4, 2.4, 3.4, 4.4, 5.4],
                    },
                ],
            },
        },
    }
    assert SnubaResultConverter(1, query_definition, intervals, results).translate_results() == [
        {
            "by": {"session.status": "healthy"},
            "totals": {
                "sum(session)": 300,
                "max(session.duration)": 123.4,
                "p50(session.duration)": 1,
                "p95(session.duration)": 4,
            },
            "series": {
                "sum(session)": [100, 200],
                "max(session.duration)": [10.1, 30.3],
                "p50(session.duration)": [1.1, 1.3],
                "p95(session.duration)": [4.1, 4.3],
            },
        },
        {
            "by": {"session.status": "abnormal"},
            "totals": {
                "sum(session)": 330,
                "max(session.duration)": 456.7,
                "p50(session.duration)": 1.5,
                "p95(session.duration)": 4.5,
            },
            "series": {
                "sum(session)": [110, 220],
                "max(session.duration)": [20.2, 40.4],
                "p50(session.duration)": [1.2, 1.4],
                "p95(session.duration)": [4.2, 4.4],
            },
        },
    ]
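# The "percentiles" fixture is the 5-tuple presumably produced by
# quantiles(0.5, 0.75, 0.9, 0.95, 0.99) (the ordering is an assumption); the
# converter picks index 0 for p50 and index 3 for p95, which is why
# [1, 2, 3, 4, 5] above translates to p50=1 and p95=4:
assert [1, 2, 3, 4, 5][0] == 1 and [1, 2, 3, 4, 5][3] == 4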
def test_translate_results_derived_metrics(_1, _2, monkeypatch):
    monkeypatch.setattr(
        "sentry.sentry_metrics.indexer.reverse_resolve", MockIndexer().reverse_resolve
    )
    query_params = MultiValueDict(
        {
            "groupBy": [],
            "field": [
                "session.errored",
                "session.crash_free_rate",
                "session.all",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    fields_in_entities = {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }
    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "session.crash_free_rate": 0.5,
                        "session.all": 8.0,
                        "session.errored_preaggregated": 3,
                    }
                ],
            },
            "series": {
                "data": [
                    {
                        "bucketed_time": "2021-08-24T00:00Z",
                        "session.crash_free_rate": 0.5,
                        "session.all": 4,
                        "session.errored_preaggregated": 1,
                    },
                    {
                        "bucketed_time": "2021-08-25T00:00Z",
                        "session.crash_free_rate": 0.5,
                        "session.all": 4,
                        "session.errored_preaggregated": 2,
                    },
                ],
            },
        },
        "metrics_sets": {
            "totals": {
                "data": [
                    {
                        "session.errored_set": 3,
                    },
                ],
            },
            "series": {
                "data": [
                    {"bucketed_time": "2021-08-24T00:00Z", "session.errored_set": 2},
                    {"bucketed_time": "2021-08-25T00:00Z", "session.errored_set": 1},
                ],
            },
        },
    }
    assert SnubaResultConverter(
        1, query_definition, fields_in_entities, intervals, results
    ).translate_results() == [
        {
            "by": {},
            "totals": {
                "session.all": 8,
                "session.crash_free_rate": 0.5,
                "session.errored": 6,
            },
            "series": {
                "session.all": [4, 4],
                "session.crash_free_rate": [0.5, 0.5],
                "session.errored": [3, 3],
            },
        },
    ]
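# Sanity check of the derived-metric arithmetic asserted above:
# session.errored = session.errored_preaggregated + session.errored_set.
assert 3 + 3 == 6                # totals
assert [1 + 2, 2 + 1] == [3, 3]  # series buckets for 2021-08-24 / 2021-08-25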
def test_translate_results(_1, _2, monkeypatch):
    monkeypatch.setattr(
        "sentry.sentry_metrics.indexer.reverse_resolve", MockIndexer().reverse_resolve
    )
    query_params = MultiValueDict(
        {
            "groupBy": ["session.status"],
            "field": [
                "sum(sentry.sessions.session)",
                "max(sentry.sessions.session.duration)",
                "p50(sentry.sessions.session.duration)",
                "p95(sentry.sessions.session.duration)",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    fields_in_entities = {
        "metrics_counters": [("sum", "sentry.sessions.session")],
        "metrics_distributions": [
            ("max", "sentry.sessions.session.duration"),
            ("p50", "sentry.sessions.session.duration"),
            ("p95", "sentry.sessions.session.duration"),
        ],
    }
    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,  # session.status:healthy
                        "sum(sentry.sessions.session)": 300,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,  # session.status:abnormal
                        "sum(sentry.sessions.session)": 330,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "sum(sentry.sessions.session)": 100,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "sum(sentry.sessions.session)": 110,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 200,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "sum(sentry.sessions.session)": 220,
                    },
                ],
            },
        },
        "metrics_distributions": {
            "totals": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "max(sentry.sessions.session.duration)": 123.4,
                        "p50(sentry.sessions.session.duration)": [1],
                        "p95(sentry.sessions.session.duration)": [4],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "max(sentry.sessions.session.duration)": 456.7,
                        "p50(sentry.sessions.session.duration)": [1.5],
                        "p95(sentry.sessions.session.duration)": [4.5],
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max(sentry.sessions.session.duration)": 10.1,
                        "p50(sentry.sessions.session.duration)": [1.1],
                        "p95(sentry.sessions.session.duration)": [4.1],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-24T00:00Z",
                        "max(sentry.sessions.session.duration)": 20.2,
                        "p50(sentry.sessions.session.duration)": [1.2],
                        "p95(sentry.sessions.session.duration)": [4.2],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max(sentry.sessions.session.duration)": 30.3,
                        "p50(sentry.sessions.session.duration)": [1.3],
                        "p95(sentry.sessions.session.duration)": [4.3],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 14,
                        "bucketed_time": "2021-08-25T00:00Z",
                        "max(sentry.sessions.session.duration)": 40.4,
                        "p50(sentry.sessions.session.duration)": [1.4],
                        "p95(sentry.sessions.session.duration)": [4.4],
                    },
                ],
            },
        },
    }
    assert SnubaResultConverter(
        1, query_definition, fields_in_entities, intervals, results
    ).translate_results() == [
        {
            "by": {"session.status": "healthy"},
            "totals": {
                "sum(sentry.sessions.session)": 300,
                "max(sentry.sessions.session.duration)": 123.4,
                "p50(sentry.sessions.session.duration)": 1,
                "p95(sentry.sessions.session.duration)": 4,
            },
            "series": {
                "sum(sentry.sessions.session)": [100, 200],
                "max(sentry.sessions.session.duration)": [10.1, 30.3],
                "p50(sentry.sessions.session.duration)": [1.1, 1.3],
                "p95(sentry.sessions.session.duration)": [4.1, 4.3],
            },
        },
        {
            "by": {"session.status": "abnormal"},
            "totals": {
                "sum(sentry.sessions.session)": 330,
                "max(sentry.sessions.session.duration)": 456.7,
                "p50(sentry.sessions.session.duration)": 1.5,
                "p95(sentry.sessions.session.duration)": 4.5,
            },
            "series": {
                "sum(sentry.sessions.session)": [110, 220],
                "max(sentry.sessions.session.duration)": [20.2, 40.4],
                "p50(sentry.sessions.session.duration)": [1.2, 1.4],
                "p95(sentry.sessions.session.duration)": [4.2, 4.4],
            },
        },
    ]
def test_build_snuba_query_orderby(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
            ],
            "orderBy": ["-sum(sentry.sessions.session)"],
        }
    )
    query_definition = QueryDefinition(query_params, paginator_kwargs={"limit": 3})
    snuba_queries, _ = SnubaQueryBuilder(
        [PseudoProject(1, 1)], query_definition
    ).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries

    op = "sum"
    metric_name = "sentry.sessions.session"
    select = Function(
        OP_TO_SNUBA_FUNCTION["metrics_counters"]["sum"],
        [
            Column("value"),
            Function("equals", [Column("metric_id"), resolve_weak(metric_name)]),
        ],
        alias=f"{op}({metric_name})",
    )

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
    assert counter_queries["series"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
            Column("bucketed_time"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(6480),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
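# The series limit above is not arbitrary: assuming the default one-hour
# rollup, the 90-day window in the timestamp conditions yields 90 * 24 = 2160
# intervals, and multiplying by the 3-group totals limit gives the series
# limit.
assert 3 * 90 * 24 == 6480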
def test_build_snuba_query_derived_metrics(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "groupBy": [],
            "field": [
                "session.errored",
                "session.crash_free_rate",
                "session.all",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    query_builder = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition)
    snuba_queries, fields_in_entities = query_builder.get_snuba_queries()
    assert fields_in_entities == {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }
    for key in ("totals", "series"):
        groupby = [] if key == "totals" else [Column("bucketed_time")]
        assert snuba_queries["metrics_counters"][key] == (
            Query(
                dataset="metrics",
                match=Entity("metrics_counters"),
                select=[
                    errored_preaggr_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.errored_preaggregated",
                    ),
                    percentage(
                        crashed_sessions(
                            metric_ids=[resolve_weak("sentry.sessions.session")],
                            alias="session.crashed",
                        ),
                        all_sessions(
                            metric_ids=[resolve_weak("sentry.sessions.session")],
                            alias="session.all",
                        ),
                        alias="session.crash_free_rate",
                    ),
                    all_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.all",
                    ),
                ],
                groupby=groupby,
                where=[
                    Condition(Column("org_id"), Op.EQ, 1),
                    Condition(Column("project_id"), Op.IN, [1]),
                    Condition(
                        Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("metric_id"), Op.IN, [resolve_weak("sentry.sessions.session")]
                    ),
                ],
                limit=Limit(MAX_POINTS),
                offset=Offset(0),
                granularity=Granularity(query_definition.rollup),
            )
        )
        assert snuba_queries["metrics_sets"][key] == (
            Query(
                dataset="metrics",
                match=Entity("metrics_sets"),
                select=[
                    sessions_errored_set(
                        metric_ids=[resolve_weak("sentry.sessions.session.error")],
                        alias="session.errored_set",
                    ),
                ],
                groupby=groupby,
                where=[
                    Condition(Column("org_id"), Op.EQ, 1),
                    Condition(Column("project_id"), Op.IN, [1]),
                    Condition(
                        Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("metric_id"),
                        Op.IN,
                        [resolve_weak("sentry.sessions.session.error")],
                    ),
                ],
                limit=Limit(MAX_POINTS),
                offset=Offset(0),
                granularity=Granularity(query_definition.rollup),
            )
        )
def test_build_snuba_query(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
                "count_unique(sentry.sessions.user)",
                "p95(sentry.sessions.session.duration)",
            ],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries, _ = SnubaQueryBuilder(
        [PseudoProject(1, 1)], query_definition
    ).get_snuba_queries()

    def expected_query(match, select, extra_groupby, metric_name):
        function, column, alias = select  # only `alias` is consumed below
        return Query(
            dataset="metrics",
            match=Entity(match),
            select=[
                Function(
                    OP_TO_SNUBA_FUNCTION[match][alias],
                    [
                        Column("value"),
                        Function("equals", [Column("metric_id"), resolve_weak(metric_name)]),
                    ],
                    alias=f"{alias}({metric_name})",
                )
            ],
            groupby=[Column("tags[8]"), Column("tags[2]")] + extra_groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("tags[6]"), Op.IN, [10]),
                Condition(Column("metric_id"), Op.IN, [resolve_weak(metric_name)]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )

    assert snuba_queries["metrics_counters"]["totals"] == expected_query(
        "metrics_counters", ("sum", "value", "sum"), [], "sentry.sessions.session"
    )

    expected_percentile_select = ("quantiles(0.95)", "value", "p95")

    assert snuba_queries == {
        "metrics_counters": {
            "totals": expected_query(
                "metrics_counters", ("sum", "value", "sum"), [], "sentry.sessions.session"
            ),
            "series": expected_query(
                "metrics_counters",
                ("sum", "value", "sum"),
                [Column("bucketed_time")],
                "sentry.sessions.session",
            ),
        },
        "metrics_sets": {
            "totals": expected_query(
                "metrics_sets", ("uniq", "value", "count_unique"), [], "sentry.sessions.user"
            ),
            "series": expected_query(
                "metrics_sets",
                ("uniq", "value", "count_unique"),
                [Column("bucketed_time")],
                "sentry.sessions.user",
            ),
        },
        "metrics_distributions": {
            "totals": expected_query(
                "metrics_distributions",
                expected_percentile_select,
                [],
                "sentry.sessions.session.duration",
            ),
            "series": expected_query(
                "metrics_distributions",
                expected_percentile_select,
                [Column("bucketed_time")],
                "sentry.sessions.session.duration",
            ),
        },
    }
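# Shape of the OP_TO_SNUBA_FUNCTION lookup consumed by expected_query above.
# The ClickHouse function names here are best-guess assumptions; the -If
# combinator form matches the equals(metric_id, ...) predicate passed next to
# Column("value") in the select:
OP_TO_SNUBA_FUNCTION_SKETCH = {
    "metrics_counters": {"sum": "sumIf"},
    "metrics_sets": {"count_unique": "uniqIf"},
    "metrics_distributions": {"p95": "quantilesIf(0.95)"},
}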