from collections import namedtuple
from datetime import datetime

import pytz
from django.utils.datastructures import MultiValueDict
from snuba_sdk import (
    Column,
    Condition,
    Direction,
    Entity,
    Function,
    Granularity,
    Limit,
    Offset,
    Op,
    OrderBy,
    Query,
)

# NOTE: the exact import paths for the Sentry internals below are assumptions
# based on the era of these tests; adjust them to the actual module layout.
from sentry.sentry_metrics.indexer.mock import MockIndexer
from sentry.sentry_metrics.utils import resolve_weak
from sentry.snuba.metrics import (
    MAX_POINTS,
    OP_TO_SNUBA_FUNCTION,
    QueryDefinition,
    SnubaQueryBuilder,
)
from sentry.snuba.metrics.fields.snql import (
    all_sessions,
    crashed_sessions,
    errored_preaggr_sessions,
    percentage,
    sessions_errored_set,
)

# The tests only read `organization_id` and `id`, so a namedtuple stand-in
# suffices (assuming that is how PseudoProject is defined in the real file).
PseudoProject = namedtuple("PseudoProject", "organization_id id")


def test_build_snuba_query_orderby(mock_now, mock_now2, mock_indexer):
    mock_indexer.resolve = MockIndexer().resolve
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
            ],
            "orderBy": ["-sum(sentry.sessions.session)"],
            "limit": [3],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder(
        [PseudoProject(1, 1)], query_definition
    ).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries
    assert counter_queries["series"] is None  # No series because of orderBy

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[Function("sum", [Column("value")], "value")],
        groupby=[
            Column("metric_id"),
            Column("tags[8]"),
            Column("tags[2]"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("metric_id"), Op.IN, [9]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
        ],
        orderby=[OrderBy(Column("value"), Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
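# The bare integers asserted throughout this file are MockIndexer's fixed
# string -> ID assignments, read off the tags[...] columns and metric_id/tag
# conditions: "environment" -> 2, "release" -> 6, "session.status" -> 8,
# "sentry.sessions.session" -> 9, "staging" -> 10. A minimal sketch of such
# an indexer -- a hypothetical stand-in, not the real MockIndexer; the user
# and duration IDs are inferred from the [9, 11, 7] assertion below:
class _SketchIndexer:
    _strings = {
        "environment": 2,
        "sentry.sessions.session.duration": 7,
        "release": 6,
        "session.status": 8,
        "sentry.sessions.session": 9,
        "staging": 10,
        "sentry.sessions.user": 11,
    }

    def resolve(self, string):
        # Known strings resolve to their pre-assigned ID; unknown strings
        # were never indexed, so there is nothing to resolve.
        return self._strings.get(string)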
def test_build_snuba_query(mock_now, mock_now2):
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(session)",
                "count_unique(user)",
                "p95(session.duration)",
            ],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries = SnubaQueryBuilder(PseudoProject(1, 1), query_definition).get_snuba_queries()

    def expected_query(match, select, extra_groupby):
        return Query(
            dataset="metrics",
            match=Entity(match),
            select=[Column(select)],
            groupby=[Column("metric_id"), Column("tags[8]"), Column("tags[2]")] + extra_groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.EQ, 1),
                Condition(Column("metric_id"), Op.IN, [9, 11, 7]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("tags[6]"), Op.EQ, 10),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )

    assert snuba_queries["metrics_counters"]["totals"] == expected_query(
        "metrics_counters", "value", []
    )

    assert snuba_queries == {
        "metrics_counters": {
            "totals": expected_query("metrics_counters", "value", []),
            "series": expected_query("metrics_counters", "value", [Column("bucketed_time")]),
        },
        "metrics_sets": {
            "totals": expected_query("metrics_sets", "value", []),
            "series": expected_query("metrics_sets", "value", [Column("bucketed_time")]),
        },
        "metrics_distributions": {
            "totals": expected_query("metrics_distributions", "percentiles", []),
            "series": expected_query(
                "metrics_distributions", "percentiles", [Column("bucketed_time")]
            ),
        },
    }
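# `mock_now`/`mock_now2` pin "now" to 2021-08-26 00:00 UTC, which is why the
# timestamp bounds asserted above are stable: the default 90-day stats period
# yields 2021-05-28 .. 2021-08-26. The real fixtures live in the surrounding
# test setup; a minimal sketch under that assumption (the patch target is
# illustrative, not necessarily what the originals patch):
import pytest
from unittest import mock

_FROZEN_NOW = datetime(2021, 8, 26, 0, tzinfo=pytz.utc)


@pytest.fixture
def _sketch_mock_now():
    # Freeze django.utils.timezone.now so the date math inside
    # QueryDefinition is deterministic across test runs.
    with mock.patch("django.utils.timezone.now", return_value=_FROZEN_NOW):
        yield _FROZEN_NOW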
def test_build_snuba_query_derived_metrics(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "groupBy": [],
            "field": [
                "session.errored",
                "session.crash_free_rate",
                "session.all",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    query_builder = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition)
    snuba_queries, fields_in_entities = query_builder.get_snuba_queries()

    assert fields_in_entities == {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }
    for key in ("totals", "series"):
        groupby = [] if key == "totals" else [Column("bucketed_time")]
        assert snuba_queries["metrics_counters"][key] == Query(
            dataset="metrics",
            match=Entity("metrics_counters"),
            select=[
                errored_preaggr_sessions(
                    metric_ids=[resolve_weak("sentry.sessions.session")],
                    alias="session.errored_preaggregated",
                ),
                percentage(
                    crashed_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.crashed",
                    ),
                    all_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.all",
                    ),
                    alias="session.crash_free_rate",
                ),
                all_sessions(
                    metric_ids=[resolve_weak("sentry.sessions.session")],
                    alias="session.all",
                ),
            ],
            groupby=groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("metric_id"), Op.IN, [resolve_weak("sentry.sessions.session")]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )
        assert snuba_queries["metrics_sets"][key] == Query(
            dataset="metrics",
            match=Entity("metrics_sets"),
            select=[
                sessions_errored_set(
                    metric_ids=[resolve_weak("sentry.sessions.session.error")],
                    alias="session.errored_set",
                ),
            ],
            groupby=groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(
                    Column("metric_id"), Op.IN, [resolve_weak("sentry.sessions.session.error")]
                ),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )
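# `percentage`, `all_sessions`, `crashed_sessions` etc. are SnQL builders
# returning snuba_sdk Function trees; their real definitions live in Sentry's
# metrics field layer. The sketch below only illustrates the shape of such a
# composition -- the arithmetic is an assumption, not the verified source:
def _sketch_percentage(part, total, alias=None):
    # A crash-free-style rate built from two conditional aggregates:
    # 1 - part/total, exposed under one alias so the API layer can pick the
    # derived value out of the result row. (The real formula may differ,
    # e.g. scaling to a percentage.)
    return Function("minus", [1, Function("divide", [part, total])], alias)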
def test_build_snuba_query_orderby(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
            ],
            "orderBy": ["-sum(sentry.sessions.session)"],
        }
    )
    query_definition = QueryDefinition(query_params, paginator_kwargs={"limit": 3})
    snuba_queries, _ = SnubaQueryBuilder(
        [PseudoProject(1, 1)], query_definition
    ).get_snuba_queries()

    counter_queries = snuba_queries.pop("metrics_counters")
    assert not snuba_queries

    op = "sum"
    metric_name = "sentry.sessions.session"
    select = Function(
        OP_TO_SNUBA_FUNCTION["metrics_counters"]["sum"],
        [
            Column("value"),
            Function("equals", [Column("metric_id"), resolve_weak(metric_name)]),
        ],
        alias=f"{op}({metric_name})",
    )

    assert counter_queries["totals"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(3),
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
    assert counter_queries["series"] == Query(
        dataset="metrics",
        match=Entity("metrics_counters"),
        select=[select],
        groupby=[
            Column("tags[8]"),
            Column("tags[2]"),
            Column("bucketed_time"),
        ],
        where=[
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("tags[6]", entity=None), Op.IN, [10]),
            Condition(Column("metric_id"), Op.IN, [9]),
        ],
        orderby=[OrderBy(select, Direction.DESC)],
        limit=Limit(6480),  # 3 (limit) * 2160 hourly buckets in the 90-day window
        offset=Offset(0),
        granularity=Granularity(query_definition.rollup),
    )
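# OP_TO_SNUBA_FUNCTION maps entity -> public operation -> ClickHouse
# aggregate name. Because the metric_id predicate is passed as the
# aggregate's second argument (the `equals` Function above), the values must
# be the conditional -If combinators. Shape inferred from these tests; treat
# the literal function names as assumptions:
_OP_TO_SNUBA_FUNCTION_SKETCH = {
    "metrics_counters": {"sum": "sumIf"},
    "metrics_sets": {"count_unique": "uniqIf"},
    "metrics_distributions": {"p95": "quantilesIf(0.95)"},
}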
def test_build_snuba_query(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "query": [
                "release:staging"
            ],  # weird release but we need a string existing in mock indexer
            "groupBy": ["session.status", "environment"],
            "field": [
                "sum(sentry.sessions.session)",
                "count_unique(sentry.sessions.user)",
                "p95(sentry.sessions.session.duration)",
            ],
        }
    )
    query_definition = QueryDefinition(query_params)
    snuba_queries, _ = SnubaQueryBuilder(
        [PseudoProject(1, 1)], query_definition
    ).get_snuba_queries()

    def expected_query(match, select, extra_groupby, metric_name):
        # Only `alias` (the public op name) is consulted; it keys into
        # OP_TO_SNUBA_FUNCTION and labels the select expression.
        function, column, alias = select
        return Query(
            dataset="metrics",
            match=Entity(match),
            select=[
                Function(
                    OP_TO_SNUBA_FUNCTION[match][alias],
                    [
                        Column("value"),
                        Function("equals", [Column("metric_id"), resolve_weak(metric_name)]),
                    ],
                    alias=f"{alias}({metric_name})",
                )
            ],
            groupby=[Column("tags[8]"), Column("tags[2]")] + extra_groupby,
            where=[
                Condition(Column("org_id"), Op.EQ, 1),
                Condition(Column("project_id"), Op.IN, [1]),
                Condition(Column("timestamp"), Op.GTE, datetime(2021, 5, 28, 0, tzinfo=pytz.utc)),
                Condition(Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
                Condition(Column("tags[6]"), Op.IN, [10]),
                Condition(Column("metric_id"), Op.IN, [resolve_weak(metric_name)]),
            ],
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )

    assert snuba_queries["metrics_counters"]["totals"] == expected_query(
        "metrics_counters", ("sum", "value", "sum"), [], "sentry.sessions.session"
    )

    expected_percentile_select = ("quantiles(0.95)", "value", "p95")
    assert snuba_queries == {
        "metrics_counters": {
            "totals": expected_query(
                "metrics_counters", ("sum", "value", "sum"), [], "sentry.sessions.session"
            ),
            "series": expected_query(
                "metrics_counters",
                ("sum", "value", "sum"),
                [Column("bucketed_time")],
                "sentry.sessions.session",
            ),
        },
        "metrics_sets": {
            "totals": expected_query(
                "metrics_sets", ("uniq", "value", "count_unique"), [], "sentry.sessions.user"
            ),
            "series": expected_query(
                "metrics_sets",
                ("uniq", "value", "count_unique"),
                [Column("bucketed_time")],
                "sentry.sessions.user",
            ),
        },
        "metrics_distributions": {
            "totals": expected_query(
                "metrics_distributions",
                expected_percentile_select,
                [],
                "sentry.sessions.session.duration",
            ),
            "series": expected_query(
                "metrics_distributions",
                expected_percentile_select,
                [Column("bucketed_time")],
                "sentry.sessions.session.duration",
            ),
        },
    }
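# What a caller does with the builder output: each entity key maps to a
# "totals" query and a "series" query that are run separately and stitched
# back together per group. A hedged sketch of that loop -- `raw_snql_query`
# is Sentry's Snuba entry point, but its exact signature has changed across
# versions, so treat the call below as illustrative only:
from sentry.utils.snuba import raw_snql_query


def _sketch_run_queries(snuba_queries):
    results = {}
    for entity, queries in snuba_queries.items():
        results[entity] = {
            key: raw_snql_query(query, referrer="api.metrics.sketch")
            for key, query in queries.items()
            # older builder versions return None for "series" when orderBy
            # is set (see the first orderby test above)
            if query is not None
        }
    return results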