    def setUp(self):
        self.crash_free_fake = SingularEntityDerivedMetric(
            metric_name="crash_free_fake",
            metrics=["session.crashed", "session.errored_set"],
            unit="percentage",
            snql=lambda *args, metric_ids, alias=None: percentage(
                *args, metric_ids, alias="crash_free_fake"
            ),
        )
        DERIVED_METRICS.update({"crash_free_fake": self.crash_free_fake})
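    # "crash_free_fake" registered above is intentionally invalid for a
    # SingularEntityDerivedMetric: its constituents resolve to different entities
    # (session.crashed is counters-based, session.errored_set is sets-based), so the
    # tests below expect DerivedMetricParseException to be raised for it.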
    def test_generate_select_snql_of_derived_metric(self):
        """
        Test that ensures that the method generate_select_statements generates the
        equivalent SnQL required to query for the instance of DerivedMetric
        """
        org_id = self.project.organization_id
        for status in ("init", "crashed"):
            indexer.record(org_id, status)
        session_ids = [indexer.record(org_id, "sentry.sessions.session")]

        derived_name_snql = {
            "session.init": (init_sessions, session_ids),
            "session.crashed": (crashed_sessions, session_ids),
            "session.errored_preaggregated": (errored_preaggr_sessions, session_ids),
            "session.errored_set": (
                sessions_errored_set,
                [indexer.record(org_id, "sentry.sessions.session.error")],
            ),
        }
        for metric_name, (func, metric_ids_list) in derived_name_snql.items():
            assert DERIVED_METRICS[metric_name].generate_select_statements([self.project]) == [
                func(metric_ids=metric_ids_list, alias=metric_name),
            ]

        assert DERIVED_METRICS["session.crash_free_rate"].generate_select_statements(
            [self.project]
        ) == [
            percentage(
                crashed_sessions(metric_ids=session_ids, alias="session.crashed"),
                init_sessions(metric_ids=session_ids, alias="session.init"),
                alias="session.crash_free_rate",
            )
        ]

        # Ensure that an exception is raised if `generate_select_statements` is called
        # before `get_entity` (and therefore before the entity validation logic) runs.
        with pytest.raises(DerivedMetricParseException):
            self.crash_free_fake.generate_select_statements([self.project])
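    # For reference, `percentage` composes the crashed and init selects asserted above,
    # so session.crash_free_rate is conceptually something like
    #   100 * (1 - session.crashed / session.init)
    # (the exact SnQL expression is whatever the `percentage` helper emits).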
def get_entity_of_metric_mocked(_, metric_name):
    return {
        "sentry.sessions.session": EntityKey.MetricsCounters,
        "sentry.sessions.user": EntityKey.MetricsSets,
        "sentry.sessions.session.error": EntityKey.MetricsSets,
    }[metric_name]


MOCKED_DERIVED_METRICS = copy.deepcopy(DERIVED_METRICS)
MOCKED_DERIVED_METRICS.update(
    {
        "crash_free_fake": SingularEntityDerivedMetric(
            metric_name="crash_free_fake",
            metrics=["session.crashed", "session.errored_set"],
            unit="percentage",
            snql=lambda *args, entity, metric_ids, alias=None: percentage(
                *args, entity, metric_ids, alias="crash_free_fake"
            ),
        ),
        "random_composite": CompositeEntityDerivedMetric(
            metric_name="random_composite",
            metrics=["session.errored"],
            unit="sessions",
        ),
    }
)


@patch("sentry.snuba.metrics.fields.base.DERIVED_METRICS", MOCKED_DERIVED_METRICS)
class SingleEntityDerivedMetricTestCase(TestCase):
    def setUp(self):
        self.crash_free_fake = MOCKED_DERIVED_METRICS["crash_free_fake"]
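    # The class-level @patch above swaps in MOCKED_DERIVED_METRICS, so the code under
    # test sees the invalid "crash_free_fake" single-entity metric and the
    # "random_composite" composite metric without mutating the real DERIVED_METRICS
    # registry (hence the copy.deepcopy).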
unit="sessions", snql=lambda *_, metric_ids, alias=None: init_sessions(metric_ids, alias=alias), ), SingularEntityDerivedMetric( metric_name="session.crashed", metrics=["sentry.sessions.session"], unit="sessions", snql=lambda *_, metric_ids, alias=None: crashed_sessions( metric_ids, alias=alias), ), SingularEntityDerivedMetric( metric_name="session.crash_free_rate", metrics=["session.crashed", "session.init"], unit="percentage", snql=lambda *args, metric_ids, alias=None: percentage( *args, alias="session.crash_free_rate"), ), SingularEntityDerivedMetric( metric_name="session.errored_preaggregated", metrics=["sentry.sessions.session"], unit="sessions", snql=lambda *_, metric_ids, alias=None: errored_preaggr_sessions( metric_ids, alias=alias), ), SingularEntityDerivedMetric( metric_name="session.errored_set", metrics=["sentry.sessions.session.error"], unit="sessions", snql=lambda *_, metric_ids, alias=None: sessions_errored_set( metric_ids, alias=alias), ),
    ),
    SingularEntityDerivedMetric(
        metric_name=DerivedMetricKey.SESSION_CRASHED_USER.value,
        metrics=[SessionMetricKey.USER.value],
        unit="users",
        snql=lambda *_, metric_ids, alias=None: crashed_users(metric_ids, alias=alias),
    ),
    SingularEntityDerivedMetric(
        metric_name=DerivedMetricKey.SESSION_CRASH_FREE_RATE.value,
        metrics=[
            DerivedMetricKey.SESSION_CRASHED.value,
            DerivedMetricKey.SESSION_ALL.value,
        ],
        unit="percentage",
        snql=lambda *args, metric_ids, alias=None: percentage(*args, alias=alias),
    ),
    SingularEntityDerivedMetric(
        metric_name=DerivedMetricKey.SESSION_CRASH_FREE_USER_RATE.value,
        metrics=[
            DerivedMetricKey.SESSION_CRASHED_USER.value,
            DerivedMetricKey.SESSION_ALL_USER.value,
        ],
        unit="percentage",
        snql=lambda *args, metric_ids, alias=None: percentage(*args, alias=alias),
    ),
    SingularEntityDerivedMetric(
        metric_name=DerivedMetricKey.SESSION_ERRORED_PREAGGREGATED.value,
        metrics=[SessionMetricKey.SESSION.value],
        unit="sessions",
def test_build_snuba_query_derived_metrics(mock_now, mock_now2, monkeypatch):
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve", MockIndexer().resolve)
    # Your typical release health query querying everything
    query_params = MultiValueDict(
        {
            "groupBy": [],
            "field": [
                "session.errored",
                "session.crash_free_rate",
                "session.all",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)
    query_builder = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition)
    snuba_queries, fields_in_entities = query_builder.get_snuba_queries()
    assert fields_in_entities == {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }
    for key in ("totals", "series"):
        groupby = [] if key == "totals" else [Column("bucketed_time")]
        assert snuba_queries["metrics_counters"][key] == (
            Query(
                dataset="metrics",
                match=Entity("metrics_counters"),
                select=[
                    errored_preaggr_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.errored_preaggregated",
                    ),
                    percentage(
                        crashed_sessions(
                            metric_ids=[resolve_weak("sentry.sessions.session")],
                            alias="session.crashed",
                        ),
                        all_sessions(
                            metric_ids=[resolve_weak("sentry.sessions.session")],
                            alias="session.all",
                        ),
                        alias="session.crash_free_rate",
                    ),
                    all_sessions(
                        metric_ids=[resolve_weak("sentry.sessions.session")],
                        alias="session.all",
                    ),
                ],
                groupby=groupby,
                where=[
                    Condition(Column("org_id"), Op.EQ, 1),
                    Condition(Column("project_id"), Op.IN, [1]),
                    Condition(
                        Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("metric_id"), Op.IN, [resolve_weak("sentry.sessions.session")]
                    ),
                ],
                limit=Limit(MAX_POINTS),
                offset=Offset(0),
                granularity=Granularity(query_definition.rollup),
            )
        )
        assert snuba_queries["metrics_sets"][key] == (
            Query(
                dataset="metrics",
                match=Entity("metrics_sets"),
                select=[
                    sessions_errored_set(
                        metric_ids=[resolve_weak("sentry.sessions.session.error")],
                        alias="session.errored_set",
                    ),
                ],
                groupby=groupby,
                where=[
                    Condition(Column("org_id"), Op.EQ, 1),
                    Condition(Column("project_id"), Op.IN, [1]),
                    Condition(
                        Column("timestamp"), Op.GTE, datetime(2021, 8, 24, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("timestamp"), Op.LT, datetime(2021, 8, 26, 0, tzinfo=pytz.utc)
                    ),
                    Condition(
                        Column("metric_id"),
                        Op.IN,
                        [resolve_weak("sentry.sessions.session.error")],
                    ),
                ],
                limit=Limit(MAX_POINTS),
                offset=Offset(0),
                granularity=Granularity(query_definition.rollup),
            )
        )
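# Note: the requested field "session.errored" does not appear directly in the expected
# queries above because it is a composite derived metric; SnubaQueryBuilder decomposes it
# into single-entity constituents, querying "session.errored_preaggregated" from the
# metrics_counters entity and "session.errored_set" from the metrics_sets entity. The
# "totals" and "series" variants of each per-entity query differ only in that the series
# query additionally groups by "bucketed_time".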