Beispiel #1
0
    def test_generate_select_snql_of_derived_metric(self):
        """
        Test that ensures that method generate_select_statements generates the equivalent SnQL
        required to query for the instance of DerivedMetric
        """
        org_id = self.project.organization_id
        # Register the session statuses the derived metrics filter on.
        for status in ("init", "crashed"):
            indexer.record(org_id, status)
        session_metric_ids = [indexer.record(org_id, "sentry.sessions.session")]
        error_metric_ids = [
            indexer.record(org_id, "sentry.sessions.session.error")
        ]

        # Each singular derived metric should expand to exactly one SnQL
        # expression built by its dedicated snql factory function.
        cases = (
            ("session.init", init_sessions, session_metric_ids),
            ("session.crashed", crashed_sessions, session_metric_ids),
            ("session.errored_preaggregated", errored_preaggr_sessions,
             session_metric_ids),
            ("session.errored_set", sessions_errored_set, error_metric_ids),
        )
        for metric_name, snql_func, metric_ids in cases:
            expected = [snql_func(metric_ids=metric_ids, alias=metric_name)]
            assert DERIVED_METRICS[metric_name].generate_select_statements(
                [self.project]) == expected

        # Composite derived metric: crash-free rate is a percentage built
        # from the crashed and init session expressions.
        crash_free = DERIVED_METRICS["session.crash_free_rate"]
        assert crash_free.generate_select_statements([self.project]) == [
            percentage(
                crashed_sessions(metric_ids=session_metric_ids,
                                 alias="session.crashed"),
                init_sessions(metric_ids=session_metric_ids,
                              alias="session.init"),
                alias="session.crash_free_rate",
            )
        ]

        # Test that ensures that even if `generate_select_statements` is called before
        # `get_entity` is called, and thereby the entity validation logic, we throw an exception
        with pytest.raises(DerivedMetricParseException):
            self.crash_free_fake.generate_select_statements([self.project])
Beispiel #2
0
# TODO(ahmed): Replace the metric_names with Enums
# Registry of derived metrics, keyed by their public metric name. Each entry
# declares the raw metric name(s) it is computed from, its unit, and a `snql`
# factory that produces the SELECT expression for that metric.
DERIVED_METRICS = {
    derived_metric.metric_name: derived_metric
    for derived_metric in [
        SingularEntityDerivedMetric(
            metric_name="session.init",
            metrics=["sentry.sessions.session"],
            unit="sessions",
            # `*_` absorbs any positional arguments the caller passes; only
            # the resolved metric ids and the alias are forwarded.
            snql=lambda *_, metric_ids, alias=None: init_sessions(metric_ids,
                                                                  alias=alias),
        ),
        SingularEntityDerivedMetric(
            metric_name="session.crashed",
            metrics=["sentry.sessions.session"],
            unit="sessions",
            snql=lambda *_, metric_ids, alias=None: crashed_sessions(
                metric_ids, alias=alias),
        ),
        SingularEntityDerivedMetric(
            metric_name="session.crash_free_rate",
            # Composed from two other derived metrics rather than raw metrics;
            # their expressions arrive via *args.
            metrics=["session.crashed", "session.init"],
            unit="percentage",
            # NOTE(review): the `alias` and `metric_ids` parameters are
            # accepted but ignored here — the alias is hard-coded. Confirm
            # this is intentional.
            snql=lambda *args, metric_ids, alias=None: percentage(
                *args, alias="session.crash_free_rate"),
        ),
        SingularEntityDerivedMetric(
            metric_name="session.errored_preaggregated",
            metrics=["sentry.sessions.session"],
            unit="sessions",
            snql=lambda *_, metric_ids, alias=None: errored_preaggr_sessions(
                metric_ids, alias=alias),
        ),
Beispiel #3
0
def test_build_snuba_query_derived_metrics(mock_now, mock_now2, monkeypatch):
    """A release-health query over derived metrics is expanded into the
    expected per-entity SnQL queries (counters and sets), for both the
    totals and the series variants."""
    monkeypatch.setattr("sentry.sentry_metrics.indexer.resolve",
                        MockIndexer().resolve)
    # Your typical release health query querying everything
    query_definition = QueryDefinition(
        MultiValueDict({
            "groupBy": [],
            "field": [
                "session.errored",
                "session.crash_free_rate",
                "session.all",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }))
    builder = SnubaQueryBuilder([PseudoProject(1, 1)], query_definition)
    snuba_queries, fields_in_entities = builder.get_snuba_queries()

    # Each requested (derived) metric must be routed to the entity it is
    # computed from: counters for session counts, sets for errored sessions.
    assert fields_in_entities == {
        "metrics_counters": [
            (None, "session.errored_preaggregated"),
            (None, "session.crash_free_rate"),
            (None, "session.all"),
        ],
        "metrics_sets": [
            (None, "session.errored_set"),
        ],
    }

    session_metric_id = resolve_weak("sentry.sessions.session")
    session_error_metric_id = resolve_weak("sentry.sessions.session.error")

    def expected_where(metric_ids):
        # Scoping conditions shared by both entity queries; only the
        # metric-id filter differs.
        return [
            Condition(Column("org_id"), Op.EQ, 1),
            Condition(Column("project_id"), Op.IN, [1]),
            Condition(Column("timestamp"), Op.GTE,
                      datetime(2021, 8, 24, 0, tzinfo=pytz.utc)),
            Condition(Column("timestamp"), Op.LT,
                      datetime(2021, 8, 26, 0, tzinfo=pytz.utc)),
            Condition(Column("metric_id"), Op.IN, metric_ids),
        ]

    for key in ("totals", "series"):
        # The series query only differs from totals by bucketing on time.
        groupby = [] if key == "totals" else [Column("bucketed_time")]
        assert snuba_queries["metrics_counters"][key] == Query(
            dataset="metrics",
            match=Entity("metrics_counters"),
            select=[
                errored_preaggr_sessions(
                    metric_ids=[session_metric_id],
                    alias="session.errored_preaggregated",
                ),
                percentage(
                    crashed_sessions(
                        metric_ids=[session_metric_id],
                        alias="session.crashed",
                    ),
                    all_sessions(
                        metric_ids=[session_metric_id],
                        alias="session.all",
                    ),
                    alias="session.crash_free_rate",
                ),
                all_sessions(
                    metric_ids=[session_metric_id],
                    alias="session.all",
                ),
            ],
            groupby=groupby,
            where=expected_where([session_metric_id]),
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )
        assert snuba_queries["metrics_sets"][key] == Query(
            dataset="metrics",
            match=Entity("metrics_sets"),
            select=[
                sessions_errored_set(
                    metric_ids=[session_error_metric_id],
                    alias="session.errored_set",
                ),
            ],
            groupby=groupby,
            where=expected_where([session_error_metric_id]),
            limit=Limit(MAX_POINTS),
            offset=Offset(0),
            granularity=Granularity(query_definition.rollup),
        )