Code example #1
 def test_non_existing_filter(self):
     indexer.record("bar")
     response = self.get_response(self.project.organization.slug,
                                  "bar",
                                  metric="bad")
     assert response.status_code == 200
     assert response.data == []
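
Note: the examples on this page mix two calling conventions, `indexer.record("string")` and `indexer.record(org_id, "string")`, because the indexer's signature changed between Sentry versions. As a rough mental model only, a minimal in-memory stand-in that accepts both forms might look like the sketch below (an illustration, not the real indexer, which is backed by persistent storage):

    class FakeIndexer:
        """Illustrative stand-in: maps strings to stable integer IDs."""

        def __init__(self):
            self._strings = {}

        def record(self, *args):
            # Accept both record("tag") and record(org_id, "tag").
            key = args if len(args) > 1 else (None, args[0])
            return self._strings.setdefault(key, len(self._strings) + 1)

    indexer = FakeIndexer()
    assert indexer.record("release") == indexer.record("release")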
Code example #2
    def test_generate_metric_ids(self):
        org_id = self.project.organization_id
        session_metric_id = indexer.record(org_id, "sentry.sessions.session")
        session_error_metric_id = indexer.record(
            org_id, "sentry.sessions.session.error")
        session_user_id = indexer.record(org_id, "sentry.sessions.user")

        for derived_metric_name in [
                "session.all",
                "session.crashed",
                "session.abnormal",
                "session.crash_free_rate",
                "session.errored_preaggregated",
        ]:
            assert MOCKED_DERIVED_METRICS[
                derived_metric_name].generate_metric_ids() == {
                    session_metric_id
                }
        for derived_metric_name in [
                "session.all_user",
                "session.crashed_user",
                "session.abnormal_user",
                "session.crash_free_user_rate",
                "session.crashed_and_abnormal_user",
                "session.errored_user_all",
                "session.errored_user",
        ]:
            assert MOCKED_DERIVED_METRICS[
                derived_metric_name].generate_metric_ids() == {
                    session_user_id
                }
        assert MOCKED_DERIVED_METRICS[
            "session.errored_set"].generate_metric_ids() == {
                session_error_metric_id
            }
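
Here `generate_metric_ids()` resolves a derived metric to the set of raw metric IDs it is computed from. A minimal object satisfying that interface could look like the sketch below; the class name and registry are hypothetical, and the real derived-metric classes also carry SnQL generation logic:

    from dataclasses import dataclass
    from typing import Set

    @dataclass
    class FakeDerivedMetric:
        metric_ids: Set[int]  # raw metric IDs this derived metric is built from

        def generate_metric_ids(self) -> Set[int]:
            return self.metric_ids

    # Hypothetical registry mirroring the shape of MOCKED_DERIVED_METRICS:
    REGISTRY = {"session.crashed": FakeDerivedMetric(metric_ids={1})}
    assert REGISTRY["session.crashed"].generate_metric_ids() == {1}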
Code example #3
    def test_unknown_groupby(self):
        """Use a tag name in groupby that does not exist in the indexer"""
        # Insert session metrics:
        self.store_session(self.build_session(project_id=self.project.id))

        # "foo" is known by indexer, "bar" is not
        indexer.record("foo")

        response = self.get_success_response(
            self.organization.slug,
            field="sum(sentry.sessions.session)",
            statsPeriod="1h",
            interval="1h",
            groupBy=["session.status", "foo"],
        )

        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"] == {"session.status": "init", "foo": None}

        response = self.get_response(
            self.organization.slug,
            field="sum(sentry.sessions.session)",
            statsPeriod="1h",
            interval="1h",
            groupBy=["session.status", "bar"],
        )
        assert response.status_code == 400
Code example #4
File: test_tasks.py Project: wangjianweiwei/sentry
 def test_simple_users_for_metrics(self):
     for tag in [
             SessionMetricKey.USER.value, "session.status", "crashed",
             "init"
     ]:
         indexer.record(tag)
     entity_subscription = get_entity_subscription_for_dataset(
         dataset=QueryDatasets.METRICS,
         time_window=3600,
         aggregate="percentage(users_crashed, users) AS _crash_rate_alert_aggregate",
         extra_fields={"org_id": self.organization.id},
     )
     snuba_filter = build_snuba_filter(
         entity_subscription,
         query="",
         environment=None,
     )
     org_id = self.organization.id
     session_status = tag_key(org_id, "session.status")
     session_status_tag_values = get_tag_values_list(
         org_id, ["crashed", "init"])
     assert snuba_filter
     assert snuba_filter.aggregations == [["uniq(value)", None, "value"]]
     assert snuba_filter.conditions == [
         ["metric_id", "=",
          metric_id(org_id, SessionMetricKey.USER)],
         [session_status, "IN", session_status_tag_values],
     ]
     assert snuba_filter.groupby == [session_status]
Code example #5
File: test_tasks.py Project: wangjianweiwei/sentry
    def test_granularity_on_metrics_crash_rate_alerts(self):
        for tag in [
                SessionMetricKey.SESSION.value, SessionMetricKey.USER.value,
                "session.status"
        ]:
            indexer.record(tag)
        for (time_window, expected_granularity) in [
            (30, 10),
            (90, 60),
            (5 * 60, 3600),
            (25 * 60, 3600 * 24),
        ]:
            for idx, aggregate in enumerate(["sessions", "users"]):
                sub = self.create_subscription(
                    dataset=QueryDatasets.METRICS,
                    aggregate=f"percentage({aggregate}_crashed, {aggregate}) AS "
                    f"_crash_rate_alert_aggregate",
                    time_window=int(
                        timedelta(minutes=time_window).total_seconds()),
                    status=QuerySubscription.Status.CREATING,
                )
                with patch("sentry.snuba.tasks._snuba_pool") as pool:
                    resp = Mock()
                    resp.status = 202
                    resp.data = json.dumps(
                        {"subscription_id": "123" + f"{time_window + idx}"})
                    pool.urlopen.return_value = resp

                    create_subscription_in_snuba(sub.id)
                    request_body = json.loads(
                        pool.urlopen.call_args[1]["body"])
                    assert request_body["granularity"] == expected_granularity
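
The asserted pairs map a 30-minute window to 10-second granularity, 90 minutes to 60 seconds, 5 hours to 1 hour, and 25 hours to 1 day. They are consistent with a threshold-based selection like the sketch below; the exact boundaries are an assumption inferred from these four pairs, not the confirmed implementation:

    def crash_rate_alert_granularity(time_window_seconds: int) -> int:
        # Thresholds inferred from the four asserted pairs above (assumption).
        if time_window_seconds <= 3600:       # up to 1 hour  -> 10 s buckets
            return 10
        if time_window_seconds <= 4 * 3600:   # up to 4 hours -> 1 min buckets
            return 60
        if time_window_seconds <= 24 * 3600:  # up to 1 day   -> 1 h buckets
            return 3600
        return 24 * 3600                      # longer windows -> 1 day buckets

    for minutes, expected in [(30, 10), (90, 60), (5 * 60, 3600), (25 * 60, 3600 * 24)]:
        assert crash_rate_alert_granularity(minutes * 60) == expected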
Code example #6
    def test_orderby_percentile_with_many_fields_one_entity_no_data(self):
        """
        Test that ensures that when no metrics data is available, an empty
        response is returned gracefully
        """
        for metric in [
                "sentry.transactions.measurements.lcp",
                "sentry.transactions.measurements.fcp",
                "transaction",
        ]:
            indexer.record(metric)

        response = self.get_success_response(
            self.organization.slug,
            field=[
                "p50(sentry.transactions.measurements.lcp)",
                "p50(sentry.transactions.measurements.fcp)",
            ],
            statsPeriod="1h",
            interval="1h",
            groupBy=["project_id", "transaction"],
            orderBy="p50(sentry.transactions.measurements.lcp)",
        )
        groups = response.data["groups"]
        assert len(groups) == 0
Code example #7
 def test_simple_sessions_for_metrics(self):
     org_id = self.organization.id
     for tag in [
             SessionMetricKey.SESSION.value, "session.status", "crashed",
             "init"
     ]:
         indexer.record(org_id, tag)
     entity_subscription = get_entity_subscription_for_dataset(
         dataset=QueryDatasets.METRICS,
         time_window=3600,
         aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
         extra_fields={"org_id": org_id},
     )
     snuba_filter = build_snuba_filter(
         entity_subscription,
         query="",
         environment=None,
     )
     session_status = resolve_tag_key("session.status")
     session_status_tag_values = resolve_many_weak(["crashed", "init"])
     assert snuba_filter
     assert snuba_filter.aggregations == [["sum(value)", None, "value"]]
     assert snuba_filter.conditions == [
         ["metric_id", "=",
          resolve(SessionMetricKey.SESSION.value)],
         [session_status, "IN", session_status_tag_values],
     ]
     assert snuba_filter.groupby == [session_status]
Code example #8
    def test_orderby_percentile_with_pagination(self):
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        tag1 = indexer.record("tag1")
        value1 = indexer.record("value1")
        value2 = indexer.record("value2")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            } for tag, value, numbers in (
                (tag1, value1, [4, 5, 6]),
                (tag1, value2, [1, 2, 3]),
            )],
            entity="metrics_distributions",
        )

        response = self.get_success_response(
            self.organization.slug,
            field="p50(sentry.transactions.measurements.lcp)",
            statsPeriod="1h",
            interval="1h",
            groupBy="tag1",
            orderBy="p50(sentry.transactions.measurements.lcp)",
            per_page=1,
        )
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"] == {"tag1": "value2"}
        assert groups[0]["totals"] == {
            "p50(sentry.transactions.measurements.lcp)": 2
        }

        response = self.get_success_response(
            self.organization.slug,
            field="p50(sentry.transactions.measurements.lcp)",
            statsPeriod="1h",
            interval="1h",
            groupBy="tag1",
            orderBy="p50(sentry.transactions.measurements.lcp)",
            per_page=1,
            cursor=Cursor(0, 1),
        )
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"] == {"tag1": "value1"}
        assert groups[0]["totals"] == {
            "p50(sentry.transactions.measurements.lcp)": 5
        }
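
Both requests return one group each: `per_page=1` limits the page size, and `Cursor(0, 1)` (from `sentry.utils.cursors`) is used here to fetch the second page. Over an already-ordered result set the pagination reduces to slicing, roughly as in this sketch (an illustration, not the endpoint's actual paginator):

    # Groups ordered by p50 ascending: "value2" (p50=2) before "value1" (p50=5).
    ordered_groups = [{"tag1": "value2"}, {"tag1": "value1"}]

    def page(groups, per_page, offset=0):
        # Offset-style pagination over an ordered result set.
        return groups[offset * per_page:(offset + 1) * per_page]

    assert page(ordered_groups, per_page=1) == [{"tag1": "value2"}]
    assert page(ordered_groups, per_page=1, offset=1) == [{"tag1": "value1"}]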
Code example #9
    def test_groupby_single(self):
        indexer.record("environment")
        response = self.get_response(
            self.project.organization.slug,
            field="sum(sentry.sessions.session)",
            groupBy="environment",
        )

        assert response.status_code == 200
Code example #10
 def setUp(self) -> None:
     super().setUp()
     for tag in [
             SessionMetricKey.SESSION.value,
             SessionMetricKey.USER.value,
             "session.status",
             "init",
             "crashed",
     ]:
         indexer.record(self.organization.id, tag)
Code example #11
 def setUp(self):
     super().setUp()
     self.valid_alert_rule["dataset"] = Dataset.Metrics.value
     for tag in [
             SessionMetricKey.SESSION.value,
             SessionMetricKey.USER.value,
             "session.status",
             "init",
             "crashed",
     ]:
         indexer.record(tag)
Code example #12
 def test_same_entity_multiple_metric_ids(self):
     """
     Test that ensures that if a derived metric is defined with constituent metrics that
     belong to the same entity but have different ids, then we are able to correctly return
     its detail info
     """
     self.store_session(
         self.build_session(
             project_id=self.project.id,
             started=(time.time() // 60) * 60,
             status="ok",
             release="[email protected]",
             errors=2,
         )
     )
     response = self.get_response(
         self.organization.slug,
         "derived_metric.multiple_metrics",
     )
     assert response.status_code == 404
     assert response.json()["detail"] == (
         "Not all the requested metrics or the constituent metrics in "
         "['derived_metric.multiple_metrics'] have data in the dataset"
     )
     org_id = self.organization.id
     self._send_buckets(
         [
             {
                 "org_id": org_id,
                 "project_id": self.project.id,
                 "metric_id": indexer.record(org_id, "metric_foo_doe"),
                 "timestamp": int(time.time()),
                 "tags": {
                     resolve_weak("release"): indexer.record(org_id, "foo"),
                 },
                 "type": "c",
                 "value": 1,
                 "retention_days": 90,
             },
         ],
         entity="metrics_counters",
     )
     response = self.get_success_response(
         self.organization.slug,
         "derived_metric.multiple_metrics",
     )
     assert response.data == {
         "name": "derived_metric.multiple_metrics",
         "type": "numeric",
         "operations": [],
         "unit": "percentage",
         "tags": [{"key": "release"}],
     }
Code example #13
    def test_orderby_percentile(self):
        # Record some strings
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        tag1 = indexer.record("tag1")
        value1 = indexer.record("value1")
        value2 = indexer.record("value2")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            } for tag, value, numbers in (
                (tag1, value1, [4, 5, 6]),
                (tag1, value2, [1, 2, 3]),
            )],
            entity="metrics_distributions",
        )

        response = self.get_success_response(
            self.organization.slug,
            field="p50(sentry.transactions.measurements.lcp)",
            statsPeriod="1h",
            interval="1h",
            groupBy="tag1",
            orderBy="p50(sentry.transactions.measurements.lcp)",
        )
        groups = response.data["groups"]
        assert len(groups) == 2

        expected = [
            ("value2",
             2),  # value2 comes first because it has the smaller median
            ("value1", 5),
        ]
        for (expected_tag_value,
             expected_count), group in zip(expected, groups):
            # With orderBy, you only get totals:
            assert group["by"] == {"tag1": expected_tag_value}
            assert group["totals"] == {
                "p50(sentry.transactions.measurements.lcp)": expected_count
            }
            assert group["series"] == {
                "p50(sentry.transactions.measurements.lcp)": [expected_count]
            }
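
The expected totals follow directly from the medians of the ingested distributions: p50 of [1, 2, 3] is 2 and p50 of [4, 5, 6] is 5, which is why "value2" sorts first under the ascending orderBy. A quick check:

    from statistics import median

    assert median([1, 2, 3]) == 2  # tag value "value2" -> p50 = 2
    assert median([4, 5, 6]) == 5  # tag value "value1" -> p50 = 5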
Code example #14
 def test_valid_filter(self):
     for tag in ("release", "environment"):
         indexer.record(tag)
     query = "release:[email protected]"
     response = self.get_success_response(
         self.project.organization.slug,
         field="sum(sentry.sessions.session)",
         groupBy="environment",
         query=query,
     )
     assert response.data.keys() == {
         "start", "end", "query", "intervals", "groups"
     }
Code example #15
 def test_crash_free_rate_when_no_session_metrics_data_with_orderby_and_groupby(
         self):
     indexer.record("release")
     response = self.get_success_response(
         self.organization.slug,
         project=[self.project.id],
         field=["session.crash_free_rate", "sum(sentry.sessions.session)"],
         statsPeriod="6m",
         interval="6m",
         groupBy=["release"],
         orderBy="-session.crash_free_rate",
     )
     assert response.data["groups"] == []
Code example #16
File: test_fields.py Project: littlekign/sentry
    def test_generate_metric_ids(self):
        session_metric_id = indexer.record("sentry.sessions.session")
        session_error_metric_id = indexer.record("sentry.sessions.session.error")

        for derived_metric_name in [
            "session.init",
            "session.crashed",
            "session.crash_free_rate",
            "session.errored_preaggregated",
        ]:
            assert DERIVED_METRICS[derived_metric_name].generate_metric_ids() == {session_metric_id}
        assert DERIVED_METRICS["session.errored_set"].generate_metric_ids() == {
            session_error_metric_id
        }
Code example #17
 def test_no_limit_with_series(self):
     """Pagination args do not apply to series"""
     indexer.record("session.status")
     for minute in range(4):
         self.store_session(
             self.build_session(project_id=self.project.id,
                                started=(time.time() // 60 - minute) * 60))
     response = self.get_success_response(
         self.organization.slug,
         field="sum(sentry.sessions.session)",
         statsPeriod="4m",
         interval="1m",
     )
     group = response.data["groups"][0]
     assert group["totals"]["sum(sentry.sessions.session)"] == 4
     assert group["series"]["sum(sentry.sessions.session)"] == [1, 1, 1, 1]
Code example #18
    def test_metric_tag_details(self):
        response = self.get_success_response(
            self.organization.slug,
            "tag1",
        )
        assert response.data == [
            {
                "key": "tag1",
                "value": "value1"
            },
            {
                "key": "tag1",
                "value": "value2"
            },
        ]

        # When single metric_name is supplied, get only tag values for that metric:
        response = self.get_success_response(
            self.organization.slug,
            "tag1",
            metric=["metric1"],
        )
        assert response.data == [
            {
                "key": "tag1",
                "value": "value1"
            },
        ]

        # When metric names are supplied, get intersection of tags:
        response = self.get_success_response(
            self.organization.slug,
            "tag1",
            metric=["metric1", "metric2"],
        )
        assert response.data == []

        # Ensure that if a tag is present in the indexer but has no values in the
        # dataset, its intersection with other tags does not yield any results:
        indexer.record("random_tag")
        response = self.get_success_response(
            self.organization.slug,
            "tag1",
            metric=["metric1", "random_tag"],
        )
        assert response.data == []
Code example #19
    def test_metric_details_metric_does_not_have_data(self):
        indexer.record(self.organization.id, "foo.bar")
        response = self.get_response(
            self.organization.slug,
            "foo.bar",
        )
        assert response.status_code == 404

        indexer.record(self.organization.id, "sentry.sessions.session")
        response = self.get_response(
            self.organization.slug,
            "session.crash_free_rate",
        )
        assert response.status_code == 404
        assert (
            response.data["detail"]
            == "The following metrics ['session.crash_free_rate'] do not exist in the dataset"
        )
Code example #20
File: test_tasks.py Project: wangjianweiwei/sentry
 def test_query_and_environment_users_metrics(self):
     env = self.create_environment(self.project, name="development")
     for tag in [
             SessionMetricKey.USER.value,
             "session.status",
             "environment",
             "development",
             "init",
             "crashed",
             "release",
             "[email protected]",
     ]:
         indexer.record(tag)
     entity_subscription = get_entity_subscription_for_dataset(
         dataset=QueryDatasets.METRICS,
         time_window=3600,
         aggregate="percentage(users_crashed, users) AS _crash_rate_alert_aggregate",
         extra_fields={"org_id": self.organization.id},
     )
     snuba_filter = build_snuba_filter(
         entity_subscription,
         query="release:[email protected]",
         environment=env,
     )
     org_id = self.organization.id
     assert snuba_filter
     assert snuba_filter.aggregations == [["uniq(value)", None, "value"]]
     assert snuba_filter.groupby == [tag_key(org_id, "session.status")]
     assert snuba_filter.conditions == [
         ["metric_id", "=",
          metric_id(org_id, SessionMetricKey.USER)],
         [
             tag_key(org_id, "session.status"),
             "IN",
             get_tag_values_list(org_id, ["crashed", "init"]),
         ],
         [
             tag_key(org_id, "environment"), "=",
             tag_value(org_id, "development")
         ],
         [tag_key(org_id, "release"), "=",
          tag_value(org_id, "[email protected]")],
     ]
Code example #21
    def test_generate_select_snql_of_derived_metric(self):
        """
        Test that ensures that the method `generate_select_statements` generates
        the SnQL required to query for the instance of DerivedMetric
        """
        org_id = self.project.organization_id
        for status in ("init", "crashed"):
            indexer.record(org_id, status)
        session_ids = [indexer.record(org_id, "sentry.sessions.session")]

        derived_name_snql = {
            "session.init": (init_sessions, session_ids),
            "session.crashed": (crashed_sessions, session_ids),
            "session.errored_preaggregated":
            (errored_preaggr_sessions, session_ids),
            "session.errored_set": (
                sessions_errored_set,
                [indexer.record(org_id, "sentry.sessions.session.error")],
            ),
        }
        for metric_name, (func, metric_ids_list) in derived_name_snql.items():
            assert DERIVED_METRICS[metric_name].generate_select_statements(
                [self.project]) == [
                    func(metric_ids=metric_ids_list, alias=metric_name),
                ]

        assert DERIVED_METRICS[
            "session.crash_free_rate"].generate_select_statements(
                [self.project]) == [
                    percentage(
                        crashed_sessions(metric_ids=session_ids,
                                         alias="session.crashed"),
                        init_sessions(metric_ids=session_ids,
                                      alias="session.init"),
                        alias="session.crash_free_rate",
                    )
                ]

        # Ensure that an exception is raised if `generate_select_statements` is
        # called before `get_entity` (and thereby the entity validation logic) has run
        with pytest.raises(DerivedMetricParseException):
            self.crash_free_fake.generate_select_statements([self.project])
Code example #22
 def test_query_and_environment_sessions_metrics(self):
     env = self.create_environment(self.project, name="development")
     org_id = self.organization.id
     for tag in [
             SessionMetricKey.SESSION.value,
             "session.status",
             "environment",
             "development",
             "init",
             "crashed",
             "release",
             "[email protected]",
     ]:
         indexer.record(org_id, tag)
     entity_subscription = get_entity_subscription_for_dataset(
         dataset=QueryDatasets.METRICS,
         time_window=3600,
         aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
         extra_fields={"org_id": org_id},
     )
     snuba_filter = build_snuba_filter(
         entity_subscription,
         query="release:[email protected]",
         environment=env,
     )
     assert snuba_filter
     assert snuba_filter.aggregations == [["sum(value)", None, "value"]]
     assert snuba_filter.groupby == [resolve_tag_key("session.status")]
     assert snuba_filter.conditions == [
         ["metric_id", "=",
          resolve(SessionMetricKey.SESSION.value)],
         [
             resolve_tag_key("session.status"),
             "IN",
             resolve_many_weak(["crashed", "init"]),
         ],
         [resolve_tag_key("environment"), "=",
          resolve_weak("development")],
         [resolve_tag_key("release"), "=",
          resolve_weak("[email protected]")],
     ]
Code example #23
    def test_limit_with_orderby_is_overridden_by_paginator_limit(self):
        """
        Test that ensures that when an `orderBy` clause is set, the paginator
        limit overrides the `limit` parameter
        """
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        tag1 = indexer.record("tag1")
        value1 = indexer.record("value1")
        value2 = indexer.record("value2")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            } for tag, value, numbers in (
                (tag1, value1, [4, 5, 6]),
                (tag1, value2, [1, 2, 3]),
            )],
            entity="metrics_distributions",
        )
        response = self.get_success_response(
            self.organization.slug,
            field="p50(sentry.transactions.measurements.lcp)",
            statsPeriod="1h",
            interval="1h",
            groupBy="tag1",
            orderBy="p50(sentry.transactions.measurements.lcp)",
            per_page=1,
            limit=2,
        )
        groups = response.data["groups"]
        assert len(groups) == 1
Code example #24
    def test_orderby_percentile_with_many_fields_one_entity(self):
        """
        Test that ensures transactions are ordered correctly when all the fields
        requested are from the same entity
        """
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        metric_id_fcp = indexer.record("sentry.transactions.measurements.fcp")
        transaction_id = indexer.record("transaction")
        transaction_1 = indexer.record("/foo/")
        transaction_2 = indexer.record("/bar/")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            } for tag, value, numbers in (
                (transaction_id, transaction_1, [10, 11, 12]),
                (transaction_id, transaction_2, [4, 5, 6]),
            )],
            entity="metrics_distributions",
        )
        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id_fcp,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            } for tag, value, numbers in (
                (transaction_id, transaction_1, [1, 2, 3]),
                (transaction_id, transaction_2, [13, 14, 15]),
            )],
            entity="metrics_distributions",
        )

        response = self.get_success_response(
            self.organization.slug,
            field=[
                "p50(sentry.transactions.measurements.lcp)",
                "p50(sentry.transactions.measurements.fcp)",
            ],
            statsPeriod="1h",
            interval="1h",
            groupBy=["project_id", "transaction"],
            orderBy="p50(sentry.transactions.measurements.lcp)",
        )
        groups = response.data["groups"]
        assert len(groups) == 2

        expected = [
            ("/bar/", 5.0, 14.0),
            ("/foo/", 11.0, 2.0),
        ]
        for (expected_tag_value, expected_lcp_count,
             expected_fcp_count), group in zip(expected, groups):
            # With orderBy, you only get totals:
            assert group["by"] == {
                "transaction": expected_tag_value,
                "project_id": self.project.id
            }
            assert group["totals"] == {
                "p50(sentry.transactions.measurements.lcp)":
                expected_lcp_count,
                "p50(sentry.transactions.measurements.fcp)":
                expected_fcp_count,
            }
            assert group["series"] == {
                "p50(sentry.transactions.measurements.lcp)":
                [expected_lcp_count],
                "p50(sentry.transactions.measurements.fcp)":
                [expected_fcp_count],
            }
Code example #25
 def test_unknown_tag(self):
     indexer.record("bar")
     response = self.get_success_response(self.project.organization.slug,
                                          "bar")
     assert response.data == []
Code example #26
 def tag_key(name):
     res = indexer.record(name)
     assert res is not None, name
     return res
Code example #27
 def metric_id(name):
     res = indexer.record(name)
     assert res is not None, name
     return res
Code example #28
    def test_orderby(self):
        # Record some strings
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        k_transaction = indexer.record("transaction")
        v_foo = indexer.record("/foo")
        v_bar = indexer.record("/bar")
        v_baz = indexer.record("/baz")
        k_rating = indexer.record("measurement_rating")
        v_good = indexer.record("good")
        v_meh = indexer.record("meh")
        v_poor = indexer.record("poor")

        self._send_buckets(
            [
                {
                    "org_id": self.organization.id,
                    "project_id": self.project.id,
                    "metric_id": metric_id,
                    "timestamp": int(time.time()),
                    "tags": {
                        k_transaction: v_transaction,
                        k_rating: v_rating,
                    },
                    "type": "d",
                    "value": count * [
                        123.4
                    ],  # count decides the cardinality of this distribution bucket
                    "retention_days": 90,
                } for v_transaction, count in ((v_foo, 1), (v_bar, 3),
                                               (v_baz, 2))
                for v_rating in (v_good, v_meh, v_poor)
            ],
            entity="metrics_distributions",
        )

        response = self.get_success_response(
            self.organization.slug,
            field="count(sentry.transactions.measurements.lcp)",
            query="measurement_rating:poor",
            statsPeriod="1h",
            interval="1h",
            groupBy="transaction",
            orderBy="-count(sentry.transactions.measurements.lcp)",
            per_page=2,
        )
        groups = response.data["groups"]
        assert len(groups) == 2

        expected = [
            ("/bar", 3),
            ("/baz", 2),
        ]
        for (expected_transaction,
             expected_count), group in zip(expected, groups):
            # With orderBy, you only get totals:
            assert group["by"] == {"transaction": expected_transaction}
            assert group["series"] == {
                "count(sentry.transactions.measurements.lcp)":
                [expected_count]
            }
            assert group["totals"] == {
                "count(sentry.transactions.measurements.lcp)": expected_count
            }
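
The expected counts fall out of the bucket construction: each (transaction, count) pair gets one bucket per rating containing `count` copies of 123.4, so filtering on `measurement_rating:poor` leaves counts of 1, 3 and 2 for "/foo", "/bar" and "/baz"; ordered descending with `per_page=2`, only "/bar" and "/baz" remain. Reproduced in plain Python:

    counts = {"/foo": 1, "/bar": 3, "/baz": 2}  # distribution sizes per transaction
    top_two = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:2]
    assert top_two == [("/bar", 3), ("/baz", 2)]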
Code example #29
    def test_orderby_percentile_with_many_fields_multiple_entities_with_paginator(
            self):
        """
        Test that ensures transactions are ordered correctly when all the fields
        requested are from multiple entities
        """
        transaction_id = indexer.record("transaction")
        transaction_1 = indexer.record("/foo/")
        transaction_2 = indexer.record("/bar/")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": indexer.record("sentry.transactions.measurements.lcp"),
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {tag: value},
                "retention_days": 90,
            } for tag, value, numbers in (
                (transaction_id, transaction_1, [10, 11, 12]),
                (transaction_id, transaction_2, [4, 5, 6]),
            )],
            entity="metrics_distributions",
        )
        user_metric = indexer.record("sentry.transactions.user")
        user_ts = time.time()
        for ts, ranges in [
            (int(user_ts), [range(4, 5), range(6, 11)]),
            (int(user_ts // 60 - 15) * 60, [range(3), range(6)]),
        ]:
            self._send_buckets(
                [{
                    "org_id": self.organization.id,
                    "project_id": self.project.id,
                    "metric_id": user_metric,
                    "timestamp": ts,
                    "tags": {
                        tag: value
                    },
                    "type": "s",
                    "value": numbers,
                    "retention_days": 90,
                } for tag, value, numbers in (
                    (transaction_id, transaction_1, list(ranges[0])),
                    (transaction_id, transaction_2, list(ranges[1])),
                )],
                entity="metrics_sets",
            )

        request_args = {
            "field": [
                "p50(sentry.transactions.measurements.lcp)",
                "count_unique(sentry.transactions.user)",
            ],
            "statsPeriod": "1h",
            "interval": "10m",
            "datasource": "snuba",
            "groupBy": ["project_id", "transaction"],
            "orderBy": "p50(sentry.transactions.measurements.lcp)",
            "per_page": 1,
        }

        response = self.get_success_response(self.organization.slug,
                                             **request_args)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"]["transaction"] == "/bar/"
        assert groups[0]["totals"] == {
            "count_unique(sentry.transactions.user)": 11,
            "p50(sentry.transactions.measurements.lcp)": 5.0,
        }
        assert groups[0]["series"] == {
            "p50(sentry.transactions.measurements.lcp)":
            [None, None, None, None, None, 5.0],
            "count_unique(sentry.transactions.user)": [0, 0, 0, 6, 0, 5],
        }

        request_args["cursor"] = Cursor(0, 1)

        response = self.get_success_response(self.organization.slug,
                                             **request_args)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"]["transaction"] == "/foo/"
        assert groups[0]["totals"] == {
            "count_unique(sentry.transactions.user)": 4,
            "p50(sentry.transactions.measurements.lcp)": 11.0,
        }
        assert groups[0]["series"] == {
            "p50(sentry.transactions.measurements.lcp)":
            [None, None, None, None, None, 11.0],
            "count_unique(sentry.transactions.user)": [0, 0, 0, 3, 0, 1],
        }
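
The `count_unique` expectations are set-cardinality arithmetic over the ingested user sets: for "/bar/" the two buckets hold users range(6, 11) and range(6), whose union has 11 elements even though the series entries are 5 and 6 (the same user can appear in several intervals, so series values do not sum to the total). A quick check:

    bar_recent, bar_older = set(range(6, 11)), set(range(6))
    assert (len(bar_recent), len(bar_older)) == (5, 6)  # non-zero series entries
    assert len(bar_recent | bar_older) == 11            # totals for "/bar/"

    foo_recent, foo_older = set(range(4, 5)), set(range(3))
    assert len(foo_recent | foo_older) == 4             # totals for "/foo/"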
Code example #30
    def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(
            self):
        """
        Test that ensures that when the transactions table has null values for some
        fields (i.e. fields from a different entity than the field in the order by),
        the table gets populated accordingly
        """
        transaction_id = indexer.record("transaction")
        transaction_1 = indexer.record("/foo/")
        transaction_2 = indexer.record("/bar/")

        self._send_buckets(
            [{
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": indexer.record("sentry.transactions.measurements.lcp"),
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {tag: value},
                "retention_days": 90,
            } for tag, value, numbers in (
                (transaction_id, transaction_1, [10, 11, 12]),
                (transaction_id, transaction_2, [4, 5, 6]),
            )],
            entity="metrics_distributions",
        )
        response = self.get_success_response(
            self.organization.slug,
            field=[
                "p50(sentry.transactions.measurements.lcp)",
                "count_unique(sentry.transactions.user)",
            ],
            statsPeriod="1h",
            interval="1h",
            groupBy=["project_id", "transaction"],
            orderBy="p50(sentry.transactions.measurements.lcp)",
        )
        groups = response.data["groups"]
        assert len(groups) == 2

        expected = [
            ("/bar/", 5.0, 5),
            ("/foo/", 11.0, 1),
        ]
        for (expected_tag_value, expected_lcp_count,
             users), group in zip(expected, groups):
            # With orderBy, you only get totals:
            assert group["by"] == {
                "transaction": expected_tag_value,
                "project_id": self.project.id
            }
            assert group["totals"] == {
                "count_unique(sentry.transactions.user)": 0,
                "p50(sentry.transactions.measurements.lcp)":
                expected_lcp_count,
            }
            assert group["series"] == {
                "count_unique(sentry.transactions.user)": [0],
                "p50(sentry.transactions.measurements.lcp)":
                [expected_lcp_count],
            }