Example #1
0
    def get_result(self, limit, cursor=None):
        """Return one page of rows from ``data_fn`` plus prev/next cursors."""
        assert limit > 0
        start = 0 if cursor is None else cursor.offset
        # Over-fetch a single extra row so we can tell whether another
        # page exists without issuing a count query.
        payload = self.data_fn(offset=start, limit=limit + 1)

        if isinstance(payload, list):
            extra_row = len(payload) == limit + 1
            if extra_row:
                payload.pop()
        elif isinstance(payload.get("data"), list):
            extra_row = len(payload["data"]) == limit + 1
            if extra_row:
                payload["data"].pop()
        else:
            raise NotImplementedError

        # No ranged queries are issued, so every cursor carries ``value=0``
        # and ``offset`` acts as an absolute row offset from the beginning
        # of the dataset -- the same meaning as SQL's ``OFFSET``.
        return CursorResult(
            payload,
            prev=Cursor(0, max(0, start - limit), True, start > 0),
            next=Cursor(0, max(0, start + limit), False, extra_row),
        )
Example #2
0
    def get_result(self, limit=100, cursor=None):
        """Page through ``self.queryset``; cursor is (page limit, page #)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        page_size = min(limit, self.max_limit)

        queryset = self.queryset
        if self.key:
            queryset = queryset.order_by(*self.key)

        page = cursor.offset
        start = cursor.offset * cursor.value
        stop = start + (cursor.value or page_size) + 1

        if self.max_offset is not None and start >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if start < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        rows = list(queryset[start:stop])
        if cursor.value != page_size:
            # The cursor was built with a different page size; keep only
            # the trailing window of page_size + 1 rows.
            rows = rows[-(page_size + 1):]

        next_cursor = Cursor(page_size, page + 1, False, len(rows) > page_size)
        prev_cursor = Cursor(page_size, page - 1, True, page > 0)

        rows = list(rows[:page_size])
        if self.on_results:
            rows = self.on_results(rows)

        return CursorResult(results=rows,
                            next=next_cursor,
                            prev=prev_cursor)
Example #3
0
    def get_result(self, limit=100, cursor=None):
        """Page through the ordered queryset; cursor is (page limit, page #)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        page_size = min(limit, self.max_limit)

        # Order ascending or descending on the configured key.
        order = '-{}'.format(self.key) if self.desc else self.key
        queryset = self.queryset.order_by(order)

        page = cursor.offset
        start = cursor.offset * cursor.value
        stop = start + (cursor.value or page_size) + 1

        rows = list(queryset[start:stop])
        if cursor.value != page_size:
            # The cursor was built with a different page size; keep only
            # the trailing window of page_size + 1 rows.
            rows = rows[-(page_size + 1):]

        return CursorResult(
            results=rows[:page_size],
            next=Cursor(page_size, page + 1, False, len(rows) > page_size),
            prev=Cursor(page_size, page - 1, True, page > 0),
        )
Example #4
0
    def get_result(self, limit, cursor=None, count_hits=False):
        """Return a page of ``self.values`` addressed by a (score, offset) cursor.

        ``cursor.value`` is a score binary-searched in ``self.scores``;
        ``cursor.offset`` is a row offset relative to that score's first
        occurrence, which keeps pages stable with duplicate scores.  When
        ``count_hits`` is set, the total row count (capped at
        ``MAX_HITS_LIMIT``) is included in the result.

        Idiom fix: replaced the redundant ``True if x else False``
        conditionals with the boolean expressions themselves.
        """
        limit = min(limit, self.max_limit)

        if cursor is None:
            cursor = Cursor(0, 0, False)

        assert cursor.offset > -1

        if cursor.value == 0:
            # A zero score means "start of the dataset" (or its end, when
            # paginating backwards).
            position = len(self.scores) if cursor.is_prev else 0
        else:
            position = self.search(cursor.value)

        position = position + cursor.offset

        if cursor.is_prev:
            # TODO: It might make sense to ensure that this hi value is at
            # least the length of the page + 1 if we want to ensure we return a
            # full page of results when paginating backwards while data is
            # being mutated.
            hi = min(position, len(self.scores))
            lo = max(hi - limit, 0)
        else:
            lo = max(position, 0)
            hi = min(lo + limit, len(self.scores))

        if self.scores:
            # Each boundary cursor stores the score at the page edge plus
            # the distance from that score's first occurrence to the edge.
            prev_score = self.scores[min(lo, len(self.scores) - 1)]
            prev_cursor = Cursor(
                prev_score,
                lo - self.search(prev_score, hi=lo),
                True,
                lo > 0,
            )

            next_score = self.scores[min(hi, len(self.scores) - 1)]
            next_cursor = Cursor(
                next_score,
                hi - self.search(next_score, hi=hi),
                False,
                hi < len(self.scores),
            )
        else:
            # Empty dataset: echo the incoming cursor with no results flag.
            prev_cursor = Cursor(cursor.value, cursor.offset, True, False)
            next_cursor = Cursor(cursor.value, cursor.offset, False, False)

        results = self.values[lo:hi]
        if self.on_results:
            results = self.on_results(results)

        return CursorResult(
            results,
            prev=prev_cursor,
            next=next_cursor,
            hits=min(len(self.scores), MAX_HITS_LIMIT) if count_hits else None,
            max_hits=MAX_HITS_LIMIT if count_hits else None,
        )
Example #5
0
    def get_result(self, limit=100, cursor=None):
        """Return one page, merging rows from ``data_load_func`` with any
        trailing rows supplied by ``queryset_load_func``.

        The cursor encodes (page limit, page number); the absolute row
        offset is ``page * page_limit``.
        """
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        # page is the page number; offset is the absolute row position.
        page = cursor.offset
        offset = cursor.offset * cursor.value
        limit = cursor.value or limit

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        # Fetch up to max_limit + 1 primary rows so the has_more check
        # below can see past the requested page.
        primary_results = self.data_load_func(offset=offset,
                                              limit=self.max_limit + 1)

        queryset = self.apply_to_queryset(self.queryset, primary_results)

        # Index models by key so primary rows can be resolved in order.
        mapping = {}
        for model in queryset:
            mapping[self.key_from_model(model)] = model

        # Keep only primary rows with a matching model, preserving the
        # order returned by data_load_func.
        results = []
        for row in primary_results:
            model = mapping.get(self.key_from_data(row))
            if model is not None:
                results.append(model)

        if self.queryset_load_func and self.data_count_func and len(
                results) < limit:
            # If we hit the end of the results from the data load func, check whether there are
            # any additional results in the queryset_load_func, if one is provided.
            extra_limit = limit - len(results) + 1
            total_data_count = self.data_count_func()
            total_offset = offset + len(results)
            # Offset into the queryset-only tail of the combined stream.
            qs_offset = max(0, total_offset - total_data_count)
            qs_results = self.queryset_load_func(self.queryset, total_offset,
                                                 qs_offset, extra_limit)
            results.extend(qs_results)
            has_more = len(qs_results) == extra_limit
        else:
            has_more = len(primary_results) > limit

        results = results[:limit]
        next_cursor = Cursor(limit, page + 1, False, has_more)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
Example #6
0
    def test_negative_offset(self):
        """Negative offsets and negative cursor values must be rejected."""
        self.create_user("*****@*****.**")
        paginator = OffsetPaginator(User.objects.all())

        for bad_cursor in (Cursor(10, -1), Cursor(-10, 1)):
            with self.assertRaises(BadPaginationError):
                paginator.get_result(cursor=bad_cursor)
    def test_empty_results(self):
        """An empty sequence yields an empty page with inert cursors."""
        paginator = SequencePaginator([])
        page = paginator.get_result(5)
        assert list(page) == []
        assert page.prev == Cursor(0, 0, True, False)
        assert page.next == Cursor(0, 0, False, False)

        # The same holds for a reversed (descending) sequence.
        paginator = SequencePaginator([], reverse=True)
        page = paginator.get_result(5)
        assert list(page) == []
        assert page.prev == Cursor(0, 0, True, False)
        assert page.next == Cursor(0, 0, False, False)
Example #8
0
    def get_result(self, limit: int, cursor: Optional[Cursor] = None) -> CursorResult:
        """Page through grouped results, trimming each group's examples."""
        assert limit > 0
        start = cursor.offset if cursor is not None else 0
        # Ask for one extra example per group so a further page is detectable.
        data = self.data_fn(start, limit + 1)

        has_more = False
        for result in data:
            if len(result["examples"]) == limit + 1:
                has_more = True
            result["examples"] = result["examples"][:limit]

        return CursorResult(
            data,
            prev=Cursor(0, max(0, start - limit), True, start > 0),
            next=Cursor(0, max(0, start + limit), False, has_more),
        )
Example #9
0
    def get_result(self, limit=100, cursor=None):
        """Return a page; cursor is (identifier(integer), row offset, is_prev)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        queryset = self._get_results_from_qs(cursor.value, cursor.is_prev)

        # Fetch one extra row (two when paginating backwards, since the
        # row immediately before the cursor is pulled in as well).
        extra = 2 if cursor.is_prev else 1
        stop = cursor.offset + limit + extra

        rows = list(queryset[cursor.offset:stop])

        if cursor.is_prev:
            # Drop the boundary row and restore ascending order.
            rows = rows[1:][::-1]

        return build_cursor(
            results=rows,
            limit=limit,
            cursor=cursor,
            key=self._get_item_key,
        )
Example #10
0
    def get_result(self, limit=100, cursor=None):
        """Return a page; cursor is (identifier(integer), row offset, is_prev)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        page_size = min(limit, self.max_limit)

        cursor_value = self.value_from_cursor(cursor) if cursor.value else 0

        queryset = self._build_queryset(cursor_value, cursor.is_prev)

        # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
        # the key is not unique
        start = cursor.offset + 1 if cursor.is_prev else cursor.offset
        # One extra row lets build_cursor decide whether a next page exists.
        rows = list(queryset[start:start + page_size + 1])
        if cursor.is_prev:
            rows.reverse()

        return build_cursor(
            results=rows,
            limit=page_size,
            cursor=cursor,
            key=self.get_item_key,
        )
    def test_histogram_pagination(self):
        """Histogram tag pages should honour per_page and the offset cursor."""
        self.setup_transactions()
        request = {
            "aggregateColumn": "transaction.duration",
            "per_page": 3,
            "numBucketsPerKey": 2,
            "tagKey": "color",
        }
        features = self.feature_list + ("organizations:performance-tag-page", )

        # First page: a full page of 3 tag buckets.
        data_response = self.do_request(request, feature_list=features)
        tag_data = data_response.data["tags"]["data"]
        assert len(tag_data) == 3

        # Second page via the offset cursor: the single remaining bucket.
        request["cursor"] = Cursor(0, 3)
        data_response = self.do_request(request, feature_list=features)
        tag_data = data_response.data["tags"]["data"]
        assert len(tag_data) == 1
Example #12
0
    def get_result(self, limit, cursor=None):
        """Page through a ``{"groups": [...]}`` payload from ``data_fn``."""
        assert limit > 0
        start = cursor.offset if cursor is not None else 0
        # Request one extra row to detect a following page.
        data = self.data_fn(offset=start, limit=limit + 1)

        groups = data.get("groups")
        if not isinstance(groups, list):
            raise NotImplementedError
        has_more = len(groups) == limit + 1
        if has_more:
            groups.pop()

        return CursorResult(
            data,
            prev=Cursor(0, max(0, start - limit), True, start > 0),
            next=Cursor(0, max(0, start + limit), False, has_more),
        )
Example #13
0
    def get_result(self, limit=100, cursor=None):
        """Return one page drawn from ``self.sources`` in order.

        The cursor encodes (page limit, page number); the absolute row
        offset is ``page * page_limit``.  Sources are consumed one after
        another until the page (plus one probe row) is filled.
        """
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if limit <= 0:
            raise BadPaginationError("Limit must be positive")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        results = []
        # note: we shouldn't use itertools.islice(itertools.chain.from_iterable(self.sources))
        # because source may be a QuerySet which is much more efficient to slice directly
        for source in self.sources:
            # Get an additional item so we can check for a next page.
            remaining = limit - len(results) + 1
            results.extend(source[offset:offset + remaining])
            # don't do offset = max(0, offset - len(source)) because len(source) may be expensive
            if len(results) == 0:
                # Nothing emitted yet: this whole source was skipped, so
                # carry the leftover offset into the next source.
                offset -= len(source)
            else:
                # Once any rows were emitted, later sources start at 0.
                offset = 0
            if len(results) > limit:
                assert len(results) == limit + 1
                break

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        # Drop the probe row that only existed to detect the next page.
        if next_cursor.has_results:
            results.pop()

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
    def test_simple(self):
        """Offset pagination over a plain range-backed data function."""
        def data_fn(offset=None, limit=None):
            # NB: this stub uses ``limit`` as the exclusive stop index.
            return list(range(offset, limit))

        paginator = GenericOffsetPaginator(data_fn=data_fn)

        first = paginator.get_result(5)
        assert list(first) == [0, 1, 2, 3, 4]
        assert first.prev == Cursor(0, 0, True, False)
        assert first.next == Cursor(0, 5, False, True)

        second = paginator.get_result(5, first.next)
        assert list(second) == [5]
        assert second.prev == Cursor(0, 0, True, True)
        assert second.next == Cursor(0, 10, False, False)
Example #15
0
    def get_result(self, limit=100, cursor=None):
        """Return one page drawn from ``self.sources`` in order.

        The cursor encodes (page limit, page number); the absolute row
        offset is ``page * page_limit``.

        Fixes two slicing bugs in the source scan: the old code sliced
        ``source[offset:remaining]``, treating the remaining *count* as an
        absolute stop index (skipping rows whenever ``offset > 0`` and the
        source extended past ``remaining``), and it shrank ``remaining`` by
        the cumulative result count rather than by the rows taken from the
        current source.  ``remaining`` is now recomputed per source.
        """
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        results = []
        for source in self.sources:
            # Get an additional item so we can check for a next page.
            remaining = limit - len(results) + 1
            results.extend(source[offset:offset + remaining])
            if len(results) == 0:
                # This entire source was skipped; carry the leftover
                # offset into the next source.
                offset -= len(source)
            else:
                # Once rows have been emitted, later sources start at 0.
                offset = 0
            if len(results) > limit:
                break

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        # Drop the probe row that only existed to detect the next page.
        if next_cursor.has_results:
            results.pop()

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
Example #16
0
 def test_results_from_last_source(self):
     """A cursor past the first source pages into the final source."""
     paginator = self.cls(sources=[[1, 2, 3, 4], [5, 6, 7, 8]])
     result = paginator.get_result(limit=3, cursor=Cursor(3, 2))
     assert result.results == [7, 8]
     assert len(result.results) == 2
     assert result.next.has_results is False
     assert result.prev.has_results
Example #17
0
 def test_results_from_two_sources(self):
     """A page that straddles the source boundary mixes both sources."""
     paginator = self.cls(sources=[[1, 2, 3, 4], [5, 6, 7, 8]])
     result = paginator.get_result(limit=3, cursor=Cursor(3, 1))
     assert result.results == [4, 5, 6]
     assert len(result.results) == 3
     assert result.next.has_results
     assert result.prev.has_results
    def test_orderby_percentile_with_pagination(self):
        """Ordering by p50 with per_page=1 yields one group per page."""
        metric_id = indexer.record("sentry.transactions.measurements.lcp")
        tag1 = indexer.record("tag1")
        value1 = indexer.record("value1")
        value2 = indexer.record("value2")

        buckets = []
        for tag, value, numbers in ((tag1, value1, [4, 5, 6]),
                                    (tag1, value2, [1, 2, 3])):
            buckets.append({
                "org_id": self.organization.id,
                "project_id": self.project.id,
                "metric_id": metric_id,
                "timestamp": int(time.time()),
                "type": "d",
                "value": numbers,
                "tags": {
                    tag: value
                },
                "retention_days": 90,
            })
        self._send_buckets(buckets, entity="metrics_distributions")

        query = {
            "field": "p50(sentry.transactions.measurements.lcp)",
            "statsPeriod": "1h",
            "interval": "1h",
            "groupBy": "tag1",
            "orderBy": "p50(sentry.transactions.measurements.lcp)",
            "per_page": 1,
        }

        # First page: the smaller p50 (value2's series) comes first.
        response = self.get_success_response(self.organization.slug, **query)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"] == {"tag1": "value2"}
        assert groups[0]["totals"] == {
            "p50(sentry.transactions.measurements.lcp)": 2
        }

        # Second page via the cursor: the larger p50 (value1's series).
        response = self.get_success_response(self.organization.slug,
                                             cursor=Cursor(0, 1),
                                             **query)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"] == {"tag1": "value1"}
        assert groups[0]["totals"] == {
            "p50(sentry.transactions.measurements.lcp)": 5
        }
    def get_result(self, limit, cursor=None):
        """Page through histogram tag data returned by ``data_fn``."""
        assert limit > 0
        start = cursor.offset if cursor is not None else 0
        # Request 1 more than limit so we can tell if there is another page
        # Use raw_limit for the histogram itself so bucket calculations are correct
        data = self.data_fn(offset=start, limit=limit + 1, raw_limit=limit)

        tags = data["tags"]
        if not isinstance(tags, list):
            raise NotImplementedError
        has_more = len(tags) == limit + 1
        if has_more:
            tags.pop()

        return CursorResult(
            data,
            prev=Cursor(0, max(0, start - limit), True, start > 0),
            next=Cursor(0, max(0, start + limit), False, has_more),
        )
Example #20
0
    def get_result(self, limit=100, cursor=None):
        """Return one page by joining ``data_load_func`` rows to queryset models.

        The cursor encodes (page limit, page number); the absolute row
        offset is ``page * page_limit``.
        """
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value
        # NOTE(review): from here on ``limit`` is the page size plus one
        # probe row, and that inflated value is also stored as the cursor
        # value below -- confirm the compounding is intended, since each
        # subsequent page would then use a limit one larger than the last.
        limit = (cursor.value or limit) + 1

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        primary_results = self.data_load_func(offset=offset,
                                              limit=self.max_limit)

        queryset = self.apply_to_queryset(self.queryset, primary_results)

        # Index models by key so primary rows can be resolved in order.
        mapping = {}
        for model in queryset:
            mapping[self.key_from_model(model)] = model

        # Keep only primary rows with a matching model, preserving the
        # order returned by data_load_func.
        results = []
        for row in primary_results:
            model = mapping.get(self.key_from_data(row))
            if model is not None:
                results.append(model)

        next_cursor = Cursor(limit, page + 1, False,
                             len(primary_results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)
        results = list(results[:limit])

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
 def test_pagination_offset_without_orderby(self):
     """A pagination cursor sent without ``orderBy`` is rejected with a 400."""
     response = self.get_response(
         self.organization.slug,
         field="count(sentry.transactions.measurements.lcp)",
         groupBy="transaction",
         cursor=Cursor(0, 1),
     )
     assert response.status_code == 400
     detail = response.json()["detail"]
     assert detail == "'cursor' is only supported in combination with 'orderBy'"
Example #22
0
    def get_result(self, limit=100, cursor=None, count_hits=False):
        """Return a page; cursor is (identifier(integer), row offset, is_prev)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        page_size = min(limit, self.max_limit)

        cursor_value = self.value_from_cursor(cursor) if cursor.value else 0

        queryset = self._build_queryset(cursor_value, cursor.is_prev)

        # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
        # the key is not unique
        max_hits = 1000 if count_hits else None
        hits = self.count_hits(max_hits) if count_hits else None

        start = cursor.offset
        # this effectively gets us the before row, and the current (after) row
        # every time. Do not offset if the provided cursor value was empty since
        # there is nothing to traverse past.
        if cursor.is_prev and cursor.value:
            start += 1

        # The + 1 is needed so we can decide in the ResultCursor if there is
        # more on the next page.
        rows = list(queryset[start:start + page_size + 1])
        if cursor.is_prev:
            rows.reverse()

        return build_cursor(
            results=rows,
            limit=page_size,
            hits=hits,
            max_hits=max_hits,
            cursor=cursor,
            is_desc=self.desc,
            key=self.get_item_key,
        )
Example #23
0
    def test_ascending_simple(self):
        # Pages of 5 over scores 0..9 ascending: walk forward, back, and
        # past the end of the dataset.
        paginator = SequencePaginator([(i, i) for i in range(10)], reverse=False)

        # First page.
        result = paginator.get_result(5)
        assert list(result) == [0, 1, 2, 3, 4]
        assert result.prev == Cursor(0, 0, True, False)
        assert result.next == Cursor(5, 0, False, True)

        # Second (final) page via the next cursor.
        result = paginator.get_result(5, result.next)
        assert list(result) == [5, 6, 7, 8, 9]
        assert result.prev == Cursor(5, 0, True, True)
        assert result.next == Cursor(9, 1, False, False)

        # Back to the first page via the prev cursor.
        result = paginator.get_result(5, result.prev)
        assert list(result) == [0, 1, 2, 3, 4]
        assert result.prev == Cursor(0, 0, True, False)
        assert result.next == Cursor(5, 0, False, True)

        # A cursor beyond the maximum score yields an empty page.
        result = paginator.get_result(5, Cursor(100, 0, False))
        assert list(result) == []
        assert result.prev == Cursor(9, 1, True, True)
        assert result.next == Cursor(9, 1, False, False)
Example #24
0
    def test_descending_simple(self):
        # Pages of 5 over scores 0..9 descending: walk forward, back, and
        # past the minimum score.
        paginator = SequencePaginator([(i, i) for i in range(10)], reverse=True)

        # First page holds the highest values.
        result = paginator.get_result(5)
        assert list(result) == [9, 8, 7, 6, 5]
        assert result.prev == Cursor(9, 0, True, False)
        assert result.next == Cursor(4, 0, False, True)

        # Second (final) page via the next cursor.
        result = paginator.get_result(5, result.next)
        assert list(result) == [4, 3, 2, 1, 0]
        assert result.prev == Cursor(4, 0, True, True)
        assert result.next == Cursor(0, 1, False, False)

        # Back to the first page via the prev cursor.
        result = paginator.get_result(5, result.prev)
        assert list(result) == [9, 8, 7, 6, 5]
        assert result.prev == Cursor(9, 0, True, False)
        assert result.next == Cursor(4, 0, False, True)

        # A cursor below the minimum score yields an empty page.
        result = paginator.get_result(5, Cursor(-10, 0, False))
        assert list(result) == []
        assert result.prev == Cursor(0, 1, True, True)
        assert result.next == Cursor(0, 1, False, False)
Example #25
0
    def test_descending_repeated_scores(self):
        # All ten values share score 1, so cursors must rely on the row
        # offset rather than the score to address pages.
        paginator = SequencePaginator([(1, i) for i in range(10)], reverse=True)

        # First page.
        result = paginator.get_result(5)
        assert list(result) == [9, 8, 7, 6, 5]
        assert result.prev == Cursor(1, 0, True, False)
        assert result.next == Cursor(1, 5, False, True)

        # Second (final) page via the next cursor.
        result = paginator.get_result(5, result.next)
        assert list(result) == [4, 3, 2, 1, 0]
        assert result.prev == Cursor(1, 5, True, True)
        assert result.next == Cursor(1, 10, False, False)

        # Back to the first page via the prev cursor.
        result = paginator.get_result(5, result.prev)
        assert list(result) == [9, 8, 7, 6, 5]
        assert result.prev == Cursor(1, 0, True, False)
        assert result.next == Cursor(1, 5, False, True)

        # A cursor below the minimum score yields an empty page.
        result = paginator.get_result(5, Cursor(-10, 0, False))
        assert list(result) == []
        assert result.prev == Cursor(1, 10, True, True)
        assert result.next == Cursor(1, 10, False, False)
Example #26
0
    def get_result(self, cursor=None, limit=100):
        """Return a page from the combined querysets addressed by ``cursor``.

        Handles the ``is_prev`` edge case where the row that generated the
        cursor may reappear with an unchanged sort value.
        """
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        if cursor.value:
            cursor_value = self.value_from_cursor(cursor)
        else:
            cursor_value = None

        limit = min(limit, MAX_LIMIT)

        offset = cursor.offset
        # One extra row detects the next page; a second extra row (prev
        # only) fetches the boundary row so it can be compared against the
        # cursor value below.
        extra = 1
        if cursor.is_prev and cursor.value:
            extra += 1
        combined_querysets = self._build_combined_querysets(
            cursor_value, cursor.is_prev, limit, extra)

        stop = offset + limit + extra
        results = (list(combined_querysets[offset:stop]) if self.using_dates
                   else list(combined_querysets[:(limit + extra)]))

        if cursor.is_prev and cursor.value:
            # If the first result is equal to the cursor_value then it's safe to filter
            # it out, since the value hasn't been updated
            if results and self.get_item_key(results[0],
                                             for_prev=True) == cursor.value:
                results = results[1:]
            # Otherwise we may have fetched an extra row, just drop it off the end if so.
            # NOTE(review): the slice above yields at most limit + extra rows,
            # so this comparison can only be true when offset == 0 -- confirm
            # whether ``offset`` belongs in it.
            elif len(results) == offset + limit + extra:
                results = results[:-1]

        # We reversed the results when generating the querysets, so we need to reverse back now.
        if cursor.is_prev:
            results.reverse()

        return build_cursor(
            results=results,
            cursor=cursor,
            key=self.get_item_key,
            limit=limit,
            is_desc=self.desc,
            on_results=self.on_results,
        )
Example #27
0
    def test_no_duplicates_in_pagination(self):
        """Consecutive pages should tile both sources without repeats."""
        paginator = self.cls(sources=[[1, 2, 3, 4], [5, 6, 7, 8]])

        page = paginator.get_result(limit=3, cursor=Cursor(3, 0))
        assert page.results == [1, 2, 3]
        assert len(page.results) == 3
        assert page.next.has_results

        page = paginator.get_result(limit=3, cursor=page.next)
        assert page.results == [4, 5, 6]
        assert len(page.results) == 3
        assert page.next.has_results

        page = paginator.get_result(limit=3, cursor=page.next)
        assert page.results == [7, 8]
        assert len(page.results) == 2
        assert page.next.has_results is False
Example #28
0
    def get_result(self, limit=100, cursor=None, count_hits=False):
        """Return a page; cursor is (identifier(integer), row offset, is_prev)."""
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        page_size = min(limit, self.max_limit)

        cursor_value = self.value_from_cursor(cursor) if cursor.value else 0

        queryset = self._build_queryset(cursor_value, cursor.is_prev)

        # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
        # the key is not unique
        max_hits = 1000 if count_hits else None
        hits = self.count_hits(max_hits) if count_hits else None

        # this effectively gets us the before row, and the current (after) row
        # every time
        start = cursor.offset + 1 if cursor.is_prev else cursor.offset
        # One extra row lets build_cursor decide whether a next page exists.
        rows = list(queryset[start:start + page_size + 1])
        if cursor.is_prev:
            rows.reverse()

        return build_cursor(
            results=rows,
            limit=page_size,
            hits=hits,
            max_hits=max_hits,
            cursor=cursor,
            key=self.get_item_key,
        )
Example #29
0
    def get_result(self,
                   limit=100,
                   cursor=None,
                   count_hits=False,
                   known_hits=None,
                   max_hits=None):
        """Return one page of queryset results addressed by ``cursor``.

        ``count_hits`` computes the total hit count (capped at ``max_hits``,
        defaulting to ``MAX_HITS_LIMIT``); ``known_hits`` supplies a
        precomputed count instead.  ``post_query_filter``, when set, may
        shrink the final page below ``limit``.
        """
        # cursors are:
        #   (identifier(integer), row offset, is_prev)
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        if cursor.value:
            cursor_value = self.value_from_cursor(cursor)
        else:
            cursor_value = 0

        queryset = self.build_queryset(cursor_value, cursor.is_prev)

        # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
        # the key is not unique

        # max_hits can be limited to speed up the query
        if max_hits is None:
            max_hits = MAX_HITS_LIMIT
        if count_hits:
            hits = self.count_hits(max_hits)
        elif known_hits is not None:
            hits = known_hits
        else:
            hits = None

        offset = cursor.offset
        # The extra amount is needed so we can decide in the ResultCursor if there is
        # more on the next page.
        extra = 1
        # this effectively gets us the before row, and the current (after) row
        # every time. Do not offset if the provided cursor value was empty since
        # there is nothing to traverse past.
        # We need to actually fetch the before row so that we can compare it to the
        # cursor value. This allows us to handle an edge case where the first row
        # for a given cursor is the same row that generated the cursor on the
        # previous page, but we want to display it since it has had its sort value
        # updated.
        if cursor.is_prev and cursor.value:
            extra += 1

        stop = offset + limit + extra
        results = list(queryset[offset:stop])

        if cursor.is_prev and cursor.value:
            # If the first result is equal to the cursor_value then it's safe to filter
            # it out, since the value hasn't been updated
            if results and self.get_item_key(results[0],
                                             for_prev=True) == cursor.value:
                results = results[1:]
            # Otherwise we may have fetched an extra row, just drop it off the end if so.
            # NOTE(review): the slice above yields at most limit + extra rows,
            # so this comparison can only be true when offset == 0 -- confirm
            # whether ``offset`` belongs in it.
            elif len(results) == offset + limit + extra:
                results = results[:-1]

        # Presumably prev queries traverse in reverse order; restore the
        # caller-facing order here -- TODO confirm against build_queryset.
        if cursor.is_prev:
            results.reverse()

        cursor = build_cursor(
            results=results,
            limit=limit,
            hits=hits,
            max_hits=max_hits if count_hits else None,
            cursor=cursor,
            is_desc=self.desc,
            key=self.get_item_key,
            on_results=self.on_results,
        )

        # Note that this filter is just to remove unwanted rows from the result set.
        # This will reduce the number of rows returned rather than fill a full page,
        # and could result in an empty page being returned
        if self.post_query_filter:
            cursor.results = self.post_query_filter(cursor.results)

        return cursor
    def test_orderby_percentile_with_many_fields_multiple_entities_with_paginator(self):
        """
        Test that ensures when transactions are ordered correctly when all the fields requested
        are from multiple entities
        """
        # Resolve indexer ids for the tag key and the two transaction values.
        tag_key = indexer.record("transaction")
        txn_foo = indexer.record("/foo/")
        txn_bar = indexer.record("/bar/")

        # Seed LCP distribution buckets: /foo/ gets the higher values, /bar/ the lower.
        self._send_buckets(
            [
                {
                    "org_id": self.organization.id,
                    "project_id": self.project.id,
                    "metric_id": indexer.record("sentry.transactions.measurements.lcp"),
                    "timestamp": int(time.time()),
                    "type": "d",
                    "value": values,
                    "tags": {tag_key: txn},
                    "retention_days": 90,
                }
                for txn, values in (
                    (txn_foo, [10, 11, 12]),
                    (txn_bar, [4, 5, 6]),
                )
            ],
            entity="metrics_distributions",
        )

        # Seed user-set buckets in two time windows so the series has non-zero
        # entries in more than one interval.
        user_metric = indexer.record("sentry.transactions.user")
        user_ts = time.time()
        for ts, window_ranges in [
            (int(user_ts), [range(4, 5), range(6, 11)]),
            (int(user_ts // 60 - 15) * 60, [range(3), range(6)]),
        ]:
            self._send_buckets(
                [
                    {
                        "org_id": self.organization.id,
                        "project_id": self.project.id,
                        "metric_id": user_metric,
                        "timestamp": ts,
                        "tags": {tag_key: txn},
                        "type": "s",
                        "value": users,
                        "retention_days": 90,
                    }
                    for txn, users in (
                        (txn_foo, list(window_ranges[0])),
                        (txn_bar, list(window_ranges[1])),
                    )
                ],
                entity="metrics_sets",
            )

        request_args = {
            "field": [
                "p50(sentry.transactions.measurements.lcp)",
                "count_unique(sentry.transactions.user)",
            ],
            "statsPeriod": "1h",
            "interval": "10m",
            "datasource": "snuba",
            "groupBy": ["project_id", "transaction"],
            "orderBy": "p50(sentry.transactions.measurements.lcp)",
            "per_page": 1,
        }

        # Page 1: ordered by p50 ascending, so /bar/ (lower p50) comes first.
        response = self.get_success_response(self.organization.slug, **request_args)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"]["transaction"] == "/bar/"
        assert groups[0]["totals"] == {
            "count_unique(sentry.transactions.user)": 11,
            "p50(sentry.transactions.measurements.lcp)": 5.0,
        }
        assert groups[0]["series"] == {
            "p50(sentry.transactions.measurements.lcp)": [None, None, None, None, None, 5.0],
            "count_unique(sentry.transactions.user)": [0, 0, 0, 6, 0, 5],
        }

        # Page 2 via cursor: the remaining group, /foo/.
        request_args["cursor"] = Cursor(0, 1)

        response = self.get_success_response(self.organization.slug, **request_args)
        groups = response.data["groups"]
        assert len(groups) == 1
        assert groups[0]["by"]["transaction"] == "/foo/"
        assert groups[0]["totals"] == {
            "count_unique(sentry.transactions.user)": 4,
            "p50(sentry.transactions.measurements.lcp)": 11.0,
        }
        assert groups[0]["series"] == {
            "p50(sentry.transactions.measurements.lcp)": [None, None, None, None, None, 11.0],
            "count_unique(sentry.transactions.user)": [0, 0, 0, 3, 0, 1],
        }