def get_result(self, limit=100, cursor=None):
    """Fetch one page of queryset results and return a cursor result.

    Cursors are (identifier(integer), row offset, is_prev) triples.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    # A truthy cursor value anchors the queryset at that key; 0 means
    # "start from the beginning".
    if cursor.value:
        cursor_value = self.value_from_cursor(cursor)
    else:
        cursor_value = 0

    queryset = self._build_queryset(cursor_value, cursor.is_prev)

    # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
    # the key is not unique
    offset = cursor.offset
    if cursor.is_prev:
        # Skip past the row the cursor itself points at when paging
        # backwards (presumably; confirm against _build_queryset ordering).
        offset += 1

    # Fetch one extra row so build_cursor can tell whether another page exists.
    stop = offset + limit + 1
    results = list(queryset[offset:stop])

    if cursor.is_prev:
        # The prev queryset is built in reverse order; restore display order.
        results.reverse()

    return build_cursor(
        results=results,
        limit=limit,
        cursor=cursor,
        key=self.get_item_key,
    )
def build_query_params_from_request(request, organization, projects, environments):
    """Translate request GET parameters into search query kwargs.

    Raises ValidationError for a bad limit or unparseable query, and
    ParseError for a malformed cursor.
    """
    params = request.GET
    query_kwargs = {
        "projects": projects,
        "sort_by": params.get("sort", DEFAULT_SORT_OPTION),
    }

    raw_limit = params.get("limit")
    if raw_limit:
        try:
            query_kwargs["limit"] = int(raw_limit)
        except ValueError:
            raise ValidationError("invalid limit")

    # TODO: proper pagination support
    if params.get("cursor"):
        try:
            query_kwargs["cursor"] = Cursor.from_string(params.get("cursor"))
        except ValueError:
            raise ParseError(detail="Invalid cursor parameter.")

    query = params.get("query", "is:unresolved").strip()
    if query:
        try:
            search_filters = convert_query_values(
                parse_search_query(query), projects, request.user, environments
            )
        except InvalidSearchQuery as e:
            raise ValidationError(
                u"Your search query could not be parsed: {}".format(six.text_type(e))
            )
        validate_search_filter_permissions(organization, search_filters, request.user)
        query_kwargs["search_filters"] = search_filters

    return query_kwargs
def _build_query_params_from_request(self, request, project):
    """Build search kwargs for a single project from request GET params."""
    params = request.GET
    query_kwargs = {
        'projects': [project],
        'sort_by': params.get('sort', DEFAULT_SORT_OPTION),
    }

    raw_limit = params.get('limit')
    if raw_limit:
        try:
            query_kwargs['limit'] = int(raw_limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    raw_cursor = params.get('cursor')
    if raw_cursor:
        query_kwargs['cursor'] = Cursor.from_string(raw_cursor)

    query = params.get('query', 'is:unresolved').strip()
    if query:
        try:
            query_kwargs.update(parse_query(project, query, request.user))
        except InvalidQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))

    return query_kwargs
def _build_query_params_from_request(self, request, project):
    """Translate request GET parameters into group search kwargs."""
    params = request.GET
    query_kwargs = {
        'project': project,
        'sort_by': params.get('sort', DEFAULT_SORT_OPTION),
    }

    raw_limit = params.get('limit')
    if raw_limit:
        try:
            query_kwargs['limit'] = int(raw_limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    raw_cursor = params.get('cursor')
    if raw_cursor:
        query_kwargs['cursor'] = Cursor.from_string(raw_cursor)

    query = params.get('query', 'is:unresolved').strip()
    if query:
        try:
            query_kwargs.update(parse_query(project, query, request.user))
        except InvalidQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))

    return query_kwargs
def get_result(self, limit=100, cursor=None):
    """Return one page of results as a cursor result.

    Cursors are (identifier(integer), row offset, is_prev) triples.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    queryset = self._get_results_from_qs(cursor.value, cursor.is_prev)

    # this effectively gets us the before post, and the current (after) post
    # every time
    if cursor.is_prev:
        stop = cursor.offset + limit + 2
    else:
        stop = cursor.offset + limit + 1

    results = list(queryset[cursor.offset:stop])

    if cursor.is_prev:
        # Drop the extra "before" row and restore display order (the prev
        # queryset is presumably fetched in reverse — see _get_results_from_qs).
        results = results[1:][::-1]

    return build_cursor(
        results=results,
        limit=limit,
        cursor=cursor,
        key=self._get_item_key,
    )
def paginate(
    self,
    request,
    on_results=None,
    paginator=None,
    paginator_cls=Paginator,
    default_per_page=100,
    max_per_page=100,
    **paginator_kwargs
):
    """Paginate the request and return a Response with cursor headers."""
    # Callers supply either a ready-made paginator or a class + kwargs, never both.
    assert (paginator and not paginator_kwargs) or (paginator_cls and paginator_kwargs)

    per_page = int(request.GET.get('per_page', default_per_page))

    raw_cursor = request.GET.get('cursor')
    input_cursor = Cursor.from_string(raw_cursor) if raw_cursor else None

    assert per_page <= max(max_per_page, default_per_page)

    if not paginator:
        paginator = paginator_cls(**paginator_kwargs)

    cursor_result = paginator.get_result(limit=per_page, cursor=input_cursor)

    # map results based on callback
    results = on_results(cursor_result.results) if on_results else cursor_result.results

    response = Response(results)
    self.add_cursor_headers(request, response, cursor_result)
    return response
def paginate(self, request, on_results=None, paginator_cls=Paginator,
             default_per_page=100, **kwargs):
    """Paginate the request and return a Response with Link headers.

    ``on_results`` optionally maps the raw paginator results before they
    are serialized into the response.
    """
    per_page = int(request.GET.get("per_page", default_per_page))
    input_cursor = request.GET.get("cursor")
    if input_cursor:
        input_cursor = Cursor.from_string(input_cursor)
    else:
        input_cursor = None

    assert per_page <= max(100, default_per_page)

    paginator = paginator_cls(**kwargs)
    cursor_result = paginator.get_result(limit=per_page, cursor=input_cursor)

    # map results based on callback
    if on_results:
        results = on_results(cursor_result.results)
    else:
        # BUG FIX: ``results`` was previously assigned only when
        # ``on_results`` was provided, raising NameError otherwise.
        results = cursor_result.results

    headers = {}
    headers["Link"] = ", ".join(
        [
            self.build_cursor_link(request, "previous", cursor_result.prev),
            self.build_cursor_link(request, "next", cursor_result.next),
        ]
    )

    return Response(results, headers=headers)
def paginate(self, request, on_results=None, paginator_cls=Paginator,
             default_per_page=100, **kwargs):
    """Paginate the request and return a Response with cursor headers.

    ``on_results`` optionally maps the raw paginator results before they
    are serialized into the response.
    """
    per_page = int(request.GET.get('per_page', default_per_page))
    input_cursor = request.GET.get('cursor')
    if input_cursor:
        input_cursor = Cursor.from_string(input_cursor)
    else:
        input_cursor = None

    assert per_page <= max(100, default_per_page)

    paginator = paginator_cls(**kwargs)
    cursor_result = paginator.get_result(
        limit=per_page,
        cursor=input_cursor,
    )

    # map results based on callback
    if on_results:
        results = on_results(cursor_result.results)
    else:
        # BUG FIX: ``results`` was previously assigned only when
        # ``on_results`` was provided, raising NameError otherwise.
        results = cursor_result.results

    response = Response(results)
    self.add_cursor_headers(request, response, cursor_result)
    return response
def paginate(self, request, on_results=None, paginator_cls=Paginator, **kwargs):
    """Paginate the request (max 100 per page) and return a Response with
    Link headers.

    ``on_results`` optionally maps the raw paginator results before they
    are serialized into the response.
    """
    per_page = int(request.GET.get('per_page', 100))
    input_cursor = request.GET.get('cursor')
    if input_cursor:
        input_cursor = Cursor.from_string(input_cursor)

    assert per_page <= 100

    paginator = paginator_cls(**kwargs)
    cursor_result = paginator.get_result(
        limit=per_page,
        cursor=input_cursor,
    )

    # map results based on callback
    if on_results:
        results = on_results(cursor_result.results)
    else:
        # BUG FIX: ``results`` was previously assigned only when
        # ``on_results`` was provided, raising NameError otherwise.
        results = cursor_result.results

    headers = {}
    headers['Link'] = ', '.join([
        self.build_cursor_link(request, 'previous', cursor_result.prev),
        self.build_cursor_link(request, 'next', cursor_result.next),
    ])

    return Response(results, headers=headers)
def test_histogram_pagination(self):
    """Paging histogram tag data returns 3 rows, then the final 1 row."""
    self.setup_transactions()
    features = self.feature_list + ("organizations:performance-tag-page", )
    request = {
        "aggregateColumn": "transaction.duration",
        "per_page": 3,
        "numBucketsPerKey": 2,
        "tagKey": "color",
    }

    first_page = self.do_request(request, feature_list=features)
    assert len(first_page.data["tags"]["data"]) == 3

    request["cursor"] = Cursor(0, 3)
    second_page = self.do_request(request, feature_list=features)
    assert len(second_page.data["tags"]["data"]) == 1
def build_query_params_from_request(request, organization, projects, environments):
    """Translate request GET parameters into search query kwargs."""
    params = request.GET
    query_kwargs = {
        'projects': projects,
        'sort_by': params.get('sort', DEFAULT_SORT_OPTION),
    }

    raw_limit = params.get('limit')
    if raw_limit:
        try:
            query_kwargs['limit'] = int(raw_limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    raw_cursor = params.get('cursor')
    if raw_cursor:
        query_kwargs['cursor'] = Cursor.from_string(raw_cursor)

    query = params.get('query', 'is:unresolved').strip()
    if query:
        try:
            search_filters = convert_query_values(
                parse_search_query(query),
                projects,
                request.user,
                environments,
            )
        except InvalidSearchQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))
        validate_search_filter_permissions(organization, search_filters)
        query_kwargs['search_filters'] = search_filters

    return query_kwargs
def paginate(self, request, on_results=None, paginator_cls=Paginator,
             default_per_page=100, **kwargs):
    """Paginate the request and return a Response with Link headers.

    ``on_results`` optionally maps the raw paginator results before they
    are serialized into the response.
    """
    per_page = int(request.GET.get('per_page', default_per_page))
    input_cursor = request.GET.get('cursor')
    if input_cursor:
        input_cursor = Cursor.from_string(input_cursor)
    else:
        input_cursor = None

    assert per_page <= max(100, default_per_page)

    paginator = paginator_cls(**kwargs)
    cursor_result = paginator.get_result(
        limit=per_page,
        cursor=input_cursor,
    )

    # map results based on callback
    if on_results:
        results = on_results(cursor_result.results)
    else:
        # BUG FIX: ``results`` was previously assigned only when
        # ``on_results`` was provided, raising NameError otherwise.
        results = cursor_result.results

    headers = {}
    headers['Link'] = ', '.join([
        self.build_cursor_link(request, 'previous', cursor_result.prev),
        self.build_cursor_link(request, 'next', cursor_result.next),
    ])

    return Response(results, headers=headers)
def get_result(self, limit: int, cursor: Optional[Cursor] = None) -> CursorResult:
    """Offset-paginate the data function, trimming each row's examples to ``limit``."""
    assert limit > 0
    offset = 0 if cursor is None else cursor.offset

    # Request 1 more than limit so we can tell if there is another page
    data = self.data_fn(offset, limit + 1)

    has_more = any(len(entry["examples"]) == limit + 1 for entry in data)
    for entry in data:
        entry["examples"] = entry["examples"][:limit]

    return CursorResult(
        data,
        prev=Cursor(0, max(0, offset - limit), True, offset > 0),
        next=Cursor(0, max(0, offset + limit), False, has_more),
    )
def get_result(self, limit=100, cursor=None):
    """Page across multiple sources using page-number cursors.

    For this paginator the cursor fields are repurposed:
    ``offset`` is the page number and ``value`` is the page limit.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    limit = min(limit, self.max_limit)

    page = cursor.offset
    # Absolute row offset into the concatenation of all sources.
    offset = cursor.offset * cursor.value

    if self.max_offset is not None and offset >= self.max_offset:
        raise BadPaginationError("Pagination offset too large")
    if limit <= 0:
        raise BadPaginationError("Limit must be positive")
    if offset < 0:
        raise BadPaginationError("Pagination offset cannot be negative")

    results = []
    # note: we shouldn't use itertools.islice(itertools.chain.from_iterable(self.sources))
    # because source may be a QuerySet which is much more efficient to slice directly
    for source in self.sources:
        # Get an additional item so we can check for a next page.
        remaining = limit - len(results) + 1
        results.extend(source[offset:offset + remaining])
        # don't do offset = max(0, offset - len(source)) because len(source) may be expensive
        if len(results) == 0:
            offset -= len(source)
        else:
            offset = 0
        if len(results) > limit:
            assert len(results) == limit + 1
            break

    next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
    prev_cursor = Cursor(limit, page - 1, True, page > 0)

    if next_cursor.has_results:
        # Drop the extra look-ahead row fetched above.
        results.pop()

    if self.on_results:
        results = self.on_results(results)

    return CursorResult(results=results, next=next_cursor, prev=prev_cursor)
def test_simple(self):
    """Forward pagination over a simple offset/limit data function."""
    def data_fn(offset=None, limit=None):
        return list(range(offset, limit))

    paginator = GenericOffsetPaginator(data_fn=data_fn)

    first = paginator.get_result(5)
    assert list(first) == [0, 1, 2, 3, 4]
    assert first.prev == Cursor(0, 0, True, False)
    assert first.next == Cursor(0, 5, False, True)

    second = paginator.get_result(5, first.next)
    assert list(second) == [5]
    assert second.prev == Cursor(0, 0, True, True)
    assert second.next == Cursor(0, 10, False, False)
def get_result(self, limit, cursor=None):
    """Offset-paginate a data function whose payload carries a "groups" list."""
    assert limit > 0
    offset = 0 if cursor is None else cursor.offset

    # Fetch one extra row so we can tell whether another page exists.
    data = self.data_fn(offset=offset, limit=limit + 1)

    groups = data.get("groups")
    if not isinstance(groups, list):
        raise NotImplementedError

    has_more = len(groups) == limit + 1
    if has_more:
        groups.pop()

    return CursorResult(
        data,
        prev=Cursor(0, max(0, offset - limit), True, offset > 0),
        next=Cursor(0, max(0, offset + limit), False, has_more),
    )
def get(self, request, project):
    """
    Fetches alert rules and legacy rules for an organization
    """
    if not features.has("organizations:incidents", project.organization, actor=request.user):
        raise ResourceDoesNotExist

    # Default cursor anchors at "now" in microseconds with offset 0.
    cursor_string = request.GET.get(
        "cursor", six.binary_type(int(time.time() * 1000000)) + ":0:0")
    try:
        # Cap the page size at 100; default 25.
        limit = min(100, int(request.GET.get("limit", 25)))
    except ValueError as e:
        return Response(
            {
                "detail": "Invalid input for `limit`. Error: %s" % six.text_type(e)
            },
            status=400)

    cursor = Cursor.from_string(cursor_string)
    # cursor.value is a microsecond timestamp; convert back to a UTC datetime.
    cursor_date = datetime.fromtimestamp(
        float(cursor.value) / 1000000).replace(tzinfo=timezone.utc)

    # Fetch limit + 1 rows from each source so the combined page can detect
    # whether more results exist.
    alert_rule_queryset = (
        AlertRule.objects.fetch_for_project(project).filter(
            date_added__lte=cursor_date).order_by("-date_added")[:limit + 1])
    legacy_rule_queryset = (Rule.objects.filter(
        project=project, status__in=[
            RuleStatus.ACTIVE, RuleStatus.INACTIVE
        ]).select_related("project").filter(
            date_added__lte=cursor_date).order_by("-date_added")[:(limit + 1)])

    # Merge both rule types, newest first; type breaks date ties deterministically.
    combined_rules = list(alert_rule_queryset) + list(legacy_rule_queryset)
    combined_rules.sort(key=lambda instance: (instance.date_added, type(instance)), reverse=True)
    combined_rules = combined_rules[cursor.offset:cursor.offset + limit + 1]

    def get_item_key(item, for_prev=False):
        # Cursor key: the item's date_added as a microsecond timestamp.
        return 1000000 * float(item.date_added.strftime("%s.%f"))

    cursor_result = build_cursor(results=combined_rules, cursor=cursor,
                                 key=get_item_key, limit=limit, is_desc=True)
    results = list(cursor_result)
    context = serialize(results, request.user, CombinedRuleSerializer())
    response = Response(context)
    self.add_cursor_headers(request, response, cursor_result)
    return response
def get_result(self, limit=100, cursor=None):
    """Page across multiple sources using page-number cursors.

    For this paginator the cursor fields are repurposed:
    ``offset`` is the page number and ``value`` is the page limit.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    limit = min(limit, self.max_limit)

    page = cursor.offset
    # Absolute row offset into the concatenation of all sources.
    offset = cursor.offset * cursor.value

    if self.max_offset is not None and offset >= self.max_offset:
        raise BadPaginationError("Pagination offset too large")
    if offset < 0:
        raise BadPaginationError("Pagination offset cannot be negative")

    results = []
    # Get an addition item so we can check for a next page.
    remaining = limit + 1
    for source in self.sources:
        # BUG FIX: previously ``source[offset:remaining]``, which treated
        # ``remaining`` as an absolute stop index rather than a count and
        # truncated pages whenever offset > 0 inside a long source.
        source_results = list(source[offset:offset + remaining])
        results.extend(source_results)
        result_count = len(results)
        if result_count == 0 and result_count < remaining:
            # Advance the offset based on the rows we skipped.
            offset = offset - len(source)
        elif result_count > 0 and result_count < remaining:
            # Start at the beginning of the next source
            offset = 0
            remaining = remaining - result_count
        elif result_count >= limit:
            break

    next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
    prev_cursor = Cursor(limit, page - 1, True, page > 0)

    if next_cursor.has_results:
        # Drop the look-ahead row.
        results.pop()

    if self.on_results:
        results = self.on_results(results)

    return CursorResult(results=results, next=next_cursor, prev=prev_cursor)
def test_results_from_last_source(self):
    """Page 2 with limit 3 lands entirely inside the second source."""
    paginator = self.cls(sources=[[1, 2, 3, 4], [5, 6, 7, 8]])

    page = paginator.get_result(limit=3, cursor=Cursor(3, 2))

    assert len(page.results) == 2
    assert page.results == [7, 8]
    assert page.next.has_results is False
    assert page.prev.has_results
def get_result(self, limit, cursor=None):
    """Offset-paginate the data function; fetches limit + 1 rows to detect
    whether another page exists."""
    assert limit > 0
    offset = 0 if cursor is None else cursor.offset

    data = self.data_fn(offset=offset, limit=limit + 1)

    has_more = len(data) == limit + 1
    if has_more:
        data.pop()

    # Since we are not issuing ranged queries, our cursors always have
    # `value=0` (ie. all rows have the same value), and so offset naturally
    # becomes the absolute row offset from the beginning of the entire
    # dataset, which is the same meaning as SQLs `OFFSET`.
    return CursorResult(
        data,
        prev=Cursor(0, max(0, offset - limit), True, offset > 0),
        next=Cursor(0, max(0, offset + limit), False, has_more),
    )
def test_orderby_percentile_with_pagination(self):
    """Ordered p50 results come back one group per page as the cursor advances."""
    metric_id = indexer.record("sentry.transactions.measurements.lcp")
    tag1 = indexer.record("tag1")
    value1 = indexer.record("value1")
    value2 = indexer.record("value2")

    buckets = []
    for tag, value, numbers in ((tag1, value1, [4, 5, 6]), (tag1, value2, [1, 2, 3])):
        buckets.append({
            "org_id": self.organization.id,
            "project_id": self.project.id,
            "metric_id": metric_id,
            "timestamp": int(time.time()),
            "type": "d",
            "value": numbers,
            "tags": {tag: value},
            "retention_days": 90,
        })
    self._send_buckets(buckets, entity="metrics_distributions")

    query = dict(
        field="p50(sentry.transactions.measurements.lcp)",
        statsPeriod="1h",
        interval="1h",
        groupBy="tag1",
        orderBy="p50(sentry.transactions.measurements.lcp)",
        per_page=1,
    )

    # First page: the group with the lowest p50 (value2's series 1,2,3).
    response = self.get_success_response(self.organization.slug, **query)
    groups = response.data["groups"]
    assert len(groups) == 1
    assert groups[0]["by"] == {"tag1": "value2"}
    assert groups[0]["totals"] == {
        "p50(sentry.transactions.measurements.lcp)": 2
    }

    # Second page via cursor: the remaining group (value1's series 4,5,6).
    response = self.get_success_response(
        self.organization.slug, cursor=Cursor(0, 1), **query)
    groups = response.data["groups"]
    assert len(groups) == 1
    assert groups[0]["by"] == {"tag1": "value1"}
    assert groups[0]["totals"] == {
        "p50(sentry.transactions.measurements.lcp)": 5
    }
def test_results_from_two_sources(self):
    """Page 1 with limit 3 straddles the boundary between both sources."""
    paginator = self.cls(sources=[[1, 2, 3, 4], [5, 6, 7, 8]])

    page = paginator.get_result(limit=3, cursor=Cursor(3, 1))

    assert len(page.results) == 3
    assert page.results == [4, 5, 6]
    assert page.next.has_results
    assert page.prev.has_results
def paginate(self, request, on_results=None, paginator=None, paginator_cls=Paginator,
             default_per_page=100, max_per_page=100, **paginator_kwargs):
    """Paginate the request inside tracing spans and attach cursor headers.

    Raises ParseError for invalid per_page/cursor values or paginator errors.
    """
    # Callers supply either a prebuilt paginator or a class + kwargs, never both.
    assert (paginator and not paginator_kwargs) or (paginator_cls and paginator_kwargs)

    try:
        per_page = int(request.GET.get("per_page", default_per_page))
    except ValueError:
        raise ParseError(detail="Invalid per_page parameter.")

    input_cursor = None
    raw_cursor = request.GET.get("cursor")
    if raw_cursor:
        try:
            input_cursor = Cursor.from_string(raw_cursor)
        except ValueError:
            raise ParseError(detail="Invalid cursor parameter.")

    max_per_page = max(max_per_page, default_per_page)
    if per_page > max_per_page:
        raise ParseError(
            detail="Invalid per_page value. Cannot exceed {}.".format(max_per_page))

    if not paginator:
        paginator = paginator_cls(**paginator_kwargs)

    try:
        with sentry_sdk.start_span(
            op="base.paginate.get_result",
            description=type(self).__name__,
        ) as span:
            span.set_data("Limit", per_page)
            cursor_result = paginator.get_result(limit=per_page, cursor=input_cursor)
    except BadPaginationError as e:
        raise ParseError(detail=six.text_type(e))

    # map results based on callback
    if on_results:
        with sentry_sdk.start_span(
            op="base.paginate.on_results",
            description=type(self).__name__,
        ):
            results = on_results(cursor_result.results)
    else:
        results = cursor_result.results

    response = Response(results)
    self.add_cursor_headers(request, response, cursor_result)
    return response
def get_result(self, limit, cursor=None):
    """Offset-paginate histogram tag data.

    Passes the unpadded ``raw_limit`` through so bucket calculations stay
    correct while one extra row is fetched for next-page detection.
    """
    assert limit > 0
    offset = 0 if cursor is None else cursor.offset

    # Request 1 more than limit so we can tell if there is another page
    # Use raw_limit for the histogram itself so bucket calculations are correct
    data = self.data_fn(offset=offset, limit=limit + 1, raw_limit=limit)

    tags = data["tags"]
    if not isinstance(tags, list):
        raise NotImplementedError

    has_more = len(tags) == limit + 1
    if has_more:
        tags.pop()

    return CursorResult(
        data,
        prev=Cursor(0, max(0, offset - limit), True, offset > 0),
        next=Cursor(0, max(0, offset + limit), False, has_more),
    )
def get_result(self, limit=100, cursor=None):
    """Load primary rows via ``data_load_func`` and map them back onto
    queryset models, preserving primary-result order.

    Uses page-number cursors: ``cursor.offset`` is the page number and
    ``cursor.value`` the page limit.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    limit = min(limit, self.max_limit)

    page = cursor.offset
    offset = cursor.offset * cursor.value
    # NOTE(review): ``limit`` is rebound here to page size + 1 look-ahead
    # row, and that padded value is both stored into the next/prev cursors
    # and used for the ``results[:limit]`` trim below — confirm the
    # off-by-one is intended.
    limit = (cursor.value or limit) + 1

    if self.max_offset is not None and offset >= self.max_offset:
        raise BadPaginationError("Pagination offset too large")
    if offset < 0:
        raise BadPaginationError("Pagination offset cannot be negative")

    primary_results = self.data_load_func(offset=offset, limit=self.max_limit)

    queryset = self.apply_to_queryset(self.queryset, primary_results)

    # Index the models by key so each primary row can be matched back.
    mapping = {}
    for model in queryset:
        mapping[self.key_from_model(model)] = model

    # Rebuild results in primary-result order, dropping rows with no model.
    results = []
    for row in primary_results:
        model = mapping.get(self.key_from_data(row))
        if model is not None:
            results.append(model)

    next_cursor = Cursor(limit, page + 1, False, len(primary_results) > limit)
    prev_cursor = Cursor(limit, page - 1, True, page > 0)

    results = list(results[:limit])
    if self.on_results:
        results = self.on_results(results)

    return CursorResult(results=results, next=next_cursor, prev=prev_cursor)
def _build_query_params_from_request(self, request, project):
    """Translate stream GET parameters into group search kwargs."""
    params = request.GET
    query_kwargs = {'project': project}

    if params.get('status'):
        try:
            query_kwargs['status'] = STATUS_CHOICES[params['status']]
        except KeyError:
            raise ValidationError('invalid status')

    if request.user.is_authenticated():
        if params.get('bookmarks'):
            query_kwargs['bookmarked_by'] = request.user
        if params.get('assigned'):
            query_kwargs['assigned_to'] = request.user

    sort_by = params.get('sort')
    query_kwargs['sort_by'] = DEFAULT_SORT_OPTION if sort_by is None else sort_by

    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if params.get(tag_key):
            tags[tag_key] = params[tag_key]
    if tags:
        query_kwargs['tags'] = tags

    raw_limit = params.get('limit')
    if raw_limit:
        try:
            query_kwargs['limit'] = int(raw_limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    raw_cursor = params.get('cursor')
    if raw_cursor:
        query_kwargs['cursor'] = Cursor.from_string(raw_cursor)

    query = params.get('query', 'is:unresolved').strip()
    if query:
        try:
            query_kwargs.update(parse_query(project, query, request.user))
        except InvalidQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))

    return query_kwargs
def _build_query_params_from_request(self, request, project):
    """Build group search kwargs (status, flags, tags, sort, paging) from
    the request GET parameters."""
    params = request.GET
    query_kwargs = {'project': project}

    if params.get('status'):
        try:
            query_kwargs['status'] = STATUS_CHOICES[params['status']]
        except KeyError:
            raise ValidationError('invalid status')

    if request.user.is_authenticated():
        if params.get('bookmarks'):
            query_kwargs['bookmarked_by'] = request.user
        if params.get('assigned'):
            query_kwargs['assigned_to'] = request.user

    sort_by = params.get('sort')
    query_kwargs['sort_by'] = DEFAULT_SORT_OPTION if sort_by is None else sort_by

    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if params.get(tag_key):
            tags[tag_key] = params[tag_key]
    if tags:
        query_kwargs['tags'] = tags

    raw_limit = params.get('limit')
    if raw_limit:
        try:
            query_kwargs['limit'] = int(raw_limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    raw_cursor = params.get('cursor')
    if raw_cursor:
        query_kwargs['cursor'] = Cursor.from_string(raw_cursor)

    query = params.get('query', 'is:unresolved').strip()
    if query:
        try:
            query_kwargs.update(parse_query(project, query, request.user))
        except InvalidQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))

    return query_kwargs
def build_query_params_from_request(request, organization, projects, environments):
    """Build search kwargs, parsing the query with both the legacy parser and
    the new search-filter parser, falling back to the legacy filters if the
    new parse fails.
    """
    query_kwargs = {
        'projects': projects,
        'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION),
    }

    limit = request.GET.get('limit')
    if limit:
        try:
            query_kwargs['limit'] = int(limit)
        except ValueError:
            raise ValidationError('invalid limit')

    # TODO: proper pagination support
    cursor = request.GET.get('cursor')
    if cursor:
        query_kwargs['cursor'] = Cursor.from_string(cursor)

    query = request.GET.get('query', 'is:unresolved').strip()
    # New-style filters are on by default; clients may opt out.
    use_new_filters = request.GET.get('use_new_filters', '1') == '1'
    if query:
        # Legacy parse: a failure here is a hard error.
        try:
            query_kwargs.update(
                parse_query(projects, query, request.user, environments))
        except InvalidQuery as e:
            raise ValidationError(
                u'Your search query could not be parsed: {}'.format(e.message))
        # New-style parse: best-effort only.
        try:
            search_filters = convert_query_values(
                parse_search_query(query),
                projects,
                request.user,
                environments,
            )
        except Exception:
            # TODO: Catch less broad exceptions when we're confident in these
            # new filters
            logging.exception(
                'Error occurred while parsing new style search query')
            search_filters = []
            # If something goes wrong here we just want to use the working
            # filters
            use_new_filters = False
        if use_new_filters:
            validate_search_filter_permissions(organization, search_filters)
            query_kwargs['search_filters'] = search_filters

    query_kwargs['use_new_filters'] = use_new_filters
    return query_kwargs
def test_pagination_offset_without_orderby(self):
    """A cursor without an accompanying orderBy must yield a 400 error."""
    response = self.get_response(
        self.organization.slug,
        field="count(sentry.transactions.measurements.lcp)",
        groupBy="transaction",
        cursor=Cursor(0, 1),
    )

    assert response.status_code == 400
    detail = response.json()["detail"]
    assert detail == "'cursor' is only supported in combination with 'orderBy'"
def build_query_params_from_request(
    request: Request,
    organization: "Organization",
    projects: Sequence["Project"],
    environments: Optional[Sequence["Environment"]],
) -> MutableMapping[str, Any]:
    """Translate request GET parameters into issue-search kwargs, tagging the
    Sentry scope with search metadata along the way.

    Raises ValidationError for a bad limit or unparseable query, and
    ParseError for a malformed cursor.
    """
    query_kwargs = {
        "projects": projects,
        "sort_by": request.GET.get("sort", DEFAULT_SORT_OPTION)
    }

    limit = request.GET.get("limit")
    if limit:
        try:
            query_kwargs["limit"] = int(limit)
        except ValueError:
            raise ValidationError("invalid limit")

    # TODO: proper pagination support
    if request.GET.get("cursor"):
        try:
            query_kwargs["cursor"] = Cursor.from_string(
                request.GET.get("cursor"))
        except ValueError:
            raise ParseError(detail="Invalid cursor parameter.")

    query = request.GET.get("query", "is:unresolved").strip()
    sentry_sdk.set_tag("search.query", query)
    # BUG FIX: the sort tag was previously set to the query string
    # (copy-paste of the line above) instead of the sort value.
    sentry_sdk.set_tag("search.sort", query_kwargs["sort_by"])
    if projects:
        sentry_sdk.set_tag("search.projects",
                           len(projects) if len(projects) <= 5 else ">5")
    if environments:
        sentry_sdk.set_tag(
            "search.environments",
            len(environments) if len(environments) <= 5 else ">5")

    if query:
        try:
            search_filters = convert_query_values(parse_search_query(query),
                                                  projects, request.user,
                                                  environments)
        except InvalidSearchQuery as e:
            raise ValidationError(f"Error parsing search query: {e}")
        validate_search_filter_permissions(organization, search_filters,
                                           request.user)
        query_kwargs["search_filters"] = search_filters

    return query_kwargs
def get_result(self, limit=100, cursor=None, count_hits=False):
    """Return one page of queryset results as a cursor result.

    Cursors are (identifier(integer), row offset, is_prev) triples.
    When ``count_hits`` is set, also count total hits, capped at 1000.
    """
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    limit = min(limit, self.max_limit)

    # A truthy cursor value anchors the queryset at that key; 0 means
    # "start from the beginning".
    if cursor.value:
        cursor_value = self.value_from_cursor(cursor)
    else:
        cursor_value = 0

    queryset = self._build_queryset(cursor_value, cursor.is_prev)

    # TODO(dcramer): this does not yet work correctly for ``is_prev`` when
    # the key is not unique
    if count_hits:
        max_hits = 1000
        hits = self.count_hits(max_hits)
    else:
        hits = None
        max_hits = None

    offset = cursor.offset
    # this effectively gets us the before row, and the current (after) row
    # every time. Do not offset if the provided cursor value was empty since
    # there is nothing to traverse past.
    if cursor.is_prev and cursor.value:
        offset += 1

    # The + 1 is needed so we can decide in the ResultCursor if there is
    # more on the next page.
    stop = offset + limit + 1
    results = list(queryset[offset:stop])

    # The prev queryset is built in reverse order; restore display order.
    if cursor.is_prev:
        results.reverse()

    return build_cursor(
        results=results,
        limit=limit,
        hits=hits,
        max_hits=max_hits,
        cursor=cursor,
        is_desc=self.desc,
        key=self.get_item_key,
    )
def test_descending_simple(self):
    """Descending pagination: forward, backward, and out-of-range cursors."""
    paginator = SequencePaginator([(i, i) for i in range(10)], reverse=True)

    first = paginator.get_result(5)
    assert list(first) == [9, 8, 7, 6, 5]
    assert first.prev == Cursor(9, 0, True, False)
    assert first.next == Cursor(4, 0, False, True)

    second = paginator.get_result(5, first.next)
    assert list(second) == [4, 3, 2, 1, 0]
    assert second.prev == Cursor(4, 0, True, True)
    assert second.next == Cursor(0, 1, False, False)

    back = paginator.get_result(5, second.prev)
    assert list(back) == [9, 8, 7, 6, 5]
    assert back.prev == Cursor(9, 0, True, False)
    assert back.next == Cursor(4, 0, False, True)

    past_end = paginator.get_result(5, Cursor(-10, 0, False))
    assert list(past_end) == []
    assert past_end.prev == Cursor(0, 1, True, True)
    assert past_end.next == Cursor(0, 1, False, False)
def test_descending_repeated_scores(self):
    """With identical scores, cursors fall back to row offsets."""
    paginator = SequencePaginator([(1, i) for i in range(10)], reverse=True)

    first = paginator.get_result(5)
    assert list(first) == [9, 8, 7, 6, 5]
    assert first.prev == Cursor(1, 0, True, False)
    assert first.next == Cursor(1, 5, False, True)

    second = paginator.get_result(5, first.next)
    assert list(second) == [4, 3, 2, 1, 0]
    assert second.prev == Cursor(1, 5, True, True)
    assert second.next == Cursor(1, 10, False, False)

    back = paginator.get_result(5, second.prev)
    assert list(back) == [9, 8, 7, 6, 5]
    assert back.prev == Cursor(1, 0, True, False)
    assert back.next == Cursor(1, 5, False, True)

    past_end = paginator.get_result(5, Cursor(-10, 0, False))
    assert list(past_end) == []
    assert past_end.prev == Cursor(1, 10, True, True)
    assert past_end.next == Cursor(1, 10, False, False)
def test_ascending_simple(self):
    """Ascending pagination: forward, backward, and out-of-range cursors."""
    paginator = SequencePaginator([(i, i) for i in range(10)], reverse=False)

    first = paginator.get_result(5)
    assert list(first) == [0, 1, 2, 3, 4]
    assert first.prev == Cursor(0, 0, True, False)
    assert first.next == Cursor(5, 0, False, True)

    second = paginator.get_result(5, first.next)
    assert list(second) == [5, 6, 7, 8, 9]
    assert second.prev == Cursor(5, 0, True, True)
    assert second.next == Cursor(9, 1, False, False)

    back = paginator.get_result(5, second.prev)
    assert list(back) == [0, 1, 2, 3, 4]
    assert back.prev == Cursor(0, 0, True, False)
    assert back.next == Cursor(5, 0, False, True)

    past_end = paginator.get_result(5, Cursor(100, 0, False))
    assert list(past_end) == []
    assert past_end.prev == Cursor(9, 1, True, True)
    assert past_end.next == Cursor(9, 1, False, False)
def get_result(self, cursor=None, limit=100):
    """Fetch one page from the combined querysets, handling backwards
    (``is_prev``) traversal, and return a cursor result."""
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    # A truthy cursor value anchors the querysets at that key.
    if cursor.value:
        cursor_value = self.value_from_cursor(cursor)
    else:
        cursor_value = None

    limit = min(limit, MAX_LIMIT)

    offset = cursor.offset
    # Always fetch one extra row for next-page detection; fetch a second
    # extra row when paging backwards past a concrete cursor value.
    extra = 1
    if cursor.is_prev and cursor.value:
        extra += 1

    combined_querysets = self._build_combined_querysets(
        cursor_value, cursor.is_prev, limit, extra)

    stop = offset + limit + extra
    results = (list(combined_querysets[offset:stop])
               if self.using_dates else list(combined_querysets[:(limit + extra)]))

    if cursor.is_prev and cursor.value:
        # If the first result is equal to the cursor_value then it's safe to filter
        # it out, since the value hasn't been updated
        if results and self.get_item_key(results[0], for_prev=True) == cursor.value:
            results = results[1:]
        # Otherwise we may have fetched an extra row, just drop it off the end if so.
        elif len(results) == offset + limit + extra:
            results = results[:-1]

    # We reversed the results when generating the querysets, so we need to reverse back now.
    if cursor.is_prev:
        results.reverse()

    return build_cursor(
        results=results,
        cursor=cursor,
        key=self.get_item_key,
        limit=limit,
        is_desc=self.desc,
        on_results=self.on_results,
    )
def paginate(self, request, on_results=None, paginator=None, paginator_cls=Paginator,
             default_per_page=100, max_per_page=100, **paginator_kwargs):
    """Paginate the request and return a Response with cursor headers.

    Raises ParseError for invalid per_page/cursor values or paginator errors.
    """
    # Callers supply either a prebuilt paginator or a class + kwargs, never both.
    assert (paginator and not paginator_kwargs) or (paginator_cls and paginator_kwargs)

    try:
        per_page = int(request.GET.get("per_page", default_per_page))
    except ValueError:
        raise ParseError(detail="Invalid per_page parameter.")

    input_cursor = None
    if request.GET.get("cursor"):
        try:
            input_cursor = Cursor.from_string(request.GET.get("cursor"))
        except ValueError:
            raise ParseError(detail="Invalid cursor parameter.")

    assert per_page <= max(max_per_page, default_per_page)

    if not paginator:
        paginator = paginator_cls(**paginator_kwargs)

    try:
        cursor_result = paginator.get_result(limit=per_page, cursor=input_cursor)
    except BadPaginationError as e:
        # BUG FIX: this previously ``return``ed the ParseError instance,
        # sending the exception object as the view's return value instead of
        # raising it to produce a 400 response.
        raise ParseError(detail=six.text_type(e))

    # map results based on callback
    if on_results:
        results = on_results(cursor_result.results)
    else:
        results = cursor_result.results

    response = Response(results)
    self.add_cursor_headers(request, response, cursor_result)
    return response
def _get_group_list(request, project):
    """Translate stream-filter GET params into search kwargs and run the search.

    Returns a template context dict with the event list, the resolved date
    window, sort/date-filter selections, and next/prev pagination cursors.
    """
    query_kwargs = {
        'project': project,
    }
    # Default status filter is '0' (truthy string, so it is always applied
    # unless an explicitly empty ?status= is sent).
    status = request.GET.get('status', '0')
    if status:
        query_kwargs['status'] = int(status)
    if request.user.is_authenticated() and request.GET.get('bookmarks'):
        query_kwargs['bookmarked_by'] = request.user
    if request.user.is_authenticated() and request.GET.get('assigned'):
        query_kwargs['assigned_to'] = request.user
    # Sort preference: explicit param, then the session-remembered value,
    # then the global default.
    sort_by = request.GET.get('sort') or request.session.get('streamsort')
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION
    # Save last sort in session
    if sort_by != request.session.get('streamsort'):
        request.session['streamsort'] = sort_by
    query_kwargs['sort_by'] = sort_by
    # Collect any known tag keys passed directly as GET params (?server=...).
    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if request.GET.get(tag_key):
            tags[tag_key] = request.GET[tag_key]
    if tags:
        query_kwargs['tags'] = tags
    else:
        query_kwargs['tags'] = {}
    date_from = request.GET.get('df')
    time_from = request.GET.get('tf')
    date_to = request.GET.get('dt')
    time_to = request.GET.get('tt')
    date_filter = request.GET.get('date_type')
    today = timezone.now()
    # date format is Y-m-d
    if any(x is not None for x in [date_from, time_from, date_to, time_to]):
        date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
    else:
        # No explicit window: default to the trailing five days.
        date_from = today - datetime.timedelta(days=5)
        date_to = None
    query_kwargs['date_from'] = date_from
    query_kwargs['date_to'] = date_to
    if date_filter:
        query_kwargs['date_filter'] = date_filter
    cursor = request.GET.get('cursor')
    if cursor:
        try:
            query_kwargs['cursor'] = Cursor.from_string(cursor)
        except ValueError:
            # XXX(dcramer): ideally we'd error, but this is an internal API so
            # we'd rather just throw it away
            logging.info('Throwing away invalid cursor: %s', cursor)
    query_kwargs['limit'] = EVENTS_PER_PAGE
    query = request.GET.get('query', '')
    # NOTE(review): the default above is '' so this check is always true; the
    # structured query is still parsed for an empty string.
    if query is not None:
        query_result = parse_query(query, request.user)
        # Disclaimer: the following code is disgusting
        if query_result.get('query'):
            query_kwargs['query'] = query_result['query']
        if query_result.get('tags'):
            query_kwargs['tags'].update(query_result['tags'])
    results = app.search.query(**query_kwargs)
    return {
        'event_list': results[:EVENTS_PER_PAGE],
        'date_from': date_from,
        'date_to': date_to,
        'today': today,
        'sort': sort_by,
        'date_type': date_filter,
        'next_cursor': results.next,
        'prev_cursor': results.prev,
    }
def _build_query_params_from_request(self, request, project):
    """Translate group-stream GET parameters into ``search.query`` kwargs.

    Raises ValidationError for an unparseable status, limit, or cursor.
    """
    query_kwargs = {
        'project': project,
    }
    if request.GET.get('status'):
        try:
            query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
        except KeyError:
            raise ValidationError('invalid status')
    if request.user.is_authenticated() and request.GET.get('bookmarks'):
        query_kwargs['bookmarked_by'] = request.user
    if request.user.is_authenticated() and request.GET.get('assigned'):
        query_kwargs['assigned_to'] = request.user
    sort_by = request.GET.get('sort')
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION
    query_kwargs['sort_by'] = sort_by
    # Tag filters may be passed directly as GET params keyed by tag name.
    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if request.GET.get(tag_key):
            tags[tag_key] = request.GET[tag_key]
    if tags:
        query_kwargs['tags'] = tags
    # TODO: dates should include timestamps
    date_from = request.GET.get('since')
    date_to = request.GET.get('until')
    date_filter = request.GET.get('date_filter')
    limit = request.GET.get('limit')
    if limit:
        try:
            query_kwargs['limit'] = int(limit)
        except ValueError:
            raise ValidationError('invalid limit')
    if date_from:
        date_from = self._parse_date(date_from)
    if date_to:
        date_to = self._parse_date(date_to)
    query_kwargs['date_from'] = date_from
    query_kwargs['date_to'] = date_to
    if date_filter:
        query_kwargs['date_filter'] = date_filter
    # TODO: proper pagination support
    cursor = request.GET.get('cursor')
    if cursor:
        # BUG FIX: a malformed cursor previously propagated a raw ValueError
        # from Cursor.from_string (a 500); surface it as a validation error
        # like the other parameters here and the sibling builders in this file.
        try:
            query_kwargs['cursor'] = Cursor.from_string(cursor)
        except ValueError:
            raise ValidationError('invalid cursor')
    query = request.GET.get('query', 'is:unresolved').strip()
    if query:
        query_kwargs.update(parse_query(project, query, request.user))
    return query_kwargs
def get(self, request, project):
    """
    List a project's aggregates

    Return a list of aggregates bound to a project.

        {method} {path}

    A default query of 'is:resolved' is applied. To return results with
    other statuses send an new query value (i.e. ?query= for all results).

    Any standard Sentry structured search query can be passed via the
    ``query`` parameter.

    The ``statsPeriod`` parameter can be used to select the timeline stats
    which should be present. Possible values are: '' (disable), '24h', '14d'
    """
    query_kwargs = {
        'project': project,
    }

    stats_period = request.GET.get('statsPeriod')
    if stats_period not in (None, '', '24h', '14d'):
        return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
    elif stats_period is None:
        # default
        stats_period = '24h'
    elif stats_period == '':
        # disable stats
        stats_period = None

    if request.GET.get('status'):
        try:
            query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
        except KeyError:
            # NOTE(review): the body here is a JSON *string* literal, which
            # DRF will re-serialize as a quoted string rather than an object
            # -- confirm whether clients depend on this.
            return Response('{"detail": "invalid status"}', status=400)

    if request.user.is_authenticated() and request.GET.get('bookmarks'):
        query_kwargs['bookmarked_by'] = request.user

    if request.user.is_authenticated() and request.GET.get('assigned'):
        query_kwargs['assigned_to'] = request.user

    sort_by = request.GET.get('sort')
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION
    query_kwargs['sort_by'] = sort_by

    # Tag filters may be passed directly as GET params keyed by tag name.
    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if request.GET.get(tag_key):
            tags[tag_key] = request.GET[tag_key]
    if tags:
        query_kwargs['tags'] = tags

    # TODO: dates should include timestamps
    date_from = request.GET.get('since')
    date_to = request.GET.get('until')
    date_filter = request.GET.get('date_filter')

    limit = request.GET.get('limit')
    if limit:
        try:
            query_kwargs['limit'] = int(limit)
        except ValueError:
            return Response('{"detail": "invalid limit"}', status=400)

    if date_from:
        date_from = self._parse_date(date_from)
    if date_to:
        date_to = self._parse_date(date_to)

    query_kwargs['date_from'] = date_from
    query_kwargs['date_to'] = date_to
    if date_filter:
        query_kwargs['date_filter'] = date_filter

    # TODO: proper pagination support
    cursor = request.GET.get('cursor')
    if cursor:
        query_kwargs['cursor'] = Cursor.from_string(cursor)

    query = request.GET.get('query', 'is:unresolved').strip()
    if len(query) == 32:
        # check to see if we've got an event ID
        try:
            matching_event = EventMapping.objects.filter(
                project=project,
                event_id=query,
            ).select_related('group')[0]
        except IndexError:
            pass
        else:
            # Short-circuit: respond with only the matching event's group.
            return Response(serialize(
                [matching_event.group], request.user, StreamGroupSerializer(
                    stats_period=stats_period
                )
            ))

    if query is not None:
        query_kwargs.update(parse_query(query, request.user))

    cursor_result = search.query(**query_kwargs)

    results = list(cursor_result)

    # HACK: remove auto resolved entries
    if query_kwargs.get('status') == STATUS_UNRESOLVED:
        results = [
            r for r in results
            if not r.is_resolved()
        ]

    response = Response(serialize(
        results, request.user, StreamGroupSerializer(
            stats_period=stats_period
        )
    ))
    # Pagination cursors are exposed via the Link header.
    response['Link'] = ', '.join([
        self.build_cursor_link(request, 'previous', cursor_result.prev),
        self.build_cursor_link(request, 'next', cursor_result.next),
    ])
    return response
def get(self, request, project_id):
    """
    List a project's aggregates

    Return a list of aggregates bound to a project.

        {method} {path}?id=1&id=2&id=3

    A default query of 'is:resolved' is applied. To return results with
    other statuses send an new query value (i.e. ?query= for all results).

    Any standard Sentry structured search query can be passed via the
    ``query`` parameter.
    """
    project = Project.objects.get_from_cache(
        id=project_id,
    )

    assert_perm(project, request.user, request.auth)

    query_kwargs = {
        'project': project,
    }

    if request.GET.get('status'):
        try:
            query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
        except KeyError:
            # NOTE(review): body is a JSON *string* literal; DRF re-serializes
            # it as a quoted string rather than an object -- confirm.
            return Response('{"error": "invalid status"}', status=400)

    if request.user.is_authenticated() and request.GET.get('bookmarks'):
        query_kwargs['bookmarked_by'] = request.user

    if request.user.is_authenticated() and request.GET.get('assigned'):
        query_kwargs['assigned_to'] = request.user

    sort_by = request.GET.get('sort')
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION
    query_kwargs['sort_by'] = sort_by

    # Tag filters may be passed directly as GET params keyed by tag name.
    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if request.GET.get(tag_key):
            tags[tag_key] = request.GET[tag_key]
    if tags:
        query_kwargs['tags'] = tags

    # TODO: dates should include timestamps
    date_from = request.GET.get('since')
    # NOTE(review): feeding 'until' into *time_from* looks suspicious -- the
    # sibling handlers in this file treat 'until' as the end of the date
    # window ('tf' is the time-from param elsewhere); confirm this mapping.
    time_from = request.GET.get('until')
    date_filter = request.GET.get('date_filter')
    date_to = request.GET.get('dt')
    time_to = request.GET.get('tt')
    limit = request.GET.get('limit')
    if limit:
        try:
            query_kwargs['limit'] = int(limit)
        except ValueError:
            return Response('{"error": "invalid limit"}', status=400)

    today = timezone.now()
    # date format is Y-m-d
    if any(x is not None for x in [date_from, time_from, date_to, time_to]):
        date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
    else:
        # No explicit window: default to the trailing five days.
        date_from = today - timedelta(days=5)
        date_to = None

    query_kwargs['date_from'] = date_from
    query_kwargs['date_to'] = date_to
    if date_filter:
        query_kwargs['date_filter'] = date_filter

    # TODO: proper pagination support
    cursor = request.GET.get('cursor')
    if cursor:
        query_kwargs['cursor'] = Cursor.from_string(cursor)

    query = request.GET.get('query', 'is:unresolved')
    if query is not None:
        query_kwargs.update(parse_query(query, request.user))

    cursor_result = search.query(**query_kwargs)

    context = list(cursor_result)

    GroupMeta.objects.populate_cache(context)

    response = Response(serialize(context, request.user))
    # Pagination cursors are exposed via the Link header.
    response['Link'] = ', '.join([
        self.build_cursor_link(request, 'previous', cursor_result.prev),
        self.build_cursor_link(request, 'next', cursor_result.next),
    ])
    return response
def _get_group_list(request, project):
    """Build search kwargs from the stream filter params and run the search.

    Returns a context dict containing the event page, the resolved date
    window, the chosen sort/date filter, and next/prev pagination cursors.
    """
    query_kwargs = {"project": project}

    status = request.GET.get("status", "0")
    if status:
        query_kwargs["status"] = int(status)

    user = request.user
    if user.is_authenticated() and request.GET.get("bookmarks"):
        query_kwargs["bookmarked_by"] = user
    if user.is_authenticated() and request.GET.get("assigned"):
        query_kwargs["assigned_to"] = user

    # Sort preference: explicit param, then session-remembered, then default;
    # remember the final choice for subsequent requests.
    sort_by = request.GET.get("sort") or request.session.get("streamsort")
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION
    if sort_by != request.session.get("streamsort"):
        request.session["streamsort"] = sort_by
    query_kwargs["sort_by"] = sort_by

    # Known tag keys may be passed directly as GET params (?server=...).
    query_kwargs["tags"] = {
        tag_key: request.GET[tag_key]
        for tag_key in TagKey.objects.all_keys(project)
        if request.GET.get(tag_key)
    }

    date_from = request.GET.get("df")
    time_from = request.GET.get("tf")
    date_to = request.GET.get("dt")
    time_to = request.GET.get("tt")
    date_filter = request.GET.get("date_type")

    today = timezone.now()
    # date format is Y-m-d
    if all(x is None for x in (date_from, time_from, date_to, time_to)):
        # No explicit window: default to the trailing five days.
        date_from = today - datetime.timedelta(days=5)
        date_to = None
    else:
        date_from = parse_date(date_from, time_from)
        date_to = parse_date(date_to, time_to)
    query_kwargs["date_from"] = date_from
    query_kwargs["date_to"] = date_to
    if date_filter:
        query_kwargs["date_filter"] = date_filter

    cursor = request.GET.get("cursor")
    if cursor:
        try:
            query_kwargs["cursor"] = Cursor.from_string(cursor)
        except ValueError:
            # XXX(dcramer): ideally we'd error, but this is an internal API so
            # we'd rather just throw it away
            logging.info("Throwing away invalid cursor: %s", cursor)
    query_kwargs["limit"] = EVENTS_PER_PAGE

    query = request.GET.get("query", "")
    if query is not None:
        parsed = parse_query(query, request.user)
        # Disclaimer: the following code is disgusting
        if parsed.get("query"):
            query_kwargs["query"] = parsed["query"]
        if parsed.get("tags"):
            query_kwargs["tags"].update(parsed["tags"])

    results = app.search.query(**query_kwargs)

    return {
        "event_list": results[:EVENTS_PER_PAGE],
        "date_from": date_from,
        "date_to": date_to,
        "today": today,
        "sort": sort_by,
        "date_type": date_filter,
        "next_cursor": results.next,
        "prev_cursor": results.prev,
    }