Example #1
    def get_result(self, limit, cursor=None):
        assert limit > 0
        offset = cursor.offset if cursor is not None else 0
        # Request 1 more than limit so we can tell if there is another page
        data = self.data_fn(offset=offset, limit=limit + 1)

        if isinstance(data, list):
            has_more = len(data) == limit + 1
            if has_more:
                data.pop()
        elif isinstance(data.get("data"), list):
            has_more = len(data["data"]) == limit + 1
            if has_more:
                data["data"].pop()
        else:
            raise NotImplementedError

        # Since we are not issuing ranged queries, our cursors always have
        # `value=0` (i.e. all rows have the same value), so the offset naturally
        # becomes the absolute row offset from the beginning of the entire
        # dataset, which has the same meaning as SQL's `OFFSET`.
        return CursorResult(
            data,
            prev=Cursor(0, max(0, offset - limit), True, offset > 0),
            next=Cursor(0, max(0, offset + limit), False, has_more),
        )
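
The pattern shared by several of these paginators is to over-fetch by one row: requesting `limit + 1` rows turns `has_more` into a simple length check, and the extra row is popped before returning. Below is a minimal, runnable sketch of driving that pattern, assuming a four-field `Cursor(value, offset, is_prev, has_results)` as the examples suggest (the real class lives elsewhere in the codebase):

    # A self-contained sketch of the limit-plus-one pattern above. The
    # Cursor shape is assumed from how the examples construct it.
    from collections import namedtuple

    Cursor = namedtuple("Cursor", ["value", "offset", "is_prev", "has_results"])

    def fetch_page(data_fn, limit, cursor=None):
        offset = cursor.offset if cursor is not None else 0
        rows = data_fn(offset=offset, limit=limit + 1)  # over-fetch by one
        has_more = len(rows) == limit + 1
        return rows[:limit], Cursor(0, offset + limit, False, has_more)

    # Usage: page through a list-backed data_fn until has_results is False.
    data = list(range(25))
    data_fn = lambda offset, limit: data[offset:offset + limit]
    page, nxt = fetch_page(data_fn, 10)           # rows 0-9
    while nxt.has_results:
        page, nxt = fetch_page(data_fn, 10, nxt)  # rows 10-19, then 20-24
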
Example #2
    def get_result(self, limit=100, cursor=None):
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        queryset = self.queryset
        if self.key:
            queryset = queryset.order_by(*self.key)

        page = cursor.offset
        offset = cursor.offset * cursor.value
        stop = offset + (cursor.value or limit) + 1

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        results = list(queryset[offset:stop])
        if cursor.value != limit:
            results = results[-(limit + 1):]

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        results = list(results[:limit])
        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
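
Examples 2, 3, 7, 10, 11, and 14 encode page-number cursors rather than row offsets: the cursor's `offset` field holds a page index and its `value` field holds the page size. The arithmetic, spelled out with illustrative values:

    # Page-number cursor arithmetic used above (values illustrative).
    value, offset = 25, 3          # page size 25, page index 3
    row_offset = offset * value    # 75: absolute offset of the page's first row
    stop = row_offset + value + 1  # 101: slice end; the +1 detects a next page
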
Example #3
    def get_result(self, limit=100, cursor=None):
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        queryset = self.queryset
        if self.key:
            if self.desc:
                queryset = queryset.order_by(u'-{}'.format(self.key))
            else:
                queryset = queryset.order_by(self.key)

        page = cursor.offset
        offset = cursor.offset * cursor.value
        stop = offset + (cursor.value or limit) + 1

        results = list(queryset[offset:stop])
        if cursor.value != limit:
            results = results[-(limit + 1):]

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        results = list(results[:limit])
        if self.on_results:
            results = self.on_results(results)

        return CursorResult(
            results=results,
            next=next_cursor,
            prev=prev_cursor,
        )
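
Example 3 differs from Example 2 only in how the ordering is applied: a `-` prefix on the key asks Django for descending order (the field name below is illustrative):

    # Django's order_by convention, as used in Example 3:
    queryset.order_by(u'last_seen')   # ascending
    queryset.order_by(u'-last_seen')  # descending, via the u'-{}'.format(...) prefix
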
Example #4
def prep_search(
    cls: Any,
    request: Request,
    project: "Project",
    extra_query_kwargs: Optional[Mapping[str, Any]] = None,
) -> Tuple[CursorResult, Mapping[str, Any]]:
    try:
        environment = cls._get_environment_from_request(
            request, project.organization_id)
    except Environment.DoesNotExist:
        result = CursorResult([], None, None, hits=0, max_hits=SEARCH_MAX_HITS)
        query_kwargs: MutableMapping[str, Any] = {}
    else:
        environments = [environment] if environment is not None else environment
        query_kwargs = build_query_params_from_request(request,
                                                       project.organization,
                                                       [project], environments)
        if extra_query_kwargs is not None:
            assert "environment" not in extra_query_kwargs
            query_kwargs.update(extra_query_kwargs)

        query_kwargs["environments"] = environments
        result = search.query(**query_kwargs)
    return result, query_kwargs
Example #5
    def _search(self, request, project, extra_query_kwargs=None):
        try:
            environment = self._get_environment_from_request(
                request,
                project.organization_id,
            )
        except Environment.DoesNotExist:
            # XXX: The 1000 magic number for `max_hits` is an abstraction leak
            # from `sentry.api.paginator.BasePaginator.get_result`.
            result = CursorResult([], None, None, hits=0, max_hits=1000)
            query_kwargs = {}
        else:
            environments = [environment] if environment is not None else environment
            query_kwargs = build_query_params_from_request(
                request,
                project.organization,
                [project],
                environments,
            )
            if extra_query_kwargs is not None:
                assert 'environment' not in extra_query_kwargs
                query_kwargs.update(extra_query_kwargs)

            query_kwargs['environments'] = environments
            result = search.query(**query_kwargs)
        return result, query_kwargs
Example #6
    def get_result(self, limit, cursor=None, count_hits=False):
        limit = min(limit, self.max_limit)

        if cursor is None:
            cursor = Cursor(0, 0, False)

        assert cursor.offset > -1

        if cursor.value == 0:
            position = len(self.scores) if cursor.is_prev else 0
        else:
            position = self.search(cursor.value)

        position = position + cursor.offset

        if cursor.is_prev:
            # TODO: It might make sense to ensure that this hi value is at
            # least the length of the page + 1 if we want to ensure we return a
            # full page of results when paginating backwards while data is
            # being mutated.
            hi = min(position, len(self.scores))
            lo = max(hi - limit, 0)
        else:
            lo = max(position, 0)
            hi = min(lo + limit, len(self.scores))

        if self.scores:
            prev_score = self.scores[min(lo, len(self.scores) - 1)]
            prev_cursor = Cursor(
                prev_score,
                lo - self.search(prev_score, hi=lo),
                True,
                lo > 0,
            )

            next_score = self.scores[min(hi, len(self.scores) - 1)]
            next_cursor = Cursor(
                next_score,
                hi - self.search(next_score, hi=hi),
                False,
                hi < len(self.scores),
            )
        else:
            prev_cursor = Cursor(cursor.value, cursor.offset, True, False)
            next_cursor = Cursor(cursor.value, cursor.offset, False, False)

        results = self.values[lo:hi]
        if self.on_results:
            results = self.on_results(results)

        return CursorResult(
            results,
            prev=prev_cursor,
            next=next_cursor,
            hits=min(len(self.scores), MAX_HITS_LIMIT) if count_hits else None,
            max_hits=MAX_HITS_LIMIT if count_hits else None,
        )
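
Example 6 is a sequence paginator over parallel `scores`/`values` lists; it locates the page window with `self.search`, which from its call sites (`self.search(cursor.value)`, `self.search(prev_score, hi=lo)`) behaves like a left bisect over the sorted scores. A sketch under that assumption:

    # Assumed implementation of the `search` hook in Example 6: a left
    # bisect over the sorted scores. Consistent with its call sites, but
    # not shown in the source.
    import bisect

    def search(scores, value, hi=None):
        # Index of the first score >= value; with `hi`, only scores[:hi]
        # are considered, matching the `self.search(prev_score, hi=lo)` calls.
        return bisect.bisect_left(scores, value, 0, len(scores) if hi is None else hi)
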
Example #7
    def get_result(self, limit=100, cursor=None):
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value
        limit = cursor.value or limit

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        primary_results = self.data_load_func(offset=offset, limit=self.max_limit + 1)

        queryset = self.apply_to_queryset(self.queryset, primary_results)

        mapping = {}
        for model in queryset:
            mapping[self.key_from_model(model)] = model

        results = []
        for row in primary_results:
            model = mapping.get(self.key_from_data(row))
            if model is not None:
                results.append(model)

        if self.queryset_load_func and self.data_count_func and len(results) < limit:
            # If we hit the end of the results from the data load func, check whether there are
            # any additional results in the queryset_load_func, if one is provided.
            extra_limit = limit - len(results) + 1
            total_data_count = self.data_count_func()
            total_offset = offset + len(results)
            qs_offset = max(0, total_offset - total_data_count)
            qs_results = self.queryset_load_func(self.queryset, total_offset,
                                                 qs_offset, extra_limit)
            results.extend(qs_results)
            has_more = len(qs_results) == extra_limit
        else:
            has_more = len(primary_results) > limit

        results = results[:limit]
        next_cursor = Cursor(limit, page + 1, False, has_more)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
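
Example 7 is a two-phase paginator: a primary data source yields keyed rows, one queryset round-trip fetches the matching models, and the rows' ordering is preserved while unmatched keys drop out. The join step, isolated for clarity:

    # The row-to-model join from Example 7. The key_from_data and
    # key_from_model hooks belong to the paginator; this standalone
    # version takes them as arguments instead.
    def join_rows_to_models(rows, models, key_from_data, key_from_model):
        mapping = {key_from_model(m): m for m in models}
        # Keep the primary source's ordering; skip rows whose model was
        # not found (e.g. deleted since the data was indexed).
        return [mapping[key_from_data(r)] for r in rows if key_from_data(r) in mapping]
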
Example #8
    def get_result(self, limit: int, cursor: Optional[Cursor] = None) -> CursorResult:
        assert limit > 0
        offset = cursor.offset if cursor is not None else 0
        # Request 1 more than limit so we can tell if there is another page
        data = self.data_fn(offset, limit + 1)

        has_more = any(len(result["examples"]) == limit + 1 for result in data)
        for result in data:
            result["examples"] = result["examples"][:limit]

        return CursorResult(
            data,
            prev=Cursor(0, max(0, offset - limit), True, offset > 0),
            next=Cursor(0, max(0, offset + limit), False, has_more),
        )
Example #9
    def get_result(self, limit, cursor=None):
        assert limit > 0
        offset = cursor.offset if cursor is not None else 0
        data = self.data_fn(offset=offset, limit=limit + 1)

        if isinstance(data.get("groups"), list):
            has_more = len(data["groups"]) == limit + 1
            if has_more:
                data["groups"].pop()
        else:
            raise NotImplementedError

        return CursorResult(
            data,
            prev=Cursor(0, max(0, offset - limit), True, offset > 0),
            next=Cursor(0, max(0, offset + limit), False, has_more),
        )
Example #10
    def get_result(self, limit=100, cursor=None):
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if limit <= 0:
            raise BadPaginationError("Limit must be positive")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        results = []
        # note: we shouldn't use itertools.islice(itertools.chain.from_iterable(self.sources))
        # because a source may be a QuerySet, which is much more efficient to slice directly
        for source in self.sources:
            # Get an additional item so we can check for a next page.
            remaining = limit - len(results) + 1
            results.extend(source[offset:offset + remaining])
            # don't do offset = max(0, offset - len(source)) because len(source) may be expensive
            if len(results) == 0:
                offset -= len(source)
            else:
                offset = 0
            if len(results) > limit:
                assert len(results) == limit + 1
                break

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        if next_cursor.has_results:
            results.pop()

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
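
Examples 10 and 11 chain several sources into one paginated stream; the subtlety is the offset bookkeeping when the requested page starts beyond the end of a source. A worked trace of Example 10's loop with plain lists standing in for querysets:

    # Worked trace: limit=5, starting offset=6, two sources.
    sources = [list(range(4)), list(range(100, 110))]
    limit, offset, results = 5, 6, []
    for source in sources:
        remaining = limit - len(results) + 1
        results.extend(source[offset:offset + remaining])
        if len(results) == 0:
            offset -= len(source)  # first source fully skipped: 6 - 4 = 2
        else:
            offset = 0             # later sources start from their beginning
        if len(results) > limit:
            break
    # results == [102, 103, 104, 105, 106, 107]; the sixth item only
    # proves there is a next page and is popped before returning.
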
Example #11
    def get_result(self, limit=100, cursor=None):
        # offset is page #
        # value is page limit
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        results = []
        # Get an additional item so we can check for a next page.
        remaining = limit + 1
        for source in self.sources:
            source_results = list(source[offset:offset + remaining])
            results.extend(source_results)
            result_count = len(results)
            if result_count == 0 and result_count < remaining:
                # Advance the offset based on the rows we skipped.
                offset = offset - len(source)
            elif result_count > 0 and result_count < remaining:
                # Start at the beginning of the next source
                offset = 0
                remaining = remaining - result_count
            elif result_count >= limit:
                break

        next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)

        if next_cursor.has_results:
            results.pop()

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
Example #12
    def get_result(self, limit, cursor=None):
        assert limit > 0
        offset = cursor.offset if cursor is not None else 0
        # Request 1 more than limit so we can tell if there is another page
        # Use raw_limit for the histogram itself so bucket calculations are correct
        data = self.data_fn(offset=offset, limit=limit + 1, raw_limit=limit)

        if isinstance(data["tags"], list):
            has_more = len(data["tags"]) == limit + 1
            if has_more:
                data["tags"].pop()
        else:
            raise NotImplementedError

        return CursorResult(
            data,
            prev=Cursor(0, max(0, offset - limit), True, offset > 0),
            next=Cursor(0, max(0, offset + limit), False, has_more),
        )
Example #13
    def _search(self, request, project, extra_query_kwargs=None):
        query_kwargs = self._build_query_params_from_request(request, project)
        if extra_query_kwargs is not None:
            assert 'environment' not in extra_query_kwargs
            query_kwargs.update(extra_query_kwargs)

        try:
            if features.has('organizations:environments', project.organization, actor=request.user):
                query_kwargs['environment'] = self._get_environment_from_request(
                    request,
                    project.organization_id,
                )
        except Environment.DoesNotExist:
            # XXX: The 1000 magic number for `max_hits` is an abstraction leak
            # from `sentry.api.paginator.BasePaginator.get_result`.
            result = CursorResult([], None, None, hits=0, max_hits=1000)
        else:
            result = search.query(**query_kwargs)
        return result, query_kwargs
Example #14
    def get_result(self, limit=100, cursor=None):
        if cursor is None:
            cursor = Cursor(0, 0, 0)

        limit = min(limit, self.max_limit)

        page = cursor.offset
        offset = cursor.offset * cursor.value
        limit = (cursor.value or limit) + 1

        if self.max_offset is not None and offset >= self.max_offset:
            raise BadPaginationError("Pagination offset too large")
        if offset < 0:
            raise BadPaginationError("Pagination offset cannot be negative")

        primary_results = self.data_load_func(offset=offset, limit=self.max_limit)

        queryset = self.apply_to_queryset(self.queryset, primary_results)

        mapping = {}
        for model in queryset:
            mapping[self.key_from_model(model)] = model

        results = []
        for row in primary_results:
            model = mapping.get(self.key_from_data(row))
            if model is not None:
                results.append(model)

        next_cursor = Cursor(limit, page + 1, False, len(primary_results) > limit)
        prev_cursor = Cursor(limit, page - 1, True, page > 0)
        results = list(results[:limit])

        if self.on_results:
            results = self.on_results(results)

        return CursorResult(results=results,
                            next=next_cursor,
                            prev=prev_cursor)
Example #15
    def query(
        self,
        project,
        query=None,
        status=None,
        tags=None,
        bookmarked_by=None,
        assigned_to=None,
        sort_by="date",
        date_filter="last_seen",
        date_from=None,
        date_to=None,
        cursor=None,
        limit=100,
    ):

        query_body = {"filter": {"and": [{"term": {"project_id": project.id}}]}}
        if query:
            query_body["query"] = {"match": {"message": query}}

        if status is not None:
            query_body["filter"]["and"].append({"term": {"status": status}})

        if tags:
            # TODO(dcramer): filter might be too expensive here, need to confirm
            query_body["filter"]["and"].append(
                {
                    "has_child": {
                        "type": "event",
                        "filter": {"and": [{"term": {"tag:{0}".format(k): v}} for k, v in tags.iteritems()]},
                    }
                }
            )

        # TODO(dcramer): filter might be too expensive here, need to confirm
        if date_to and date_from:
            query_body["filter"]["and"].append({"range": {date_filter: {"gte": date_from, "lte": date_to}}})
        elif date_from:
            query_body["filter"]["and"].append({"range": {date_filter: {"gte": date_from}}})
        elif date_to:
            query_body["filter"]["and"].append({"range": {date_filter: {"lte": date_to}}})

        if bookmarked_by:
            # TODO(dcramer): we could store an array on each event similar to how
            # we are doing tags? should we just make bookmarked events a special
            # thing that isn't searchable?
            raise NotImplementedError

        if assigned_to:
            # TODO(dcramer):
            raise NotImplementedError

        if sort_by == "date":
            sort_clause = [{"last_seen": {"order": "desc"}}]
        elif sort_by == "new":
            sort_clause = [{"first_seen": {"order": "desc"}}]
        elif sort_by == "priority":
            sort_clause = [{"score": {"order": "desc"}}]
        elif sort_by == "freq":
            sort_clause = [{"times_seen": {"order": "desc"}}]
        elif sort_by == "tottime":
            raise NotImplementedError
        elif sort_by == "avgtime":
            raise NotImplementedError
        else:
            raise ValueError("Invalid sort_by: %s" % (sort_by,))

        results = self.backend.search(
            index=self.index_prefix + "sentry-1",
            doc_type="group",
            body={
                "query": {"filtered": query_body},
                "sort": sort_clause,
                "size": limit,
                # 'from': offset,
            },
        )
        if not results.get("hits"):
            return CursorResult(results=[], cursor=cursor, limit=limit)

        instance_ids = [int(n["_id"]) for n in results["hits"]["hits"]]

        return CursorResult.from_ids(
            id_list=instance_ids,
            cursor=cursor,
            limit=limit,
            # TODO(dcramer): implement cursors
            key="id",
        )
Example #16
    def query(self, project, query=None, status=None, tags=None,
              bookmarked_by=None, assigned_to=None, sort_by='date',
              date_filter='last_seen', date_from=None, date_to=None,
              cursor=None, limit=100):

        query_body = {
            'filter': {
                'and': [
                    {'term': {'project_id': project.id}},
                ],
            },
        }
        if query:
            query_body['query'] = {'match': {'message': query}}

        if status is not None:
            query_body['filter']['and'].append({'term': {'status': status}})

        if tags:
            # TODO(dcramer): filter might be too expensive here, need to confirm
            query_body['filter']['and'].append({'has_child': {
                'type': 'event',
                'filter': {
                    'and': [
                        {'term': {'tag:{0}'.format(k): v}}
                        for k, v in tags.iteritems()
                    ]
                },
            }})

        # TODO(dcramer): filter might be too expensive here, need to confirm
        if date_to and date_from:
            query_body['filter']['and'].append({
                'range': {date_filter: {
                    'gte': date_from,
                    'lte': date_to,
                }}
            })
        elif date_from:
            query_body['filter']['and'].append({
                'range': {date_filter: {
                    'gte': date_from,
                }}
            })
        elif date_to:
            query_body['filter']['and'].append({
                'range': {date_filter: {
                    'lte': date_to,
                }}
            })

        if bookmarked_by:
            # TODO(dcramer): we could store an array on each event similar to how
            # we are doing tags? should we just make bookmarked events a special
            # thing that isn't searchable?
            raise NotImplementedError

        if assigned_to:
            # TODO(dcramer):
            raise NotImplementedError

        if sort_by == 'date':
            sort_clause = [{'last_seen': {'order': 'desc'}}]
        elif sort_by == 'new':
            sort_clause = [{'first_seen': {'order': 'desc'}}]
        elif sort_by == 'priority':
            sort_clause = [{'score': {'order': 'desc'}}]
        elif sort_by == 'freq':
            sort_clause = [{'times_seen': {'order': 'desc'}}]
        elif sort_by == 'tottime':
            raise NotImplementedError
        elif sort_by == 'avgtime':
            raise NotImplementedError
        else:
            raise ValueError('Invalid sort_by: %s' % (sort_by,))

        results = self.backend.search(
            index=self.index_prefix + 'sentry-1',
            doc_type='group',
            body={
                'query': {'filtered': query_body},
                'sort': sort_clause,
                'size': limit,
                # 'from': offset,
            },
        )
        if not results.get('hits'):
            return CursorResult(
                results=[],
                cursor=cursor,
                limit=limit,
            )

        instance_ids = [int(n['_id']) for n in results['hits']['hits']]

        return CursorResult.from_ids(
            id_list=instance_ids,
            cursor=cursor,
            limit=limit,
            # TODO(dcramer): implement cursors
            key=lambda x: x.id,
        )
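
Examples 15 and 16 build the same Elasticsearch 1.x `filtered` query; the two differ only in formatting and in the `key` passed to `CursorResult.from_ids`. Tracing the code for one concrete call makes the generated body easier to see (values below are illustrative, traced from the code rather than captured from a live request):

    # Body produced by query(project, query='timeout', status=0,
    # sort_by='freq', limit=100), assuming project.id == 1:
    {
        'query': {
            'filtered': {
                'query': {'match': {'message': 'timeout'}},
                'filter': {'and': [
                    {'term': {'project_id': 1}},
                    {'term': {'status': 0}},
                ]},
            },
        },
        'sort': [{'times_seen': {'order': 'desc'}}],
        'size': 100,
    }
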
Example #17
    def get(self, request, project):
        """
        List a Project's Issues
        ```````````````````````

        Return a list of issues (groups) bound to a project.  All parameters are
        supplied as query string parameters.

        A default query of ``is:unresolved`` is applied. To return results
        with other statuses, send a new query value (i.e. ``?query=`` for
        all results).

        The ``statsPeriod`` parameter can be used to select the timeline
        stats which should be present. Possible values are: '' (disable),
        '24h', '14d'

        :qparam string statsPeriod: an optional stat period (can be one of
                                    ``"24h"``, ``"14d"``, and ``""``).
        :qparam bool shortIdLookup: if this is set to true then short IDs are
                                    looked up by this function as well.  This
                                    can cause the return value of the function
                                    to return an event issue of a different
                                    project which is why this is an opt-in.
                                    Set to `1` to enable.
        :qparam querystring query: an optional Sentry structured search
                                   query.  If not provided, an implied
                                   ``"is:unresolved"`` is assumed.
        :pparam string organization_slug: the slug of the organization the
                                          issues belong to.
        :pparam string project_slug: the slug of the project the issues
                                     belong to.
        :auth: required
        """
        stats_period = request.GET.get('statsPeriod')
        if stats_period not in (None, '', '24h', '14d'):
            return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
        elif stats_period is None:
            # default
            stats_period = '24h'
        elif stats_period == '':
            # disable stats
            stats_period = None

        serializer = functools.partial(
            StreamGroupSerializer,
            environment_func=self._get_environment_func(request, project.organization_id),
            stats_period=stats_period,
        )

        query = request.GET.get('query', '').strip()
        if query:
            matching_group = None
            matching_event = None
            if len(query) == 32:
                # check to see if we've got an event ID
                try:
                    matching_group = Group.objects.from_event_id(project, query)
                except Group.DoesNotExist:
                    pass
                else:
                    try:
                        matching_event = Event.objects.get(
                            event_id=query, project_id=project.id)
                    except Event.DoesNotExist:
                        pass

            # If the query looks like a short id, we want to provide some
            # information about where that is.  Note that this can return
            # results for another project.  The UI deals with this.
            elif request.GET.get('shortIdLookup') == '1' and \
                    looks_like_short_id(query):
                try:
                    matching_group = Group.objects.by_qualified_short_id(
                        project.organization_id, query
                    )
                except Group.DoesNotExist:
                    matching_group = None

            if matching_group is not None:
                response = Response(
                    serialize(
                        [matching_group], request.user, serializer(
                            matching_event_id=getattr(matching_event, 'id', None),
                        )
                    )
                )
                response['X-Sentry-Direct-Hit'] = '1'
                return response

        try:
            query_kwargs = self._build_query_params_from_request(
                request, project)
        except ValidationError as exc:
            return Response({'detail': six.text_type(exc)}, status=400)

        try:
            environment_id = self._get_environment_id_from_request(
                request, project.organization_id)
        except Environment.DoesNotExist:
            # XXX: The 1000 magic number for `max_hits` is an abstraction leak
            # from `sentry.api.paginator.BasePaginator.get_result`.
            cursor_result = CursorResult([], None, None, hits=0, max_hits=1000)
        else:
            cursor_result = search.query(
                count_hits=True,
                environment_id=environment_id,
                **query_kwargs)

        results = list(cursor_result)

        context = serialize(results, request.user, serializer())

        # HACK: remove auto resolved entries
        if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
            context = [r for r in context if r['status'] == 'unresolved']

        response = Response(context)

        self.add_cursor_headers(request, response, cursor_result)

        if results and query not in SAVED_SEARCH_QUERIES:
            advanced_search.send(project=project, sender=request.user)

        return response
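
For reference, a request against this endpoint might look as follows; the path shape is inferred from the :pparam: entries in the docstring above, and the slugs are placeholders:

    # GET /api/0/projects/{organization_slug}/{project_slug}/issues/
    #         ?statsPeriod=24h&shortIdLookup=1&query=is%3Aunresolved
    # A 32-character query is treated as an event ID, and a short-ID-shaped
    # query (with shortIdLookup=1) as a short ID; a match returns that
    # single group with the X-Sentry-Direct-Hit: 1 response header.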