Example No. 1
    @classmethod
    def resolve_connection(cls, connection, args, iterable, max_limit=None):
        # Remove the offset parameter and convert it to an after cursor.
        offset = args.pop("offset", None)
        after = args.get("after")
        if offset:
            if after:
                offset += cursor_to_offset(after) + 1
            # input offset starts at 1 while the graphene offset starts at 0
            args["after"] = offset_to_cursor(offset - 1)

        iterable = maybe_queryset(iterable)

        if isinstance(iterable, QuerySet):
            list_length = iterable.count()
        else:
            list_length = len(iterable)
        list_slice_length = (min(max_limit, list_length)
                             if max_limit is not None else list_length)

        # If after is higher than list_length, connection_from_list_slice
        # would try to do a negative slicing which makes django throw an
        # AssertionError
        after = min(
            get_offset_with_default(args.get("after"), -1) + 1, list_length)

        if max_limit is not None and "first" not in args:
            if "last" in args:
                args["first"] = list_length
                list_slice_length = list_length
            else:
                args["first"] = max_limit

        connection = connection_from_list_slice(
            iterable[after:],
            args,
            slice_start=after,
            list_length=list_length,
            list_slice_length=list_slice_length,
            connection_type=connection,
            edge_type=connection.Edge,
            pageinfo_type=PageInfo,
        )
        connection.iterable = iterable
        connection.length = list_length
        return connection
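
The offset-to-cursor translation above relies on graphql_relay's cursor encoding, which is just base64 of "arrayconnection:<offset>". A minimal sketch of the round trip (the exact base64 string is an implementation detail of graphql_relay):

from graphql_relay import cursor_to_offset, offset_to_cursor

cursor = offset_to_cursor(9)          # base64 of "arrayconnection:9"
assert cursor_to_offset(cursor) == 9  # and back again

# A 1-based input offset of 10 is rewritten as the cursor for graphene
# offset 9, so pagination resumes after that element, as in the resolver:
args = {"offset": 10}
args["after"] = offset_to_cursor(args.pop("offset") - 1)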
Example No. 2
def get_entities(interfaces, states, args, info, user=None, intersect=None):  #pylint: disable=W0613
    try:
        after = cursor_to_offset(args.get('after'))
        first = args.get('first')
        if after is None:
            limit = first
        else:
            limit = after + 1 + first

        limit = limit + 1  # retrieve one more row so hasNextPage can be computed
    except Exception:  # FIXME: relies on a TypeError when 'first' is absent; fall back to no limit
        limit = None

    # For scrolling through the results, it's important that the sort is
    # stable. release_date is set to datetime.datetime.now(tz=pytz.UTC) when
    # the event is published, so we have microsecond resolution and therefore
    # a stable sort even with unstable sort algorithms like nbest (it's
    # unlikely that several events share the same date).
    # When we specify a limit in the query, the sort algorithm chosen will
    # most likely be nbest instead of the stable timsort (Python's sorted).
    # The sort is ascending, so we will pick up new events published during
    # the scroll, which is fine.
    # The only real issue: if x events are removed or unpublished during the
    # scroll, we will skip x new events during the scroll.
    # A naive solution is to implement our own graphql arrayconnection that
    # slices from the last known oid + 1, but the last known oid may no
    # longer be in the array, so that doesn't work. Skipping x events is not
    # too bad; in practice it should rarely happen.
    rs = find_entities(sort_on=None,
                       user=user,
                       interfaces=interfaces,
                       metadata_filter={'states': states},
                       text_filter={'text_to_search': args.get('filter', '')},
                       intersect=intersect)
    catalog = find_catalog('novaideo')
    release_date_index = catalog['release_date']
    return list(
        release_date_index.sort(list(rs.ids),
                                limit=limit,
                                sort_type=STABLE,
                                reverse=True))  #pylint: disable=E1101
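
The limit arithmetic in the try block is easier to see with concrete numbers: if the after cursor decodes to offset 4 and first is 10, the index must return offsets 0..14 plus one sentinel row for hasNextPage. A minimal sketch:

after, first = 4, 10          # 'after' decodes to zero-based offset 4
limit = after + 1 + first     # 15 rows cover offsets 0..14
limit = limit + 1             # 16: the extra row only feeds hasNextPage

rows = list(range(limit))     # stand-in for the sorted catalog result
page = rows[after + 1:]       # what the client actually pages over
assert page[:first] == [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
assert len(page) == first + 1  # the 16th row proves there is a next page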
Example No. 3
def get_all_comments(container, args):
    try:
        after = cursor_to_offset(args.get('after'))
        first = args.get('first')
        if after is None:
            limit = first
        else:
            limit = after + 1 + first

        limit = limit + 1  # retrieve one more row so hasNextPage can be computed
    except Exception:  # FIXME: relies on a TypeError when 'first' is absent; fall back to no limit
        limit = None

    filter_ = args.get('filter', '')
    comments = get_comments(container, [], filter_, filter_)
    catalog = find_catalog('novaideo')
    release_date_index = catalog['release_date']
    return list(
        release_date_index.sort(list(comments.ids),
                                limit=limit,
                                sort_type=STABLE,
                                reverse=True))
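
The same try/except limit computation appears verbatim in Examples 2 through 4, so it is a natural candidate for a helper. A hedged sketch (compute_limit is a hypothetical name, not part of the original codebase):

from graphql_relay import cursor_to_offset

def compute_limit(args):
    """Hypothetical helper mirroring the repeated block above.

    Returns None (no limit) when 'first' is missing, matching the
    bare-except fallback in the examples; cursor_to_offset already
    returns None for a missing or malformed cursor.
    """
    first = args.get('first')
    if first is None:
        return None
    after = cursor_to_offset(args.get('after'))
    limit = first if after is None else after + 1 + first
    return limit + 1  # one extra row so hasNextPage can be computed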
Example No. 4
def get_cultural_events(args, info):
    start, end = get_start_end_dates(args)
    request = get_current_request()
    request.start_end = (start, end)  # used in resolve_next_date
    location = args.get('geo_location', None)
    venues = []
    if location:
        radius = args.get('radius', DEFAULT_RADIUS)
        venues = get_venues_by_location(location, radius, get_current_hour())
        if not venues:
            return []

    cities_query = get_cities_query(args)
    dates_query = get_dates_query(args)
    dates_range_query = get_dates_range_query(args)
    location_query = get_location_query(venues)
    query = and_op(location_query, dates_query)
    query = and_op(query, dates_range_query)
    query = and_op(query, cities_query)
    try:
        after = cursor_to_offset(args.get('after'))
        first = args.get('first')
        if after is None:
            limit = first
        else:
            limit = after + 1 + first

        limit = limit + 1  # retrieve one more row so hasNextPage can be computed
    except Exception:  # a missing 'first' raises a TypeError; fall back to no limit
        limit = None

    # For scrolling through the results, it's important that the sort is
    # stable. release_date is set to datetime.datetime.now(tz=pytz.UTC) when
    # the event is published, so we have microsecond resolution and therefore
    # a stable sort even with unstable sort algorithms like nbest (it's
    # unlikely that several events share the same date).
    # When we specify a limit in the query, the sort algorithm chosen will
    # most likely be nbest instead of the stable timsort (Python's sorted).
    # The sort is ascending, so we will pick up new events published during
    # the scroll, which is fine.
    # The only real issue: if x events are removed or unpublished during the
    # scroll, we will skip x new events during the scroll.
    # A naive solution is to implement our own graphql arrayconnection that
    # slices from the last known oid + 1, but the last known oid may no
    # longer be in the array, so that doesn't work. Skipping x events is not
    # too bad; in practice it should rarely happen.
    rs = find_entities(
        add_query=query,
        # sort_on="release_date",
        # limit=limit,
        sort_on=None,
        interfaces=[ICulturalEvent],
        metadata_filter={'states': ['published', 'archived']},
        text_filter={'text_to_search': args.get('text', '')},
        keywords=args.get('categories', ''),
        force_publication_date=None  # None avoids intersecting with the publication_start_date index
    )
    lac_catalog = find_catalog('lac')
    sort_on = args.get('sort_on', 'release_date')
    if sort_on == 'start_date':
        start_date_index = lac_catalog['start_date']
        return list(start_date_index.sort(
            list(rs.ids), limit=limit, from_=start, until=end,
            sort_type=STABLE))
    else:
        release_date_index = lac_catalog['release_date']
        return list(release_date_index.sort(
            list(rs.ids), limit=limit, sort_type=NBEST))
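
The comment block insists that the sort must be stable; the point is easy to demonstrate in isolation. A minimal, self-contained sketch of why stability keeps offsets consistent between paged requests:

# Two items share a release_date; a stable sort preserves their relative
# order, so every request sees the same item at the same offset.
events = [('b', 1), ('a', 1), ('c', 0)]   # (oid, release_date) with a tie
page1 = sorted(events, key=lambda e: e[1], reverse=True)
page2 = sorted(events, key=lambda e: e[1], reverse=True)
assert page1 == page2 == [('b', 1), ('a', 1), ('c', 0)]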
Example No. 5
    def _assert_page_info(self, page_info, has_next, has_prev, start, end):
        self.assertEqual(has_next, page_info.has_next_page)
        self.assertEqual(has_prev, page_info.has_previous_page)
        self.assertEqual(start, cursor_to_offset(page_info.start_cursor))
        self.assertEqual(end, cursor_to_offset(page_info.end_cursor))
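
A hedged usage sketch for this helper (test_first_two is a hypothetical test and assumes the graphql_relay 2.x connection_from_list API used in the other examples):

    def test_first_two(self):
        from graphql_relay import connection_from_list

        # first=2 over four items: edges end at offset 1, a next page
        # exists, and there is no previous page.
        connection = connection_from_list([1, 2, 3, 4], args={'first': 2})
        self._assert_page_info(connection.page_info,
                               has_next=True, has_prev=False, start=0, end=1)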
Example No. 6
    def default_resolver(self, _root, info, required_fields=None, **args):
        # None instead of a mutable default argument; **args is always a dict.
        required_fields = required_fields or []

        if _root is not None:
            field_name = to_snake_case(info.field_name)
            if field_name in _root._fields_ordered:
                if getattr(_root, field_name, []) is not None:
                    args["pk__in"] = [
                        r.id for r in getattr(_root, field_name, [])
                    ]

        _id = args.pop('id', None)

        if _id is not None:
            args['pk'] = from_global_id(_id)[-1]
        iterables = []
        list_length = 0
        skip = 0
        count = 0
        limit = None
        reverse = False
        if callable(getattr(self.model, "objects", None)):
            first = args.pop("first", None)
            after = cursor_to_offset(args.pop("after", None))
            last = args.pop("last", None)
            before = cursor_to_offset(args.pop("before", None))
            if "pk__in" in args and args["pk__in"]:
                count = len(args["pk__in"])
                skip, limit, reverse = find_skip_and_limit(first=first,
                                                           last=last,
                                                           after=after,
                                                           before=before,
                                                           count=count)
                if limit:
                    if reverse:
                        args["pk__in"] = args["pk__in"][::-1][skip:skip + limit]
                    else:
                        args["pk__in"] = args["pk__in"][skip:skip + limit]
                elif skip:
                    args["pk__in"] = args["pk__in"][skip:]
                iterables = self.get_queryset(self.model, info,
                                              required_fields, **args)
                list_length = len(iterables)
                if isinstance(info, ResolveInfo):
                    if not info.context:
                        info.context = Context()
                    info.context.queryset = self.get_queryset(
                        self.model, info, required_fields, **args)
            elif _root is None:
                count = self.get_queryset(self.model, info, required_fields,
                                          **args).count()
                if count != 0:
                    skip, limit, reverse = find_skip_and_limit(first=first,
                                                               after=after,
                                                               last=last,
                                                               before=before,
                                                               count=count)
                    iterables = self.get_queryset(self.model, info,
                                                  required_fields, skip, limit,
                                                  reverse, **args)
                    list_length = len(iterables)
                    if isinstance(info, ResolveInfo):
                        if not info.context:
                            info.context = Context()
                        info.context.queryset = self.get_queryset(
                            self.model, info, required_fields, **args)
        has_next_page = (limit or 0) + (skip or 0) < count
        has_previous_page = bool(skip)
        if reverse:
            iterables = list(iterables)
            iterables.reverse()
            skip = limit
        connection = connection_from_iterables(
            edges=iterables,
            start_offset=skip,
            has_previous_page=has_previous_page,
            has_next_page=has_next_page,
            connection_type=self.type,
            edge_type=self.type.Edge,
            pageinfo_type=graphene.PageInfo)

        connection.iterable = iterables
        connection.list_length = list_length
        return connection
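
find_skip_and_limit is a graphene_mongo helper; for the common forward-pagination case its contract reduces to "skip past the after offset, take first". A hedged sketch of just that case (the real helper also handles last/before and reverse paging):

def forward_skip_and_limit(first, after):
    # Hypothetical reduction of find_skip_and_limit to forward paging:
    # skip everything up to and including the 'after' offset.
    skip = 0 if after is None else after + 1
    return skip, first

assert forward_skip_and_limit(first=10, after=None) == (0, 10)
assert forward_skip_and_limit(first=10, after=4) == (5, 10)  # offsets 5..14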
Example No. 7
    def connection_from_query(cls,
                              query,
                              args=None,
                              connection_type=None,
                              edge_type=None,
                              pageinfo_type=None):
        """
        similar to connection_from_list, but replace some of page operations to database limit...offset...
        it will be much faster and save more memory
        """
        connection_type = connection_type or graphene.relay.Connection
        edge_type = edge_type or connection_type.Edge
        pageinfo_type = pageinfo_type or graphene.relay.PageInfo

        args = args or {}

        before = cursor_to_offset(args.get('before', ''))
        after = cursor_to_offset(args.get('after', ''))
        first = args.get('first', None)
        last = args.get('last', None)

        offset = 0
        limit = None
        slice_start = None

        if after is not None:
            offset = after + 1
        if before is not None:
            limit = max(before - offset, 0)
            if first is not None:
                limit = min(limit, first)
            elif last is not None:
                offset = max(before - last, offset)
                limit = max(before - offset, 0)
        else:
            if first is not None:
                limit = first
            elif last is not None:
                slice_start = -last

        if limit is not None:
            query = query.limit(limit + 1)
        query = query.offset(offset)
        query_result = list(query)
        _len = len(query_result)

        if limit is not None and _len > limit:
            query_result = query_result[:-1]

        cursor_offset = offset
        if slice_start is not None:
            cursor_offset = offset + _len + slice_start

        edges = [
            edge_type(node=node, cursor=offset_to_cursor(cursor_offset + i))
            for i, node in enumerate(query_result[slice_start:])
        ]

        first_edge_cursor = edges[0].cursor if edges else None
        last_edge_cursor = edges[-1].cursor if edges else None
        first_edge_offset = cursor_to_offset(first_edge_cursor)
        last_edge_offset = cursor_to_offset(last_edge_cursor)
        has_previous_page = bool(first_edge_offset and last
                                 and (first_edge_offset > 0 if after is None
                                      else first_edge_offset > after + 1))
        has_next_page = bool(last_edge_cursor and first
                             and (_len > limit if before is None else
                                  last_edge_offset < before - 1))

        return connection_type(edges=edges,
                               page_info=pageinfo_type(
                                   start_cursor=first_edge_cursor,
                                   end_cursor=last_edge_cursor,
                                   has_previous_page=has_previous_page,
                                   has_next_page=has_next_page))
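
A hedged usage sketch, assuming a SQLAlchemy-style query (anything with .limit(), .offset(), and iteration fits; session, User, MyConnection, and MyField are hypothetical names):

from graphql_relay import offset_to_cursor

connection = MyField.connection_from_query(
    session.query(User).order_by(User.id),            # hypothetical query
    args={'first': 20, 'after': offset_to_cursor(39)},
    connection_type=MyConnection,                      # hypothetical type
)
# The database sees LIMIT 21 OFFSET 40 (the extra row feeds has_next_page)
# instead of the whole table being materialized in Python.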