def get_next_link(self):
    """Return the absolute URL of the next page, or None when on the last page."""
    if not self.page_has_next():
        return None
    base = self.request.build_absolute_uri()
    with_size = replace_query_param(base, self.page_size_query_param, self.page_size)
    return replace_query_param(with_size, self.page_query_param, self.page_number + 1)
def get_previous_link(self):
    """Return the absolute URL of the previous page, or None on the first page."""
    if not self.page_has_previous():
        return None
    base = self.request.build_absolute_uri()
    if self.page_number == 2:
        # Previous page is page 1: drop the page parameter entirely.
        return remove_query_param(base, self.page_query_param)
    with_size = replace_query_param(base, self.page_size_query_param, self.page_size)
    return replace_query_param(with_size, self.page_query_param, self.page_number - 1)
def get_previous_link(self):
    """Cursor-style link to results older than the current page.

    Returns None when there is no page, the page is empty, or the page is
    not full (i.e. nothing older remains).
    """
    oldest = hasattr(self, 'page') and len(self.page) and self.page[self.get_oldest_index()]
    if not oldest or len(self.page) < self.page_size:
        return None
    cursor_ts = getattr(oldest, self.datetime_unix_attribute)
    link = get_current_uri(self.request)
    link = replace_query_param(link, self.before_field, cursor_ts)
    return replace_query_param(link, self.page_size_query_param, self.page_size)
def get_next_link(self):
    """Cursor-style link to results newer than the current page.

    Returns None when there is no page, the page is empty, or the page is
    not full (i.e. nothing newer remains).
    """
    newest = hasattr(self, 'page') and len(self.page) and self.page[self.get_recent_index()]
    if not newest or len(self.page) < self.page_size:
        return None
    cursor_ts = getattr(newest, self.datetime_unix_attribute)
    link = get_current_uri(self.request)
    link = replace_query_param(link, self.after_field, cursor_ts)
    return replace_query_param(link, self.page_size_query_param, self.page_size)
def set_next_prev_urls(page, max_page, uri):
    """Return (next_url, previous_url) for 1-based `page`; None at either edge."""
    nexturi = replace_query_param(uri, 'page', page + 1) if page < max_page else None
    previous = replace_query_param(uri, 'page', page - 1) if page > 1 else None
    return nexturi, previous
def set_next_prev_urls(page, max_page, uri):
    """Compute next/previous page URLs for `uri`; None where no such page exists."""
    nexturi = None
    previous = None
    if page < max_page:
        nexturi = replace_query_param(uri, 'page', page + 1)
    if page > 1:
        previous = replace_query_param(uri, 'page', page - 1)
    return nexturi, previous
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
    """Reverse the URL, appending the version query parameter when one is set."""
    url = super(QueryParameterVersioning, self).reverse(
        viewname, args, kwargs, request, format, **extra
    )
    if request.version is None:
        return url
    return replace_query_param(url, self.version_param, request.version)
def to_representation(self, value):
    """Serialize the page into a next-page URL; None when on the last page."""
    if not value.has_next():
        return None
    request = self.context.get('request')
    base = request and request.build_absolute_uri() or ''
    return replace_query_param(base, self.page_field, value.next_page_number())
def to_native(self, value):
    """Render the next-page URL for `value`; None when there is no next page."""
    if not value.has_next():
        return None
    request = self.context.get('request')
    base = request and request.build_absolute_uri() or ''
    return replace_query_param(base, self.page_field, value.next_page_number())
def get_previous_link(self):
    """Return the URL of the previous page, or None when on the first page.

    The original called ``self.get_page()`` twice and built the URL even on
    the early-return path; the cached page number is reused here and the URL
    is only built when needed (assumes ``get_page()`` is a side-effect-free
    accessor — consistent with its other call sites in this file).
    """
    page = self.get_page()
    if page < 2:
        return None
    url = self.request.build_absolute_uri()
    return replace_query_param(url, self.page_query_param, page - 1)
def to_native(self, value):
    """Render a previous-page link built from the request's full path."""
    if not value.has_previous():
        return None
    request = self.context.get('request')
    path = request and request.get_full_path() or ''
    return replace_query_param(path, self.page_field, value.previous_page_number())
def to_representation(self, value):
    """Serialize the page into a previous-page URL; None when on page 1."""
    if not value.has_previous():
        return None
    request = self.context.get('request')
    base = request and request.build_absolute_uri() or ''
    return replace_query_param(base, self.page_field, value.previous_page_number())
def to_native(self, value):
    """Render a next-page link built from the request's full path."""
    if not value.has_next():
        return None
    request = self.context.get('request')
    path = request and request.get_full_path() or ''
    return replace_query_param(path, self.page_field, value.next_page_number())
def to_native(self, page_range):
    """Map every page number in `page_range` to a URL with that page set."""
    request = self.context.get('request')
    base = request and request.build_absolute_uri() or ''
    return [replace_query_param(base, self.page_field, page) for page in page_range]
def to_representation(self, value):
    """Serialize a previous-page link from the request's full path; None on page 1."""
    if not value.has_previous():
        return None
    request = self.context.get('request')
    path = request and request.get_full_path() or ''
    return replace_query_param(path, self.page_field, value.previous_page_number())
def to_native(self, value):
    """Render an absolute previous-page URL; None when there is no previous page."""
    if not value.has_previous():
        return None
    request = self.context.get('request')
    base = request and request.build_absolute_uri() or ''
    return replace_query_param(base, self.page_field, value.previous_page_number())
def get_previous_link(self):
    """URL for the previous page; None when already on page 1."""
    current = self.get_page()
    if current < 2:
        return None
    return replace_query_param(
        self.request.build_absolute_uri(), self.page_query_param, self.get_page() - 1)
def build_page_url(self, page_identifier):
    """Build a URL addressing the given page identifier.

    Page identifiers are opaque here: they are not required to be numbers.
    """
    return replace_query_param(
        self.request.build_absolute_uri(), self.paging_parameter, page_identifier)
def to_representation(self, value):
    """Previous-page URL with the '/api/v1' prefix stripped; None on page 1."""
    if not value.has_previous():
        return None
    request = self.context.get('request')
    path = request and request.get_full_path() or ''
    # Strip the /api/v1 prefix so ansible-galaxy pagination keeps working.
    path = path.replace('/api/v1', '')
    return replace_query_param(path, self.page_field, value.previous_page_number())
def paginate_words(words, request):
    """Generate paginated output from a word query set."""
    paginator = Paginator(words, _get_page_size(request))
    page_num = int(request.query_params.get('page', 1))
    page = paginator.page(page_num)
    body = {
        'count': paginator.count,
        'page_count': paginator.num_pages,
        'page': page_num,
        'results': WordSerializer().serialize_many(page.object_list),
    }
    request_url = request.build_absolute_uri()
    if page.has_previous():
        body['previous'] = replace_query_param(request_url, 'page',
                                               page.previous_page_number())
    if page.has_next():
        body['next'] = replace_query_param(request_url, 'page',
                                           page.next_page_number())
    return Response(body)
def to_native(self, value):
    """Previous-page link: page-numbered outside /api/v1/, offset-based inside it.

    Returns None when there is no request in context or no previous page.
    """
    request = self.context.get('request')
    if not request:
        return None
    if not value.has_previous():
        return None
    url = request.build_absolute_uri() or ''
    if '/api/v1/' not in request.path:
        # Page-numbered pagination: swap the offset param for a page param.
        stripped = remove_query_param(url, self.offset_field)
        return replace_query_param(stripped, self.page_field,
                                   value.previous_page_number())
    # Offset pagination: step the offset back by the current page's size.
    try:
        offset = int(request.GET.get('offset') or 0)
    except ValueError:
        offset = 0
    new_offset = offset - value.object_list.count()
    if new_offset > 0:
        return replace_query_param(url, self.offset_field, new_offset)
    return remove_query_param(url, self.offset_field)
def paginate_queryset(self, queryset, page_size=None):
    """Paginate `queryset` and expose pagination metadata via response headers."""
    page = super().paginate_queryset(queryset=queryset, page_size=page_size)
    if page is None:
        return page
    paginator = page.paginator
    self.headers["x-pagination-count"] = paginator.count
    self.headers["x-paginated"] = "true"
    self.headers["x-paginated-by"] = paginator.per_page
    self.headers["x-pagination-current"] = page.number
    base = self.request.build_absolute_uri()
    if page.has_next():
        self.headers["X-Pagination-Next"] = replace_query_param(
            base, "page", page.next_page_number())
    if page.has_previous():
        self.headers["X-Pagination-Prev"] = replace_query_param(
            base, "page", page.previous_page_number())
    return page
def get_next_page_url(self, url, page):
    """
    Calculate the URL for the next page of results, based on the current
    `page` and base `url`. (`page` is the output of `paginate_queryset`.)

    Returns None when this is the last page.
    """
    params = self.get_default_paging_params()
    if page['offset'] is None or page['end_row'] == page['total'] - 1:
        return None
    next_offset = page['offset'] + page['limit']
    return urllib.unquote(
        replace_query_param(url, params['offset_qp'], next_offset))
def get_prev_page_url(self, url, page):
    """
    Calculate the URL for the previous page of results, based on the current
    `page` and base `url`. (`page` is the output of `paginate_queryset`.)

    Returns None when this is the first page.
    """
    params = self.get_default_paging_params()
    offset, limit = page['offset'], page['limit']
    if offset is None or offset == 0:
        return None
    # Clamp so the previous offset never goes negative.
    prev_offset = max(offset - limit, 0)
    return urllib.unquote(
        replace_query_param(url, params['offset_qp'], prev_offset))
def get_next_link(self):
    """Unconditionally build the URL addressing the page after the current one."""
    return replace_query_param(
        self.request.build_absolute_uri(), self.page_query_param, self.get_page() + 1)
def to_native(self, value):
    """Relative (query-string only) next-page link; None when on the last page."""
    if not value.has_next():
        return None
    return replace_query_param("", self.page_field, value.next_page_number())
def build_link(self, index):
    """URL pointing at page `index`; None when `index` is falsy."""
    if not index:
        return None
    base = self.request and self.request.build_absolute_uri() or ''
    return replace_query_param(base, 'page', index)
def MongoDataPagination(DB_MongoClient, database, collection, query=None,
                        page=1, nPerPage=None, uri=''):
    """Paginate documents from a MongoDB collection.

    `query`, when given, is a string literal of ``find()`` keyword arguments
    (parsed with ``ast.literal_eval``); its ``spec`` entry is normalised to a
    plain dict. Returns an OrderedDict with count/meta/next/previous/results.

    NOTE(review): `nPerPage` defaults to None, which would fail on the
    division below — callers presumably always pass an int; confirm.

    Changes from the original: the duplicated query/no-query branches are
    factored through `make_cursor`, a leftover debug ``print max_page`` was
    removed, `next` no longer shadows the builtin, and the bare ``except``
    around the OrderedDict fallback is narrowed to AttributeError (the error
    ``collections.OrderedDict`` raises on Python < 2.7).
    """
    db = DB_MongoClient
    if query:
        query = ast.literal_eval(query)
        # Normalise 'spec' to a plain dict regardless of what literal_eval yielded.
        query['spec'] = dict((k, v) for k, v in query['spec'].items())

        def make_cursor():
            return db[database][collection].find(**query)
    else:
        def make_cursor():
            return db[database][collection].find()

    count = make_cursor().count()
    if nPerPage == 0:
        page = 1
        offset = 0
        max_page = 1
    else:
        max_page = math.ceil(float(count) / nPerPage)
        # Page numbering starts at 1.
        if page < 1:
            page = 1
        # Clamp to the last page that still has data.
        if page * nPerPage > count:
            page = int(max_page)
        # count == 0 makes max_page 0; keep page at least 1.
        if page < 1:
            page = 1
        offset = (page - 1) * nPerPage
    # limit(0) means "no limit" in MongoDB, matching the nPerPage == 0 case.
    data = [row for row in make_cursor().skip(offset).limit(nPerPage)]

    if page < max_page:
        next_uri = replace_query_param(uri, 'page', page + 1)
    else:
        next_uri = None
    if page > 1:
        previous = replace_query_param(uri, 'page', page - 1)
    else:
        previous = None

    result = {'count': count,
              'meta': {'page': page, 'page_size': nPerPage, 'pages': int(max_page)},
              'next': next_uri,
              'previous': previous,
              'results': data}
    try:
        od = collections.OrderedDict(sorted(result.items()))
    except AttributeError:
        # Python < 2.7: collections has no OrderedDict; use the backport.
        od = OrderedDict(sorted(result.items()))
    return od
def to_native(self, value):
    """Query-string-only link to the next page; None when no next page exists."""
    if not value.has_next():
        return None
    next_page = value.next_page_number()
    return replace_query_param('', self.page_field, next_page)
class ListModelMixin(ListModelMixin):
    """
    List a queryset.
    Should be mixed in with `MultipleObjectAPIView`.

    Supports three listing modes: HTTP Range-header slicing (emitting 206
    responses with Content-Range), Link-header pagination metadata, and
    classic body pagination.

    Fix from review: ``except TypeError, AttributeError:`` is Python 2 syntax
    that catches ONLY TypeError and binds it to the name ``AttributeError`` —
    the intended two-type catch is ``except (TypeError, AttributeError):``.
    """
    empty_error = u"Empty list and '%(class_name)s.allow_empty' is False."
    object_list = None

    def parse_range_header(self, result_range):
        # Parse a comma-separated byte-range-style expression into parallel
        # (starts, ends) lists; `end` stays None for open-ended ranges.
        starts = []
        ends = []
        for result_range in result_range.split(","):
            start = end = None
            if result_range.startswith("-"):
                # Suffix range: negative start, resolved against the count later.
                start = int(result_range)  # + 1
            elif result_range.endswith("-"):
                start = int(result_range.split("-")[0])
            else:
                start, end = result_range.split("-")
                # Ranges are inclusive on the wire; Python slices are not.
                start, end = int(start), int(end) + 1
            starts.append(start)
            ends.append(end)
        return starts, ends

    # Maps external sort identifiers to model field names.
    sort_by_identifier_mapping = {
        'first_name': ['firstname'],
        'last_name': ['lastname'],
    }

    def translate_sort_by_identifier(self, sort_by_identifier):
        # Return the canonical field name for an external identifier, or the
        # identifier itself when no mapping entry lists it.
        for key, value in self.sort_by_identifier_mapping.iteritems():
            if sort_by_identifier in value:
                return key
        return sort_by_identifier

    def apply_sorting(self, object_list):
        # Apply ?sort-by/?sort-method ordering and an optional ?startswith
        # prefix filter on the sort field.
        page_sort_by = self.request.GET.get('sort-by', None)
        page_sort_method = self.request.GET.get('sort-method', None)
        page_sort_startswith = self.request.GET.get('startswith', None)
        if page_sort_method in ['asc', 'desc'] and page_sort_by:
            object_list = object_list.order_by(
                ('-' if page_sort_method == "desc" else '') +
                self.translate_sort_by_identifier(page_sort_by)
            )
            if page_sort_startswith:
                # TODO: test if its a CHAR field or smth l that
                object_list = object_list.filter(**{
                    self.translate_sort_by_identifier(page_sort_by) +
                    '__startswith': page_sort_startswith
                })
            return object_list
        return object_list

    def list(self, request, *args, **kwargs):
        obj_list = getattr(self, 'object_list', None)
        if obj_list is None:
            obj_list = self.get_filtered_queryset()
        self.object_list = self.apply_sorting(obj_list)
        status_code = None
        headers = {}
        partial_content = False
        try:
            # use querysets .count() to get quantity of elements
            records_count = self.object_list.count()
        except (TypeError, AttributeError):
            # TypeError: [] / AttributeError: obj.__len__ might be available
            records_count = len(self.object_list)

        # Default is to allow empty querysets. This can be altered by setting
        # `.allow_empty = False`, to raise 404 errors on empty querysets.
        allow_empty = self.allow_empty
        if not allow_empty and records_count == 0:
            error_args = {'class_name': self.__class__.__name__}
            raise Http404(self.empty_error % error_args)

        if 'HTTP_RANGE' in self.request.META:
            token, result_range = self.request.META['HTTP_RANGE'].split("=")
            if token == self.settings.PAGINATION_RANGE_HEADER_TOKEN:
                try:
                    ranges = []
                    records_start, records_end = self.parse_range_header(
                        result_range)
                    for range_start, range_end in zip(records_start,
                                                      records_end):
                        if range_start is not None and range_start < 0:
                            # Querysets don't support negative indexing (yet?)
                            range_start = records_count + range_start
                        ranges.append((range_start, range_end))
                    if len(ranges) > 1:
                        raise Exception  # currently not available
                except:
                    return Response(
                        status=status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,
                        headers=headers)
                # ranges can be comma separated, so multiple lists are
                # possibly requested
                limited_object_list = itertools.chain(*[
                    self.object_list[record_start:record_end]
                    for record_start, record_end in ranges
                ])
                serializer = self.get_serializer(limited_object_list,
                                                 many=True)
                partial_content = True
            else:
                return Response(
                    status=status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,
                    headers=headers)
        else:
            # Pagination size is set by the `.paginate_by` attribute,
            # which may be `None` to disable pagination.
            page_size = self.get_paginate_by(self.object_list)
            page_nr = int(self.request.GET.get('page', 0))
            if page_size:
                packed = self.paginate_queryset(self.object_list, page_size)
                paginator, page, queryset, is_paginated = packed
                if self.settings.PAGINATION_IN_HEADER:
                    headers['Link'] = headers.get('Link', '')
                    url = self.request and self.request.build_absolute_uri(
                    ) or ''
                    first_url = replace_query_param(url, 'page', 1)
                    last_url = replace_query_param(url, 'page',
                                                   paginator.num_pages)
                    if len(headers['Link']):
                        headers['Link'] += ', '
                    headers['Link'] += \
                        '<%(url)s>; rel="section"; title="first"' % {
                            'url': first_url
                        }
                    headers['Link'] += \
                        ', <%(url)s>; rel="section"; title="last"' % {
                            'url': last_url
                        }
            if page_size and page_nr:
                if self.settings.PAGINATION_IN_HEADER:
                    # Header-driven pagination serializes the raw slice and
                    # advertises neighbours via the Link header.
                    ranges = (((page.number - 1) * page_size,
                               page.number * page_size), )
                    limited_object_list = itertools.chain(*[
                        self.object_list[record_start:record_end]
                        for record_start, record_end in ranges
                    ])
                    serializer = self.get_serializer(limited_object_list,
                                                     many=True)
                    if page.has_other_pages():
                        url = self.request and \
                            self.request.build_absolute_uri() or ''
                        if page.has_next():
                            next_url = replace_query_param(
                                url, 'page', page.next_page_number())
                            headers['Link'] += \
                                ', <%(url)s>; rel="section"; title="next"' % {
                                    'url': next_url
                                }
                        if page.has_previous():
                            prev_url = replace_query_param(
                                url, 'page', page.previous_page_number())
                            headers['Link'] += \
                                ', <%(url)s>; rel="section"; ' \
                                'title="previous"' % {
                                    'url': prev_url
                                }
                else:
                    serializer = self.get_pagination_serializer(page)
            else:
                serializer = self.get_serializer(self.object_list, many=True)
            headers['Accept-Ranges'] = \
                self.settings.PAGINATION_RANGE_HEADER_TOKEN

        if partial_content:
            status_code = status.HTTP_206_PARTIAL_CONTENT
            # currently just 1 range processable
            cur_range = ranges[0]
            headers['Content-Range'] = \
                '%(token)s %(records_start)d-%(records_end)d/%(records_count)d' % {
                    'token': self.settings.PAGINATION_RANGE_HEADER_TOKEN,
                    'records_count': records_count,
                    'records_start': cur_range[0] or 0,
                    'records_end': min(
                        (cur_range[1] - 1) if cur_range[1] is not None
                        else records_count,
                        records_count - 1),
                }
            headers['Accept-Ranges'] = \
                self.settings.PAGINATION_RANGE_HEADER_TOKEN
        headers.update(
            self.get_response_headers(request, status_code,
                                      serializer=serializer))
        return Response(serializer.data, status=status_code, headers=headers)
def paginate(self, queryset, request):
    """Slice `queryset` by offset/limit query params and build a HAL-style page dict.

    The returned OrderedDict carries totalCount/startRow/endRow, _links
    (self/previous/next) and the serialized rows under _embedded.
    """
    # Paging parameter names and bounds come from the REST_FRAMEWORK settings.
    limit_p = settings.REST_FRAMEWORK.get('PAGINATE_BY_PARAM', 'limit')
    offset_p = settings.REST_FRAMEWORK.get('PAGINATE_PARAM', 'offset')
    max_limit = settings.REST_FRAMEWORK.get('MAX_PAGINATE_BY', 500)
    default_limit = settings.REST_FRAMEWORK.get('PAGINATE_BY', 10)
    offset = int(request.QUERY_PARAMS.get(offset_p, 0))
    limit = min(int(request.QUERY_PARAMS.get(limit_p, default_limit)), max_limit)
    page = queryset[offset:offset + limit]

    # Clamp the end row to the queryset's real size; an empty queryset has
    # neither a start nor an end row.
    total_count = queryset.count()
    if total_count > 0:
        end_row = min(offset + limit - 1, total_count - 1)
    else:
        end_row = None
        offset = None

    url = request.build_absolute_uri()
    # Previous/next offsets; None means "no such page".
    prev_offset = None
    if offset:  # offset None or 0 both mean there is no previous page
        prev_offset = max(offset - limit, 0)
    next_offset = None
    if offset is not None and end_row != total_count - 1:
        next_offset = offset + limit
    prev_page = None
    if prev_offset is not None:
        prev_page = urllib.unquote(
            replace_query_param(url, offset_p, prev_offset))
    next_page = None
    if next_offset is not None:
        next_page = urllib.unquote(
            replace_query_param(url, offset_p, next_offset))

    resource_name = render.underscoreToCamel(self.resource_name)
    resource_list = self.get_serializer(
        instance=page, force_refresh=True,
        context={'request': request, 'view': self}).data

    # page_data elements dictate what shows up in the API for page-level
    # metadata
    page_data = OrderedDict()
    page_data['totalCount'] = total_count
    if offset is not None:
        page_data['startRow'] = offset
    if end_row is not None:
        page_data['endRow'] = end_row
    page_data['_links'] = OrderedDict()
    page_data['_links']['self'] = {'href': url}
    if prev_page is not None:
        page_data['_links']['previous'] = {'href': prev_page}
    if next_page is not None:
        page_data['_links']['next'] = {'href': next_page}
    if resource_list:
        page_data['_embedded'] = {resource_name: resource_list}
    return page_data
def add_query_param(url, **query):
    """Set each keyword argument as a query parameter on `url` and return it."""
    for name, value in query.items():
        url = replace_query_param(url, name, value)
    return url
def to_native(self, value):
    """Query-string-only link to the previous page; None when on page 1."""
    if not value.has_previous():
        return None
    return replace_query_param('', self.page_field, value.previous_page_number())
def _get_page_link(self, value, page):
    """Link object addressing `page`; None when the paginator holds no results."""
    if not value.paginator.count:
        return None
    uri = self._get_current_uri()
    return self._get_link_object(
        replace_query_param(uri, self.page_field, page))
def get_next_link(self):
    """Return the current URL with the page parameter advanced by one."""
    url = self.request.build_absolute_uri()
    next_page = self.get_page() + 1
    return replace_query_param(url, self.page_query_param, next_page)
def paginate(self, queryset, request):
    """Offset/limit pagination producing a HAL-style metadata dict.

    Reads the offset and limit query parameters (names and caps taken from
    REST_FRAMEWORK settings), slices the queryset, and returns an OrderedDict
    with totalCount/startRow/endRow, _links and _embedded entries.
    """
    conf = settings.REST_FRAMEWORK
    limit_p = conf.get('PAGINATE_BY_PARAM', 'limit')
    offset_p = conf.get('PAGINATE_PARAM', 'offset')
    max_limit = conf.get('MAX_PAGINATE_BY', 500)
    default_limit = conf.get('PAGINATE_BY', 10)

    offset = int(request.QUERY_PARAMS.get(offset_p, 0))
    limit = int(request.QUERY_PARAMS.get(limit_p, default_limit))
    limit = max_limit if limit > max_limit else limit
    page = queryset[offset:offset + limit]

    # make sure the end row num is not > the total count of the queryset
    total_count = queryset.count()
    if total_count > 0:
        end_row = offset + limit - 1
        end_row = total_count - 1 if end_row > total_count - 1 else end_row
    else:
        end_row = None
        offset = None

    url = request.build_absolute_uri()
    # determine the previous and next offsets for the previous and next
    # pages of results
    if offset is None or offset == 0:
        prev_offset = None
    else:
        prev_offset = offset - limit if offset - limit >= 0 else 0
    if offset is None or end_row == total_count - 1:
        next_offset = None
    else:
        next_offset = offset + limit

    prev_page = None if prev_offset is None else urllib.unquote(
        replace_query_param(url, offset_p, prev_offset))
    next_page = None if next_offset is None else urllib.unquote(
        replace_query_param(url, offset_p, next_offset))

    resource_name = render.underscoreToCamel(self.resource_name)
    serializer_context = {'request': request, 'view': self}
    resource_list = self.get_serializer(instance=page,
                                        force_refresh=True,
                                        context=serializer_context).data

    # page_data elements dictate what shows up in the API for page-level
    # metadata
    page_data = OrderedDict()
    page_data['totalCount'] = total_count
    if offset is not None:
        page_data['startRow'] = offset
    if end_row is not None:
        page_data['endRow'] = end_row
    links = OrderedDict()
    links['self'] = {'href': url}
    if prev_page is not None:
        links['previous'] = {'href': prev_page}
    if next_page is not None:
        links['next'] = {'href': next_page}
    page_data['_links'] = links
    if resource_list:
        page_data['_embedded'] = {resource_name: resource_list}
    return page_data