def test_overlap(self):
    """Exercise ``BufferedPaginator.get_overlap`` with the paginator's own
    buffer size and with explicitly supplied buffer sizes."""
    paginator = BufferedPaginator(
        count=100,
        offset=50,
        buf_size=10,
        object_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        per_page=2,
    )

    # Overlap computed from the paginator's configured buf_size.
    implicit_cases = [
        (45, (True, (45, 5), (None, None))),
        (47, (True, (47, 3), (None, None))),
        (55, (True, (None, None), (61, 5))),
        (52, (True, (None, None), (61, 2))),
        (20, (False, (20, 10), (None, None))),
        (70, (False, (70, 10), (None, None))),
    ]
    for start, expected in implicit_cases:
        self.assertEqual(paginator.get_overlap(start), expected)

    # Overlap with an explicit buffer size passed per call.
    explicit_cases = [
        (47, 14, (True, (47, 3), (61, 1))),
        (20, 100, (True, (20, 30), (61, 60))),
        (55, 12, (True, (None, None), (61, 7))),
        (20, 8, (False, (20, 8), (None, None))),
        (70, 3, (False, (70, 3), (None, None))),
    ]
    for start, size, expected in explicit_cases:
        self.assertEqual(paginator.get_overlap(start, size), expected)
def test_base(self):
    """A buffer anchored at offset 0 should cache the first five pages."""
    paginator = BufferedPaginator(
        count=100,
        offset=0,
        buf_size=10,
        object_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        per_page=2,
    )

    self.assertEqual(paginator.num_pages, 50)
    self.assertEqual(paginator.cached_page_indices(), (1, 6))
    self.assertEqual(paginator.cached_pages(), 5)

    # Page 2 lies inside the buffer; page 6 is just past its end.
    self.assertTrue(paginator.page(2).in_cache())
    self.assertFalse(paginator.page(6).in_cache())
def test_base(self):
    """Sanity checks for a buffer anchored at the start of the results."""
    options = {
        "count": 100,
        "offset": 0,
        "buf_size": 10,
        "object_list": list(range(10)),
        "per_page": 2,
    }
    bp = BufferedPaginator(**options)

    # 100 objects at 2 per page, with the first 10 objects buffered.
    self.assertEqual(bp.num_pages, 50)
    self.assertEqual(bp.cached_page_indices(), (1, 6))
    self.assertEqual(bp.cached_pages(), 5)
    self.assertTrue(bp.page(2).in_cache())
    self.assertFalse(bp.page(6).in_cache())
def test_partial(self):
    """The buffer covers only part of a single oversized page."""
    kwargs = {
        'count': 20,
        'offset': 0,
        'buf_size': 10,
        'object_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        'per_page': 40,
    }

    bp = BufferedPaginator(**kwargs)
    self.assertEqual(bp.num_pages, 1)
    self.assertEqual(bp.cached_page_indices(), (1, 2))
    self.assertEqual(bp.cached_pages(), 1)

    first = bp.page(1)
    self.assertTrue(first.in_cache())
    self.assertEqual((first.start_index(), first.end_index()), (1, 20))
    self.assertEqual(first.object_list, kwargs['object_list'])

    # Shift the buffer into the middle of the page; no page is then
    # fully cached.
    kwargs['offset'] = 10
    bp = BufferedPaginator(**kwargs)
    self.assertEqual(bp.num_pages, 1)
    self.assertEqual(bp.cached_page_indices(), (0, 0))
    self.assertEqual(bp.cached_pages(), 0)
def test_partial(self):
    """Partial buffers: only pages fully contained in the buffer count
    as cached."""
    options = dict(
        count=20,
        offset=0,
        buf_size=10,
        object_list=list(range(10)),
        per_page=40,
    )

    paginator = BufferedPaginator(**options)
    self.assertEqual(paginator.num_pages, 1)
    self.assertEqual(paginator.cached_page_indices(), (1, 2))
    self.assertEqual(paginator.cached_pages(), 1)

    page_one = paginator.page(1)
    self.assertTrue(page_one.in_cache())
    self.assertEqual((page_one.start_index(), page_one.end_index()), (1, 20))
    self.assertEqual(page_one.object_list, options["object_list"])

    # A mid-page offset leaves no page entirely within the buffer.
    options["offset"] = 10
    paginator = BufferedPaginator(**options)
    self.assertEqual(paginator.num_pages, 1)
    self.assertEqual(paginator.cached_page_indices(), (0, 0))
    self.assertEqual(paginator.cached_pages(), 0)
def get(self, request):
    """Serve the current view/context as paginated JSON or as a file export.

    Without an ``export`` GET parameter, one page of results is serialized
    to JSON for browser-based consumption. With ``export=<type>``, the full
    result set is written to the response as a file attachment in the
    requested format.

    Fixes relative to the previous revision:
    - zip exports assigned the payload to ``request.content`` instead of
      ``resp.content``, so the response body was empty;
    - the "next" pagination link was omitted on the second-to-last page
      due to an off-by-one in the guard condition.
    """
    params = self.get_params(request)

    # GET param to explicitly export the data
    export_type = params.get('export')

    # Attempt to get the appropriate view and context objects
    view = self.get_view(request)
    context = self.get_context(request)

    view_node = view.node()

    # Special case for browser-based consumption
    if not export_type:
        exporter_class = JSONHTMLExporter
        exporter = exporter_class(view_node.columns)

        page = params.get('page')

        try:
            per_page = int(params.get('per_page'))
        except (ValueError, TypeError):
            per_page = self.param_defaults['per_page']

        # For new contexts, `count` will be `None`
        if context.count is None:
            context.count = context.apply().distinct().count()
            context.save()

        paginator = BufferedPaginator(context.count, per_page=per_page)

        try:
            page = paginator.page(page)
        except PageNotAnInteger:
            page = paginator.page(1)
        except EmptyPage:
            page = paginator.page(paginator.num_pages)

        # Get the current offset
        offset = page.offset()

        # Build the queryset
        queryset = view.apply(context.apply()).distinct()

        # Slice and prepare as a raw query (note: this is a
        # ModelTreeQuerySet method and is not built-in to Django)
        iterator = queryset[offset:offset + per_page].raw()

        # Insert formatter to process the primary key as a raw value
        pk_name = queryset.model._meta.pk.name
        exporter.params.insert(0, (RawFormatter(keys=[pk_name]), 1))

        # Build up the header values
        header = []
        ordering = OrderedDict(view_node.ordering)

        for concept in view_node.columns:
            obj = {'id': concept.id, 'name': concept.name}
            if concept.id in ordering:
                obj['direction'] = ordering[concept.id]
            header.append(obj)

        objects = []
        for row in exporter.read(iterator):
            pk = None
            values = []
            for i, output in enumerate(row):
                if i == 0:
                    # First output holds the raw-formatted primary key
                    pk = output[pk_name]
                else:
                    values.extend(output.values())
            objects.append({'pk': pk, 'values': values})

        # Various other attributes about the model
        model_meta = queryset.model._meta
        model_name = model_meta.verbose_name.format()
        model_name_plural = model_meta.verbose_name_plural.format()

        data = {
            'keys': header,
            'objects': objects,
            'object_name': model_name,
            'object_name_plural': model_name_plural,
            'num_pages': paginator.num_pages,
            'page_num': page.number,
        }

        # Augment previous and next page links if other pages exist
        links = {
            'self': {
                'rel': 'self',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number),
            },
            'base': {
                'rel': 'base',
                'href': reverse('serrano:exporter'),
            }
        }
        if page.number != 1:
            links['prev'] = {
                'rel': 'prev',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number - 1),
            }
        # Fixed off-by-one: `< num_pages - 1` dropped the link on the
        # second-to-last page. A next page exists whenever the current
        # page is not the last one.
        if page.number < paginator.num_pages:
            links['next'] = {
                'rel': 'next',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number + 1),
            }
        data['_links'] = links

        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder),
                            content_type='application/json')

    # Handle an explicit export type to a file
    resp = HttpResponse()

    if export_type not in EXPORT_TYPES:
        types = ', '.join(EXPORT_TYPES)
        resp.content = "Export type '{0}' is not supported. Choose one " \
            "of the following: {1}".format(export_type, types)
        resp.status_code = codes.unprocessable_entity
        return resp

    exporter_class = exporters[export_type]
    exporter = exporter_class(view_node.columns)

    queryset = view.apply(context.apply(), include_pk=False).distinct()
    iterator = queryset.raw()

    file_extension = exporter_class.file_extension
    filename = '{0}-data.{1}'.format(datetime.now(), file_extension)

    if file_extension == 'zip':
        zipball = exporter.write(iterator)
        # Fixed: payload was previously assigned to `request.content`,
        # which left the HTTP response body empty for zip exports.
        resp.content = zipball
    else:
        exporter.write(iterator, resp)

    # Map the file extension to a MIME type for the response headers
    if file_extension == 'zip':
        content_type = 'application/zip'
    elif file_extension == 'xlsx':
        content_type = 'application/vnd.ms-excel'
    elif file_extension == 'csv':
        content_type = 'text/csv'
    elif file_extension == 'json':
        content_type = 'application/json'
    else:
        content_type = 'text/plain'

    resp['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
    resp['Content-Type'] = content_type

    return resp
def get(self, request):
    """Serve the current view/context as paginated JSON or as a file export.

    Without an ``export`` GET parameter, one page of results is serialized
    to JSON for browser-based consumption. With ``export=<type>``, the full
    result set is written to the response as a file attachment in the
    requested format.

    Fixes relative to the previous revision:
    - zip exports assigned the payload to ``request.content`` instead of
      ``resp.content``, so the response body was empty;
    - the "next" pagination link was omitted on the second-to-last page
      due to an off-by-one in the guard condition.
    """
    params = self.get_params(request)

    # GET param to explicitly export the data
    export_type = params.get('export')

    # Attempt to get the appropriate view and context objects
    view = self.get_view(request)
    context = self.get_context(request)

    view_node = view.node()

    # Special case for browser-based consumption
    if not export_type:
        exporter_class = JSONHTMLExporter
        exporter = exporter_class(view_node.columns)

        page = params.get('page')

        try:
            per_page = int(params.get('per_page'))
        except (ValueError, TypeError):
            per_page = self.param_defaults['per_page']

        # For new contexts, `count` will be `None`
        if context.count is None:
            context.count = context.apply().distinct().count()
            context.save()

        paginator = BufferedPaginator(context.count, per_page=per_page)

        try:
            page = paginator.page(page)
        except PageNotAnInteger:
            page = paginator.page(1)
        except EmptyPage:
            page = paginator.page(paginator.num_pages)

        # Get the current offset
        offset = page.offset()

        # Build the queryset
        queryset = view.apply(context.apply()).distinct()

        # Slice and prepare as a raw query (note: this is a
        # ModelTreeQuerySet method and is not built-in to Django)
        iterator = queryset[offset:offset + per_page].raw()

        # Insert formatter to process the primary key as a raw value
        pk_name = queryset.model._meta.pk.name
        exporter.params.insert(0, (RawFormatter(keys=[pk_name]), 1))

        # Build up the header values
        header = []
        ordering = OrderedDict(view_node.ordering)

        for concept in view_node.columns:
            obj = {'id': concept.id, 'name': concept.name}
            if concept.id in ordering:
                obj['direction'] = ordering[concept.id]
            header.append(obj)

        objects = []
        for row in exporter.read(iterator):
            pk = None
            values = []
            for i, output in enumerate(row):
                if i == 0:
                    # First output holds the raw-formatted primary key
                    pk = output[pk_name]
                else:
                    values.extend(output.values())
            objects.append({'pk': pk, 'values': values})

        # Various other attributes about the model
        model_meta = queryset.model._meta
        model_name = model_meta.verbose_name.format()
        model_name_plural = model_meta.verbose_name_plural.format()

        data = {
            'keys': header,
            'objects': objects,
            'object_name': model_name,
            'object_name_plural': model_name_plural,
            'num_pages': paginator.num_pages,
            'page_num': page.number,
        }

        # Augment previous and next page links if other pages exist
        links = {
            'self': {
                'rel': 'self',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number),
            },
            'base': {
                'rel': 'base',
                'href': reverse('serrano:exporter'),
            }
        }
        if page.number != 1:
            links['prev'] = {
                'rel': 'prev',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number - 1),
            }
        # Fixed off-by-one: `< num_pages - 1` dropped the link on the
        # second-to-last page. A next page exists whenever the current
        # page is not the last one.
        if page.number < paginator.num_pages:
            links['next'] = {
                'rel': 'next',
                'href': reverse('serrano:exporter') + '?page=' +
                    str(page.number + 1),
            }
        data['_links'] = links

        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder),
                            content_type='application/json')

    # Handle an explicit export type to a file
    resp = HttpResponse()

    if export_type not in EXPORT_TYPES:
        types = ', '.join(EXPORT_TYPES)
        resp.content = "Export type '{0}' is not supported. Choose one " \
            "of the following: {1}".format(export_type, types)
        resp.status_code = codes.unprocessable_entity
        return resp

    exporter_class = exporters[export_type]
    exporter = exporter_class(view_node.columns)

    queryset = view.apply(context.apply(), include_pk=False).distinct()
    iterator = queryset.raw()

    file_extension = exporter_class.file_extension
    filename = '{0}-data.{1}'.format(datetime.now(), file_extension)

    if file_extension == 'zip':
        zipball = exporter.write(iterator)
        # Fixed: payload was previously assigned to `request.content`,
        # which left the HTTP response body empty for zip exports.
        resp.content = zipball
    else:
        exporter.write(iterator, resp)

    # Map the file extension to a MIME type for the response headers
    if file_extension == 'zip':
        content_type = 'application/zip'
    elif file_extension == 'xlsx':
        content_type = 'application/vnd.ms-excel'
    elif file_extension == 'csv':
        content_type = 'text/csv'
    elif file_extension == 'json':
        content_type = 'application/json'
    else:
        content_type = 'text/plain'

    resp['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
    resp['Content-Type'] = content_type

    return resp
def test_offset(self):
    """Positive and negative buffer offsets map to the same cached pages."""
    kwargs = {
        'count': 100,
        'offset': 40,
        'buf_size': 10,
        'object_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        'per_page': 2,
    }

    def check(paginator):
        # Buffer spans objects 40-49 -> pages 21 through 25 cached.
        self.assertEqual(paginator.num_pages, 50)
        self.assertEqual(paginator.cached_page_indices(), (21, 26))
        self.assertEqual(paginator.cached_pages(), 5)
        self.assertFalse(paginator.page(20).in_cache())
        self.assertTrue(paginator.page(21).in_cache())
        self.assertFalse(paginator.page(26).in_cache())

    check(BufferedPaginator(**kwargs))

    # try as a negative offset
    kwargs['offset'] = -60
    check(BufferedPaginator(**kwargs))
def test_offset(self):
    """A negative offset is equivalent to the same position from the end."""
    options = dict(
        count=100,
        offset=40,
        buf_size=10,
        object_list=list(range(10)),
        per_page=2,
    )

    paginator = BufferedPaginator(**options)
    self.assertEqual(paginator.num_pages, 50)
    self.assertEqual(paginator.cached_page_indices(), (21, 26))
    self.assertEqual(paginator.cached_pages(), 5)
    self.assertFalse(paginator.page(20).in_cache())
    self.assertTrue(paginator.page(21).in_cache())
    self.assertFalse(paginator.page(26).in_cache())

    # -60 from a count of 100 resolves to the same offset of 40.
    options["offset"] = -60
    paginator = BufferedPaginator(**options)
    self.assertEqual(paginator.num_pages, 50)
    self.assertEqual(paginator.cached_page_indices(), (21, 26))
    self.assertEqual(paginator.cached_pages(), 5)
    self.assertFalse(paginator.page(20).in_cache())
    self.assertTrue(paginator.page(21).in_cache())
    self.assertFalse(paginator.page(26).in_cache())