def render(self, data, media_type=None, renderer_context=None):
    """
    Render serialized *data* into CSV for use with Django's
    StreamingHttpResponse.

    Yields one encoded CSV row at a time so Django can stream the
    response instead of buffering the whole document.

    >>> renderer = CSVStreamingRenderer()
    >>> renderer.headers = ['a', 'b']
    >>> data = [{'a': 1, 'b': 2}]
    >>> from django.http import StreamingHttpResponse
    >>> response = StreamingHttpResponse(renderer.render(data),
    ...                                  content_type='text/csv')
    >>> response['Content-Disposition'] = 'attachment; filename="f.csv"'
    >>> # return response
    """
    # Avoid the shared mutable-default-argument pitfall.
    if renderer_context is None:
        renderer_context = {}
    if data is None:
        # Nothing to render: emit an empty body and STOP.  Previously
        # execution fell through and tried to tablize [None].
        yield ''
        return

    self.labels = renderer_context.get('labels', self.labels)

    if not isinstance(data, list):
        data = [data]

    table = self.tablize(data)
    csv_buffer = Echo()
    csv_writer = csv.writer(csv_buffer)
    for row in table:
        # Assume that strings should be encoded as UTF-8 (Python 2 only;
        # on Python 3 elements pass through unchanged).
        yield csv_writer.writerow([
            elem.encode('utf-8') if isinstance(elem, text_type) and PY2
            else elem
            for elem in row
        ])
def render(self, data, media_type=None, renderer_context=None):
    """
    Render serialized *data* into CSV for use with Django's
    StreamingHttpResponse.

    Yields one CSV row at a time so Django can stream the response.

    >>> renderer = CSVStreamingRenderer()
    >>> renderer.header = ['a', 'b']
    >>> data = [{'a': 1, 'b': 2}]
    >>> from django.http import StreamingHttpResponse
    >>> response = StreamingHttpResponse(renderer.render(data),
    ...                                  content_type='text/csv')
    >>> response['Content-Disposition'] = 'attachment; filename="f.csv"'
    >>> # return response
    """
    # Avoid the shared mutable-default-argument pitfall.
    if renderer_context is None:
        renderer_context = {}
    if data is None:
        # Nothing to render: emit an empty body and STOP.  Previously
        # execution fell through and tried to tablize [None].
        yield ''
        return

    if not isinstance(data, list):
        data = [data]

    # Read per-request options into locals.  The old code additionally
    # wrote renderer_context['labels'] back onto self.labels, mutating
    # shared renderer state per request while the local `labels` below
    # was fetched anyway; that redundant mutation is removed.
    writer_opts = renderer_context.get('writer_opts', self.writer_opts or {})
    header = renderer_context.get('header', self.header)
    labels = renderer_context.get('labels', self.labels)
    encoding = renderer_context.get('encoding', settings.DEFAULT_CHARSET)

    table = self.tablize(data, header=header, labels=labels)
    csv_buffer = Echo()
    # NOTE(review): the `encoding` kwarg implies `csv` here is the
    # unicodecsv package (stdlib csv.writer has no such kwarg) — confirm.
    csv_writer = csv.writer(csv_buffer, encoding=encoding, **writer_opts)
    for row in table:
        yield csv_writer.writerow(row)
def render(
        self,
        data: dict,
        media_type: str = None,
        renderer_context: dict = None):
    """
    Prepare and render the response as streamed CSV rows.

    Expects ``data`` to carry 'queryset', 'serializer' and 'context'
    keys; yields a header row (derived from the serializer's fields)
    followed by one row per queryset item.  Yields a single ``None``
    if any of the required keys is missing.
    """
    # BUG FIX: the default was ``renderer_context: dict = dict`` — i.e.
    # the dict *type* itself, not an empty dict.  Use the None sentinel.
    if renderer_context is None:
        renderer_context = {}
    try:
        queryset = data['queryset']
        serializer = data['serializer']
        context = data['context']
    except KeyError:
        yield None
    else:
        csv_buffer = Echo()
        csv_writer = csv.writer(csv_buffer)
        header_fields = []
        for item in queryset:
            # Emit the header row once, derived from the first item's
            # serializer fields.
            if not header_fields:
                header_fields = list(
                    serializer(item, context=context).fields)
                yield csv_writer.writerow(header_fields)
            # Emit the data row, ordered to match the header.
            items = serializer(item, context=context).data
            ordered = [items[column] for column in header_fields]
            yield csv_writer.writerow(ordered)
def render(self, data, media_type=None, renderer_context=None):
    """
    Stream ``data['queryset']`` as CSV rows.

    Emits a header row from ``self.header_fields`` when the queryset is
    non-empty, then serializes items in batches so large querysets are
    paged efficiently.
    """
    # Avoid the shared mutable-default-argument pitfall.
    if renderer_context is None:
        renderer_context = {}

    queryset = data['queryset']
    serializer = data['serializer']
    context = data['context']

    csv_buffer = Echo()
    csv_writer = csv.writer(csv_buffer)

    # Header row
    if queryset.count():
        yield csv_writer.writerow(self.header_fields.values())

    # Need to efficiently page through querysets.
    if isinstance(queryset, django.db.models.query.QuerySet):
        queryset = batched_queryset(queryset, chunksize=25)
    else:
        # This should be built into the LazySearch object, but it's not...
        queryset = LazySearchIterator(queryset)

    # Data rows
    for item in queryset:
        items = serializer(item, context=context).data
        # Preserve the `header_fields` column ordering.
        ordered = [items[column] for column in self.header_fields]
        # Encode text as UTF-8 on Python 2 only.
        yield csv_writer.writerow([
            elem.encode('utf-8')
            if isinstance(elem, six.text_type) and six.PY2
            else elem
            for elem in ordered
        ])
def render(self, data, media_type=None, renderer_context=None,
           writer_opts=None):
    """
    Stream *data* as CSV rows.

    The column set may be restricted by a comma-separated ``fields``
    query parameter on the current request; otherwise the configured
    header is used.
    """
    import csv

    # BUG FIX: the default renderer_context is None, but the original
    # called renderer_context.get() unguarded and crashed with the
    # default.
    if renderer_context is None:
        renderer_context = {}
    if data is None:
        # Nothing to render: emit an empty body and STOP (the original
        # fell through and kept processing None).
        yield ''
        return

    if not isinstance(data, list):
        # Unwrap paginated payloads ({results_field: [...]}) if present.
        data = data.get(self.results_field, data)
    if not isinstance(data, list):
        data = [data]

    header = renderer_context.get('header', self.header)
    labels = renderer_context.get('labels', self.labels)
    writer_opts = renderer_context.get(
        'writer_opts', writer_opts or self.writer_opts or {})

    request = renderer_context.get('request')
    if request is not None:
        # A ?fields=a,b,c query parameter overrides the default header.
        requested = [
            field
            for field in request.query_params.get('fields', '').split(',')
            if field
        ]
        header = requested or header

    csv_writer = csv.writer(Echo(), **writer_opts)
    for row in self.tablize(data, header=header, labels=labels):
        yield csv_writer.writerow(list(row))
def render(self, data, renderer_context=None, *args, **kwargs):
    """
    Stream a CSV export of ``data['queryset']``, paging through it in
    batches of 50 via BatchPaginator.

    ``renderer_context['limits']`` may bound the export: a two-element
    (start, stop) item range, or a single-element (stop,) item count.
    """
    # Avoid the shared mutable-default-argument pitfall.
    if renderer_context is None:
        renderer_context = {}

    csv_buffer = Echo()
    csv_writer = csv.writer(csv_buffer, quoting=csv.QUOTE_ALL)
    queryset = data['queryset']
    serializer = data['serializer']

    # Translate item limits into a page range (50 items per page).
    page_range = None
    limits = renderer_context.get('limits')
    if limits and len(limits) == 2:
        from_item = limits[0] // 50 + 1 or 1
        to_item = limits[1] // 50
        # NOTE(review): this tests the *page number* modulo 50 rather
        # than the item remainder (limits[1] % 50), unlike the
        # single-limit branch below — verify intent against callers.
        if to_item % 50:
            to_item += 1
        page_range = range(from_item, to_item)
    elif limits and len(limits) == 1:
        to_item = limits[0] // 50 + 1
        if limits[0] % 50:
            to_item += 1
        page_range = range(1, to_item)
    paginator = BatchPaginator(queryset, 50, page_range=page_range)

    # Rendering the header or label row was taken from the tablize
    # method in django-rest-framework-csv.
    header = renderer_context.get('header', self.header)
    labels = renderer_context.get('labels', self.labels)
    if labels:
        yield csv_writer.writerow([labels.get(x, x) for x in header])
    elif header:
        yield csv_writer.writerow(header)

    for page in paginator.page_range:
        serialized = serializer(
            paginator.page(page).object_list, many=True).data
        # tablize yields a row generator; skip its header row (index 0)
        # with islice since we already emitted our own header above.
        table = self.tablize(
            serialized,
            header=header,
            labels=labels,
        )
        for row in itertools.islice(table, 1, None):
            yield csv_writer.writerow(row)
def get(self, request, domain):
    """Stream CSV search results from EBI Search for *domain*.

    Requires a ``total`` query parameter (total number of results);
    ``fields``, ``query`` and ``facets`` are forwarded to EBI Search.
    Returns a StreamingHttpResponse served as a CSV attachment.
    """
    page_size = 100
    if "total" not in request.GET:
        # HttpResponseBadRequest already carries status 400; the extra
        # status kwarg was redundant.
        return HttpResponseBadRequest(
            content="Missing 'total' pages params.")
    try:
        total = int(request.GET["total"])
    except ValueError:
        # Previously a non-numeric 'total' raised and produced a 500.
        return HttpResponseBadRequest(
            content="'total' must be an integer.")

    total_pages = math.ceil(total / page_size)
    fields = request.GET.get("fields", "").split(",")
    base = settings.EBI_SEARCH_URL + domain
    csv_buffer = Echo()
    csv_writer = csv.writer(csv_buffer)
    query = {
        "format": "json",
        "fields": request.GET.get("fields", ""),
        "query": request.GET.get("query", ""),
        "facets": request.GET.get("facets", ""),
    }

    def get_data():
        # Header row first.
        yield fields
        # range(total_pages) already covers every start < total, so the
        # old "+ 1 then bail when start >= total" dance is unnecessary.
        for page in range(total_pages):
            query["size"] = page_size
            query["start"] = page * page_size
            response = requests.get(base, params=query)
            if not response.ok:
                # BUG FIX: the old concatenation had no separators and
                # produced "...EBI SearchStatus Code: ...".
                raise Exception(
                    "There was an error downloading the data from "
                    "EBI Search. Status Code: {} Content: {}".format(
                        response.status_code, response.content))
            data = response.json()
            for entry in data.get("entries"):
                yield emg_utils.parse_ebi_search_entry(entry, fields)

    stream_res = StreamingHttpResponse(
        (csv_writer.writerow(row) for row in get_data()),
        content_type="text/csv")
    stream_res["Content-Disposition"] = (
        "attachment; filename=search_download.csv")
    return stream_res
def render(self, data, media_type=None, renderer_context=None):
    """
    Stream *data* as CSV rows.

    Handles three payload shapes:
    - a single ReturnDict: header row plus one flattened data row;
    - a BaseList queryset (SortedListField in the AnnotationModels);
    - a regular queryset, paged 25 items at a time to bound memory.

    Yields nothing when the required keys are missing or the queryset
    is empty.
    """
    # Avoid the shared mutable-default-argument pitfall.
    if renderer_context is None:
        renderer_context = {}

    csv_buffer = Echo()
    csv_writer = csv.writer(csv_buffer)

    # Single-object payload: header then one flattened row.
    if isinstance(data, ReturnDict):
        columns = list(data.keys())
        yield csv_writer.writerow(columns)
        flat_row = self.flatten([data[column] for column in columns])
        yield csv_writer.writerow(flat_row)
        return

    try:
        queryset = data['queryset']
        serializer = data['serializer']
        context = data['context']
    except KeyError:
        return

    if isinstance(queryset, BaseList):
        # Handle SortedListField in the AnnotationModels.
        # TODO: pending review as this has no pagination at the moment.
        if not queryset:
            return
        header_fields = list(
            serializer(queryset[0], context=context).fields)
        yield csv_writer.writerow(header_fields)
        for item in queryset:
            items = serializer(item, context=context).data
            ordered = [items[column] for column in header_fields]
            yield csv_writer.writerow(self.flatten(ordered))
        return

    # Regular queryset: page through it to keep memory bounded.
    total = queryset.count()
    page_size = 25
    header_fields = list(
        serializer(queryset.first(), context=context).fields)
    yield csv_writer.writerow(header_fields)
    for page in range(math.ceil(total / page_size)):
        for item in queryset[page * page_size:(page + 1) * page_size]:
            items = serializer(item, context=context).data
            ordered = [items[column] for column in header_fields]
            yield csv_writer.writerow(self.flatten(ordered))
def render(self, data, fieldnames):
    """Stream *data* (an iterable of dicts) as semicolon-delimited CSV.

    The first yielded row is a header built from *fieldnames*; each
    subsequent row is one item of *data*, columns ordered per
    *fieldnames*.
    """
    dict_writer = csv.DictWriter(
        Echo(),
        fieldnames=fieldnames,
        dialect="excel",
        delimiter=";",
    )
    # Header row: each field name mapped to itself.
    header_row = dict(zip(fieldnames, fieldnames))
    yield dict_writer.writerow(header_row)
    for record in data:
        yield dict_writer.writerow(record)