def livegeojson(request):
    # The original checked request.is_ajax() but returned the exact same
    # response in both branches, so the check has been collapsed.
    return StreamingHttpResponse(
        staticfiles_storage.open('live.geojson'),
        content_type="application/json")
def stream_video(request, path):
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_match = range_re.match(range_header)
    path = os.path.join(settings.MEDIA_ROOT, path)
    try:
        size = os.path.getsize(path)
    except FileNotFoundError:
        raise exceptions.NotFound('File not found')
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or 'application/octet-stream'
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        response = StreamingHttpResponse(
            RangeFileWrapper(open(path, 'rb'), offset=first_byte, length=length),
            status=206, content_type=content_type)
        response['Content-Length'] = str(length)
        response['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)
    else:
        response = StreamingHttpResponse(FileWrapper(open(path, 'rb')),
                                         content_type=content_type)
        response['Content-Length'] = str(size)
    response['Accept-Ranges'] = 'bytes'
    return response
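# Several snippets in this section (stream_video above, plus the S3, live-data
# and Mongo views below) reference a module-level `range_re` and a
# `RangeFileWrapper` helper that are never defined here. A minimal sketch,
# assuming the conventional implementation of this pattern; the regex matches
# the inline definition in the mission-file `list` view further down.
import os
import re

range_re = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)

class RangeFileWrapper:
    """Iterate over `length` bytes of `filelike`, starting at `offset`."""

    def __init__(self, filelike, blksize=8192, offset=0, length=None):
        self.filelike = filelike
        self.filelike.seek(offset, os.SEEK_SET)
        self.remaining = length  # None means "serve to EOF"
        self.blksize = blksize

    def close(self):
        if hasattr(self.filelike, 'close'):
            self.filelike.close()

    def __iter__(self):
        return self

    def __next__(self):
        if self.remaining is None:
            # Whole-file mode: read until EOF.
            data = self.filelike.read(self.blksize)
            if data:
                return data
            raise StopIteration()
        if self.remaining <= 0:
            raise StopIteration()
        data = self.filelike.read(min(self.remaining, self.blksize))
        if not data:
            raise StopIteration()
        self.remaining -= len(data)
        return data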
def download(request, name):
    try:
        request_headers = {}
        for key in ACCEPT_HEADERS:
            meta_key = key.replace('-', '_').upper()
            if meta_key in request.META:
                request_headers[key] = request.META[meta_key]
        f = default_storage.open(name)
        headers, data = f.connection.get_object(
            f.container_name, f.name,
            resp_chunk_size=1024 * 1024,
            headers=request_headers
        )
        response = StreamingHttpResponse(data)
        # viewitems() was Python 2 only; items() is used here instead.
        for key, value in headers.items():
            response[key] = value
    except BaseException as e:
        response = HttpResponse()
        if hasattr(e, 'http_status'):
            response.status_code = e.http_status
        else:
            response.status_code = 500
        logger.exception(e)
    return response
def get(self, request):
    url = request.GET.get('path')
    video = url.split('/')[-1]
    path = '/home/bjkim/' + video
    self.s3_client.download_file(AWS_STORAGE_BUCKET_NAME, video, path)
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_match = range_re.match(range_header)
    size = os.path.getsize(path)
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or 'application/octet-stream'
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        resp = StreamingHttpResponse(
            RangeFileWrapper(open(path, 'rb'), offset=first_byte, length=length),
            status=206, content_type=content_type)
        resp['Content-Length'] = str(length)
        resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)
    else:
        resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')),
                                     content_type=content_type)
        resp['Content-Length'] = str(size)
    resp['Accept-Ranges'] = 'bytes'
    return resp
def get_file(request):
    path = request.GET['path']
    if not is_allowed_file(path):
        return HttpResponse(status=404)
    response = StreamingHttpResponse()
    response['Accept-Ranges'] = 'bytes'
    if not os.path.exists(path.encode('utf-8')):
        response.status_code = 404
        return response
    if '.flac' in path.lower():
        response['Content-Type'] = 'audio/flac'
    elif '.mp3' in path.lower():
        response['Content-Type'] = 'audio/mpeg'
    response['Content-Length'] = os.path.getsize(path.encode('utf-8'))
    if request.method == 'HEAD':
        print('head')  # the original used the Python 2 print statement
        return response
    file = open(path.encode('utf-8'), 'rb')
    response.streaming_content = apply_range(request, response, file)
    return response
def stream_data(request, path, live=False):
    range_match = None
    if not live:
        range_header = request.META.get('HTTP_RANGE', '').strip()
        range_match = range_re.match(range_header)
    size = os.path.getsize(path)
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or 'application/octet-stream'
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        resp = StreamingHttpResponse(
            RangeFileWrapper(open(path, 'rb'), offset=first_byte, length=length),
            status=206, content_type=content_type)
        # resp['Content-Length'] = str(length)
        resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)
    else:
        resp = StreamingHttpResponse(live_read(open(path, 'rb')),
                                     content_type=content_type)
    resp['Accept-Ranges'] = 'bytes'
    return resp
def list(self, request, *args, **kwargs):
    range_re = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)
    m = self.queryset.get(id=self.kwargs.get('mission_uuid'))
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_match = range_re.match(range_header)
    file_path = m.mission_file.mission_file.path
    size = os.path.getsize(file_path)
    content_type = mimetypes.guess_type(file_path)[0]
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        resp = StreamingHttpResponse(
            RangeFileWrapper(open(file_path, 'rb'), offset=first_byte, length=length),
            status=206, content_type=content_type)
        resp['Content-Length'] = str(length)
        resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)
    else:
        resp = StreamingHttpResponse(FileWrapper(open(file_path, 'rb')),
                                     content_type=content_type)
        resp['Content-Length'] = str(size)
    resp['Accept-Ranges'] = 'bytes'
    resp['Content-Disposition'] = \
        f'inline; filename={m.mission_file.mission_file.name.split("/")[1]}'
    return resp
def infinite(request: HttpRequest):
    def generate():
        # Endless generator; renamed from `iter` to avoid shadowing the builtin.
        while True:
            yield '\r\n\r\nz' * 10000

    h = StreamingHttpResponse(generate())
    h.status_code = 303
    h['Location'] = '/normal/'
    return h
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Check if the response has information about a regular pending task.
    Loop and check for the task result if the task_id of a PENDING task is found.
    """
    # Ignore socket.io requests
    if not request.path.startswith('/api/'):
        return None

    response = view_func(request, *view_args, **view_kwargs)

    # A valid task response is always a TaskResponse object
    if not isinstance(response, TaskResponse):
        return response

    # api.task.views.task_status should immediately show task status
    if response.task_status:
        return response

    # Only if task/status is PENDING
    if response.status_code != HTTP_201_CREATED:
        return response

    # We need the task_id
    # noinspection PyBroadException
    try:
        task_id = response.data['task_id']
    except Exception:
        return response

    # This should never happen (Dummy task has its own Response class)
    if is_dummy_task(task_id):
        return response

    # Use streaming only if the client is es or es compatible
    stream = request.META.get('HTTP_ES_STREAM', None)

    if stream:
        # Render the pending response first, as it sets some headers (Content-Type)
        pending_response = response.rendered_content
        # Switch to a streaming response
        stream_res = StreamingHttpResponse(
            task_status_loop(request, pending_response, task_id, stream=stream),
            status=HTTP_201_CREATED)
        # Copy headers
        # noinspection PyProtectedMember
        stream_res._headers = response._headers
        # Set the custom es_stream header => es will process the stream correctly
        stream_res['es_stream'] = bool(stream)
        stream_res['es_task_id'] = task_id
        return stream_res
    else:
        return response
def get(self, request):
    resp = StreamingHttpResponse()
    command_kwargs = {
        'shell': False,
        'env': None,
        'cwd': None,
    }
    admin_script = getattr(settings, "ADMIN_SCRIPT", {})
    command = admin_script.get('args', "")
    if not command:
        return resp
    command_kwargs.update(
        admin_script,
        stdout=PIPE,
        stderr=STDOUT,
        bufsize=0,
        close_fds=True,
        preexec_fn=os.setsid,
    )
    resp['Connection'] = "Keep-Alive"
    doc_start = [
        '<!DOCTYPE html>',
        '<html lang="en">',
        '<head>',
        '<meta charset="utf-8">',
        '<title>output</title>',
        '<style>body {font-family: monospace; white-space: pre;}</style>',
        '</head>',
        '<body>',
    ]
    # The original emitted </html> before </body>; the order is fixed here.
    doc_end = [
        '<script>parent.done();</script>',
        '</body>',
        '</html>',
    ]
    scroll_to_bottom = '<script type="text/javascript">window.scrollBy(0,50);</script>'
    process = Popen(**command_kwargs)
    # Save the pid in the user's session (a thread-safe place)
    request.session['pid'] = process.pid

    def read_output():
        for line in iter(process.stdout.readline, b''):
            yield "%s%s" % (line, scroll_to_bottom)

    resp.streaming_content = itertools.chain(doc_start, read_output(), doc_end)
    return resp
def openStream(request, slug):
    # Get the file SQL object
    indexObject = None
    bsonDocumentKey = None
    try:
        indexObject = IndexObject.objects.get(id=slug)
        bsonDocumentKey = indexObject.fileReference
    except exceptions.ObjectDoesNotExist:
        return HttpResponse("not found")
    except Exception:
        return HttpResponse("500")

    # Get the file
    fileToStream = None
    try:
        fileToStream = mongoGetFile(bsonDocumentKey)
    except Exception:
        return HttpResponse("not found")

    # Stream the file
    filename = indexObject.name
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_match = range_re.match(range_header)

    # Calculate the size by seeking to the end of the file
    fileToStream.seek(0, 2)
    size = fileToStream.tell()
    fileToStream.seek(0)

    # Set the content type
    content_type, encoding = mimetypes.guess_type(filename)
    content_type = content_type or 'application/octet-stream'

    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        last_byte = int(last_byte) if last_byte else size - 1
        if last_byte >= size:
            last_byte = size - 1
        length = last_byte - first_byte + 1
        resp = StreamingHttpResponse(
            RangeFileWrapper(fileToStream, offset=first_byte, length=length),
            status=206, content_type=content_type)
        resp['Content-Length'] = str(length)
        resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)
    else:
        resp = StreamingHttpResponse(FileWrapper(fileToStream),
                                     content_type=content_type)
        resp['Content-Length'] = str(size)
    resp['Accept-Ranges'] = 'bytes'
    return resp
def testjob_csv(request, job):
    job = get_object_or_404(TestJob, pk=job)
    check_request_auth(request, job)

    def testjob_stream(suites, pseudo_buffer):
        fieldnames = testcase_export_fields()
        writer = csv.DictWriter(pseudo_buffer, fieldnames=fieldnames)
        # writer.writeheader does not return the string while writer.writerow
        # does. Copy the writeheader code from csv.py and yield the value.
        yield writer.writerow(dict(zip(fieldnames, fieldnames)))
        for test_suite in suites:
            for test_case in test_suite.testcase_set.all():
                yield writer.writerow(export_testcase(test_case))

    suites = job.testsuite_set.all().prefetch_related(
        "test_sets__test_cases__actionlevels"
    )
    pseudo_buffer = StreamEcho()
    response = StreamingHttpResponse(
        testjob_stream(suites, pseudo_buffer), content_type="text/csv"
    )
    filename = "lava_%s.csv" % job.id
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    return response
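# testjob_csv above and suite_csv_stream below both rely on a StreamEcho
# pseudo-buffer that is not defined in this section. A minimal sketch, assuming
# it works like the Echo class defined inline in csv_stream further down:
class StreamEcho:
    """File-like object whose write() simply returns the value it is given,
    so each csv.writer row can be yielded straight into the response."""

    def write(self, value):
        return value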
def get(self, request):
    if "dataset_id" not in request.query_params:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    dataset_id = request.query_params["dataset_id"]
    dataset = self.gpf_instance.get_wdae_wrapper(dataset_id)
    if dataset is None or not self.gpf_instance.has_pheno_data(dataset):
        return Response(status=status.HTTP_404_NOT_FOUND)
    instrument = request.query_params.get("instrument", None)
    if not instrument:
        measure_ids = list(dataset.phenotype_data.measures.keys())
        values_iterator = dataset.phenotype_data.get_values_streaming_csv(
            measure_ids)
        response = StreamingHttpResponse(
            values_iterator, content_type="text/csv")
    else:
        if instrument not in dataset.phenotype_data.instruments:
            return Response(status=status.HTTP_404_NOT_FOUND)
        df = dataset.phenotype_data.get_instrument_values_df(instrument)
        df_csv = df.to_csv(index=False, encoding="utf-8")
        response = HttpResponse(df_csv, content_type="text/csv")
    response["Content-Disposition"] = "attachment; filename=instrument.csv"
    response["Expires"] = "0"
    return response
def get(self, request):
    if "dataset_id" not in request.query_params:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    dataset_id = request.query_params["dataset_id"]
    dataset = self.gpf_instance.get_wdae_wrapper(dataset_id)
    if dataset is None or not self.gpf_instance.has_pheno_data(dataset):
        return Response(status=status.HTTP_404_NOT_FOUND)
    instrument = request.query_params.get("instrument", None)
    search_term = request.query_params.get("search", None)
    pheno_instruments = self.gpf_instance.get_instruments(dataset)
    if instrument and instrument not in pheno_instruments:
        return Response(status=status.HTTP_404_NOT_FOUND)
    data = self.gpf_instance.search_measures(
        dataset, instrument, search_term)
    response = StreamingHttpResponse(
        iterator_to_json(data),
        status=status.HTTP_200_OK,
        content_type="text/event-stream",
    )
    response["Cache-Control"] = "no-cache"
    return response
def stats_group_sales_data(request, event, type_id, prices="false"):
    item_type = ItemType.objects.get(event=event, id=int(type_id))
    formatter = stats.SalesData(
        event=event,
        as_prices=prices == "true",
        extra_filter=dict(item__itemtype=item_type))
    log_generator = stats.iterate_logs(formatter)
    return StreamingHttpResponse(log_generator, content_type='text/csv')
def get(self, request, *args, **kwargs):
    # Fetch only the unique_code column; flat=True yields plain strings
    # instead of the one-tuples the original produced, which xlwt cannot
    # write directly.
    codes = models.SurveyCode.objects.filter(
        survey=kwargs.get("pk")).values_list("unique_code", flat=True)
    codes = list(codes)
    survey_name = models.Survey.objects.filter(
        id=kwargs.get("pk")).values_list("name")[0]
    # Write the codes to an Excel workbook
    book = xlwt.Workbook()
    table = book.add_sheet("sheet1")
    table.write(0, 0, "唯一码号")
    # Iterate row by row to save memory
    for index, code in enumerate(codes, 1):
        table.write(index, 0, code)
    book.save("唯一码.xls")

    # Stream the file back in chunks
    def iter_file(path, size=1024):
        with open(path, 'rb') as f:
            for data in iter(lambda: f.read(size), b""):
                yield data

    response = StreamingHttpResponse(
        iter_file(os.path.join(settings.BASE_DIR, "唯一码.xls")))
    # content-type
    response['Content-Type'] = 'application/octet-stream'
    # content description
    response['Content-Disposition'] = 'attachment; {}'.format(
        "filename*={}".format(quote(str(survey_name[0]) + "--唯一码.xls"))
    )
    return response
def download_video(request, file_name):
    if request.session.get('is_logged_in'):
        user_id = request.session.get('user_ID')
        file_name = file_name.split("-")
        print(file_name[0])
        print(file_name[1])
        show_id = file_name[1]
        path = "E:\\" + file_name[0]
        if os.path.exists(path):
            file_path = path
            chunk_size = 1024
            # PUSH_into_db
            pushintoDhistory(show_id, user_id)
            response = StreamingHttpResponse(
                FileWrapper(open(file_path, 'rb'), chunk_size),
                content_type="video/mp4")
            response['Content-Length'] = os.path.getsize(file_path)
            response['Content-Disposition'] = \
                "attachment; filename=%s" % file_name[0]
            return response
        else:
            raise Http404
    else:
        return redirect("http://127.0.0.1:8000/user/login")
def videoStream(request):
    # url = 0 would use the local webcam; the original immediately
    # overwrote it with the IP-camera URL, so the dead assignment is dropped.
    url = "https://192.168.67.206:8080/video"
    camera = VideoCamera(url)
    return StreamingHttpResponse(
        framesGenerator(camera),
        content_type='multipart/x-mixed-replace; boundary=frame')
def sse(request):
    def itero():
        # Build one SSE frame from the database and yield it. The original
        # contained several later reassignments of `s` that were never
        # yielded; those leftover experiments are kept below as comments.
        # s = """retry: 2000\n\nevent: message\ndata: gg\n\n"""
        # data = json.dumps({'time': str(datetime.now())})
        db = connectToDB()
        data = retrieveAllData(db)
        s = '\n'.join(['retry: 1000', '\n', 'event: message',
                       'data: %s' % data, '\n'])
        yield s
        # for i in range(10):
        #     s = '\n'.join(['retry: 1000', '\n', 'event: message',
        #                    'data: %s' % i, '\n'])
        # for i in range(10):
        #     s = '\n'.join(['retry: 100000', '\n', 'event: message',
        #                    'data: %s' % i, '\n'])
        # s = 'data:\n\n'
        # s = ':\n\n'

    if request.META.get('HTTP_ACCEPT') == 'text/event-stream':
        response = StreamingHttpResponse(itero(), content_type="text/event-stream")
    else:
        response = HttpResponse(itero(), content_type="text/event-stream")
    return response
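# For reference: a well-formed SSE frame separates its fields with single
# newlines and is terminated by one blank line. A minimal illustrative helper
# (not part of the original code) that produces such frames:
def sse_frame(data, event='message', retry_ms=1000):
    return 'retry: %d\nevent: %s\ndata: %s\n\n' % (retry_ms, event, data)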
def csv_stream(filename, headers, data):
    '''
    Stream the given data to the client as a CSV.

    This is useful when data is generated and therefore not loaded all at
    once (e.g. when streaming a very large dataset). If the data is already
    fully loaded (e.g. a Python list) this function has no benefit over a
    non-streaming response.
    '''
    class Echo:
        '''
        Object implementing the file-like #write method by simply returning
        the given value, as StreamingHttpResponse cannot use file-like objects.
        '''
        def write(self, value):
            return value

    def stream(headers, data, writer):
        '''
        Generator to yield first the headers, then each data row.
        The data may itself be generated, so the stream uses little memory.
        '''
        if headers:
            yield writer.writerow(headers)
        for row in data:
            yield writer.writerow(row)

    pseudo_buffer = Echo()
    writer = csv.writer(pseudo_buffer, csv.excel)
    response = StreamingHttpResponse(stream(headers, data, writer),
                                     content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response
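# A hypothetical call to csv_stream (the view name and row generator below are
# illustrations, not part of the original code); memory stays flat because
# rows are serialized only as the client consumes the response.
def example_csv_view(request):
    def rows():
        for i in range(1000000):
            yield [i, i * i]

    return csv_stream('squares.csv', ['n', 'n_squared'], rows())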
def get(self, *args, **kwargs):
    _ = args[0]
    queryset = self.queryset.filter(middle_survey_id=kwargs.get("pk"))
    xls = xlwt.Workbook(encoding="utf-8", style_compression=2)
    sheet = xls.add_sheet("唯一码", cell_overwrite_ok=True)
    for index, code in enumerate(queryset.iterator()):
        sheet.write(index, 0, code.unique_code)
    xls.save(self.file_name)
    file_path = os.path.join(settings.BASE_DIR, self.file_name)

    def iter_file(path, size=1024):
        with open(path, "rb") as f:
            for data in iter(lambda: f.read(size), b''):
                yield data

    response = StreamingHttpResponse(iter_file(file_path))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment; {}'.format(
        "filename*=utf-8''{}".format(quote(self.file_name)))
    return response
def login_with_face_part2(request):
    details = {}
    if request.POST:
        try:
            user = UserProfile.objects.get(user=request.user)
            gender = user.gender
            details = {
                'gender': gender,
                'username': user.user.username,
                'unique_id': user.unique_id,
                'user': user,
            }
        except Exception:
            # Note: if this branch runs, `user` is never bound, so the
            # StreamingHttpResponse call below will raise a NameError.
            details = None
        return StreamingHttpResponse(
            gen(RecognizerClass(details,
                                username=user.user.username,
                                unique_id=user.unique_id,
                                request=request)),
            content_type='multipart/x-mixed-replace; boundary=frame')
    else:
        return HttpResponse('shit')
def tarload(request, *args, **kwargs):
    """
    Download a group of files in TAR format
    @param path : Relative path of the target files
    @param files[] : List of files
    """
    path = request.POST.get('path')
    files = request.POST.getlist('files[]')

    # Permission check
    fileManager = CELLAR_FileManager(request)
    if path is None or not fileManager.isReadable(path):
        raise PermissionDenied

    # Note: `dirs` is computed but never used below; kept from the original.
    dirs = path.split("/")
    while "" in dirs:
        dirs.remove("")
    dirs.reverse()

    tarStream = TarStream.open(fileManager.getFullPath(path), 128)
    for file in files:
        tarStream.add(file)

    response = StreamingHttpResponse(tarStream, content_type='application/x-tar')
    response['Content-Length'] = tarStream.getTarSize()
    response['Content-Disposition'] = 'attachment'
    return response
def suite_csv_stream(request, job, pk):
    """
    Django is designed for short-lived requests. Streaming responses will
    tie up a worker process for the entire duration of the response. This
    may result in poor performance. Generally speaking, you should perform
    expensive tasks outside of the request-response cycle, rather than
    resorting to a streamed response.
    https://docs.djangoproject.com/en/1.8/ref/request-response/#django.http.StreamingHttpResponse
    https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
    """
    job = get_object_or_404(TestJob, pk=job)
    test_suite = get_object_or_404(TestSuite, name=pk, job=job)
    check_request_auth(request, job)
    querydict = request.GET
    offset = int(querydict.get("offset", default=0))
    limit = int(querydict.get("limit", default=0))
    pseudo_buffer = StreamEcho()
    writer = csv.writer(pseudo_buffer)
    testcases = get_testcases_with_limit(test_suite, limit, offset)
    response = StreamingHttpResponse(
        (writer.writerow(export_testcase(row)) for row in testcases),
        content_type="text/csv",
    )
    filename = "lava_stream_%s.csv" % test_suite.name
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    return response
def some_streaming_csv_view(request, file_name):
    path = 'media/csv_output/{}.csv'.format(file_name)
    # Open in binary mode so chunks stream without newline translation.
    response = StreamingHttpResponse(open(path, 'rb'), content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename={}.csv'.format(file_name)
    return response
def create_archive_stream(self, items, subdir=None):
    import zipstream
    from django.http.response import StreamingHttpResponse
    from settings.settings import ZIPFILE_SIZE_LIMIT_BYTES
    from utils import zipdir, get_total_size
    from os.path import isfile, isdir
    from datetime import datetime

    path = self.get_path() if subdir is None else os.path.join(
        self.get_path(), subdir)
    if not os.path.exists(path):
        raise Exception('Invalid subdirectory provided')
    share_path = self.get_path()
    z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # total_size = get_total_size([os.path.join(path, item) for item in items])
    # if total_size > ZIPFILE_SIZE_LIMIT_BYTES:
    #     raise Exception("%d bytes is above bioshare's limit for creating zipfiles, please use rsync or wget instead" % (total_size))
    for item in items:
        item_path = os.path.join(path, item)
        if not os.path.exists(item_path):
            raise Exception("File or folder: '%s' does not exist" % (item))
        if isfile(item_path):
            item_name = item  # os.path.join(self.id, item)
            z.write(item_path, arcname=item_name)
        elif isdir(item_path):
            zipdir(share_path, item_path, z)
    zip_name = 'archive_' + datetime.now().strftime('%Y_%m_%d__%H_%M_%S') + '.zip'
    response = StreamingHttpResponse(z, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename={}'.format(zip_name)
    return response
def get_xls_response_all_images_to_one_file(
        data, output_filename, request=None, image_keys=None,
        title_function=None, list_brackets=None):
    # Uses XlsxWriter for constant memory usage
    max_rows_per_sheet = 2**20
    with NamedTemporaryFile(delete=False) as temp_file:
        logger.info('save to file; %r...', output_filename)
        xls_write_workbook(
            temp_file, data, request=request, image_keys=image_keys,
            title_function=title_function, list_brackets=list_brackets)
        logger.info('saved temp file for; %r', output_filename)
        temp_file.seek(0, os.SEEK_END)
        size = temp_file.tell()
        temp_file.seek(0)
        logger.info('stream to response: file: %r...', output_filename)
        # The Python 2 builtin file() has been replaced with open() here.
        _file = open(temp_file.name, 'rb')
        response = StreamingHttpResponse(FileWrapper1(_file))
        response['Content-Length'] = size
        response['Content-Type'] = XLSX_MIMETYPE
        response['Content-Disposition'] = \
            'attachment; filename=%s.xlsx' % output_filename
        return response
def list(self, request, *args, **kwargs):
    queryset = self.filter_queryset(self.get_queryset())
    if isinstance(request.accepted_renderer, CSVStreamingRenderer):
        response = StreamingHttpResponse(
            request.accepted_renderer.render({
                'queryset': queryset,
                'serializer': self.get_serializer_class(),
                'context': {'request': request},
            }),
            content_type='text/csv')
        try:
            filename = queryset.model.__name__
        except AttributeError:
            try:
                filename = queryset._name
            except AttributeError:
                filename = queryset._document.__name__
        response['Content-Disposition'] = \
            'attachment; filename="{}.csv"'.format(filename)
        return response
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True)
    return Response(serializer.data)
def post(self, request):
    data = request.data
    dataset_id = data.pop("datasetId", None)
    if dataset_id is None:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    dataset = self.gpf_instance.get_wdae_wrapper(dataset_id)
    if dataset is None:
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    user = request.user
    handle_partial_permissions(user, dataset_id, data)

    config = dataset.config.gene_browser
    freq_col = config.frequency_column
    variants = dataset.get_gene_view_summary_variants(freq_col, **data)
    response = StreamingHttpResponse(
        iterator_to_json(variants),
        status=status.HTTP_200_OK,
        content_type="text/event-stream")
    response["Cache-Control"] = "no-cache"
    return response
def template_download(request):
    """
    Question-bank template download
    :param request: request object
    :return: the Excel file as a data stream
    """
    uid = request.GET.get('uid', '')  # fetch the uid
    try:
        Profile.objects.get(uid=uid)  # user profile lookup
    except Profile.DoesNotExist:
        return render(request, 'err.html', ProfileNotFound)

    def iterator(file_name, chunk_size=512):  # read in 512-byte chunks
        with open(file_name, 'rb') as f:  # rb: read as bytes
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c  # yield chunks until all data has been returned
                else:
                    break

    template_path = 'web/static/template/template.xlsx'
    # The question-bank template file is kept in its own directory
    file_path = os.path.join(settings.BASE_DIR, template_path)
    if not os.path.exists(file_path):  # path does not exist
        return render(request, 'err.html', TemplateNotFound)
    # Return the file to the client as a streaming response
    response = StreamingHttpResponse(
        iterator(file_path), content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment; filename=template.xlsx'  # xlsx format
    return response
def download(self, request, *args, **kwargs):
    # 1. Read the test report's HTML source from the database
    instance = self.get_object()

    # 2. Write the source into an HTML file
    # Directory where test reports are stored
    report_dir = settings.REPORTS_DIR
    # Full path of the test report
    report_full_dir = os.path.join(report_dir, instance.name + '.html')
    # Only generate the HTML report file if it does not already exist
    # in the reports directory
    if not os.path.exists(report_full_dir):
        with open(report_full_dir, 'w') as file:
            file.write(instance.html)

    # 3. Hand the HTML content to StreamingHttpResponse.
    # The first argument must be an iterable (each iteration returns a
    # chunk of data).
    # str -> bytes: str.encode('utf-8'); bytes -> str: byte.decode('utf-8')
    # one_file_byte = instance.html.encode()
    one_file_byte = instance.html
    # response = StreamingHttpResponse(get_file_content(report_full_dir))
    response = StreamingHttpResponse(iter(one_file_byte))

    # To offer the file as a download, the relevant response headers must be set
    response['Content-Type'] = 'application/octet-stream'
    # response['Content-Disposition'] = f"attachment; filename*=UTF-8''{instance.name}"
    response['Content-Disposition'] = \
        f"attachment; filename*=UTF-8''{escape_uri_path(instance.name)}"
    return response
def download(request, name):
    try:
        f = default_storage.open(name)
        headers, data = f.connection.get_object(
            f.container_name, f.name, resp_chunk_size=1024 * 1024
        )
        response = StreamingHttpResponse(data)
        # viewitems() was Python 2 only; items() is used here instead.
        for key, value in headers.items():
            response[key] = value
    except BaseException as e:
        response = HttpResponse()
        if hasattr(e, 'http_status'):
            response.status_code = e.http_status
        else:
            response.status_code = 500
        logger.exception(e)
    return response