def test_streaming_response(self):
    """StreamingHttpResponse contract: one-shot iteration, streaming_content,
    bytes coercion, and rejection of write()/tell()."""
    resp = StreamingHttpResponse(iter(['hello', 'world']))

    # Iterating over the response itself yields bytestring chunks.
    parts = list(resp)
    self.assertEqual(parts, [b'hello', b'world'])
    for part in parts:
        self.assertIsInstance(part, six.binary_type)

    # The response can only be iterated once.
    self.assertEqual(list(resp), [])

    # Even a sequence that could be iterated many times (a list) is
    # consumed only once when used as content.
    resp = StreamingHttpResponse(['abc', 'def'])
    self.assertEqual(list(resp), [b'abc', b'def'])
    self.assertEqual(list(resp), [])

    # Streaming responses have no `content` attribute...
    self.assertFalse(hasattr(resp, 'content'))
    # ...and assigning to `content` raises.
    with self.assertRaises(AttributeError):
        resp.content = 'xyz'

    # They do expose `streaming_content`, so callers can detect streaming
    # responses and wrap or replace the content iterator.
    self.assertTrue(hasattr(resp, 'streaming_content'))
    resp.streaming_content = iter(['abc', 'def'])
    resp.streaming_content = (piece.upper() for piece in resp.streaming_content)
    self.assertEqual(list(resp), [b'ABC', b'DEF'])

    # Coercing a streaming response to bytes yields only the headers, not a
    # complete HTTP message like a regular response would...
    resp = StreamingHttpResponse(iter(['hello', 'world']))
    self.assertEqual(
        six.binary_type(resp),
        b'Content-Type: text/html; charset=utf-8')
    # ...and doing so does not consume the content.
    self.assertEqual(list(resp), [b'hello', b'world'])

    # Additional content cannot be written to the response...
    resp = StreamingHttpResponse(iter(['hello', 'world']))
    with self.assertRaises(Exception):
        resp.write('!')
    # ...and the current position cannot be reported.
    with self.assertRaises(Exception):
        resp.tell()

    # getvalue() concatenates every chunk into a single bytestring.
    resp = StreamingHttpResponse(iter(['hello', 'world']))
    self.assertEqual(resp.getvalue(), b'helloworld')
def downloadreport(request):
    """Render a .docx report from the `test.docx` template, save it to disk,
    then return a generated (currently empty) PDF as an attachment.

    Reads the module-level names `report_name`, `path_name` and `img_name`
    — presumably configured elsewhere in this module; verify against callers.
    Note: the saved .docx is NOT what is returned; the response body is a
    blank PDF built with reportlab.
    """
    filename = report_name  # generated file must end in .docx for Word format
    filepath = path_name

    # Load the template file and render it with the report context.
    template_path = os.path.join(os.getcwd(), 'test.docx')
    template = DocxTemplate(template_path)
    context = {
        'text': '哈哈哈,来啦',
        't1': '燕子',
        't2': '杨柳',
        't3': '桃花',
        't4': '针尖',
        't5': '头涔涔',
        't6': '泪潸潸',
        't7': '茫茫然',
        't8': '伶伶俐俐',
        'picture1': InlineImage(template, img_name, width=Mm(80), height=Mm(60)),
    }
    # Table header labels and two fixed demo rows.
    context['user_labels'] = ['姓名', '年龄', '性别', '入学日期']
    context['user_list'] = [
        {'number': 1, 'cols': ['林小熊', '27', '男', '2019-03-28']},
        {'number': 2, 'cols': ['林小花', '27', '女', '2019-03-28']},
    ]
    template.render(context)
    template.save(os.path.join(filepath, filename))

    # Build the PDF attachment response. (The previous revision also built a
    # StreamingHttpResponse over the saved .docx and immediately discarded
    # it — that dead code has been removed.)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename="{}"'.format(
        filename)
    buffer = BytesIO()
    p = canvas.Canvas(buffer)
    p.save()  # finalize an empty canvas — produces a blank single-page PDF
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
def test_streaming_response(self):
    """StreamingHttpResponse contract (six era): one-shot iteration, unicode
    chunk encoding, streaming_content, bytes coercion, write()/tell()."""
    resp = StreamingHttpResponse(iter(["hello", "world"]))

    # Iterating over the response itself yields bytestring chunks.
    parts = list(resp)
    self.assertEqual(parts, [b"hello", b"world"])
    for part in parts:
        self.assertIsInstance(part, six.binary_type)

    # The response can only be iterated once.
    self.assertEqual(list(resp), [])

    # Even a sequence that could be iterated many times (a list) is
    # consumed only once when used as content.
    resp = StreamingHttpResponse(["abc", "def"])
    self.assertEqual(list(resp), [b"abc", b"def"])
    self.assertEqual(list(resp), [])

    # Unicode strings are still yielded as bytestring chunks.
    resp.streaming_content = iter(["hello", "café"])
    parts = list(resp)
    # '\xc3\xa9' == unichr(233).encode('utf-8')
    self.assertEqual(parts, [b"hello", b"caf\xc3\xa9"])
    for part in parts:
        self.assertIsInstance(part, six.binary_type)

    # Streaming responses have no `content` attribute...
    self.assertFalse(hasattr(resp, "content"))
    # ...and assigning to `content` raises.
    with self.assertRaises(AttributeError):
        resp.content = "xyz"

    # They do expose `streaming_content`, so callers can detect streaming
    # responses and wrap or replace the content iterator.
    self.assertTrue(hasattr(resp, "streaming_content"))
    resp.streaming_content = iter(["abc", "def"])
    resp.streaming_content = (piece.upper() for piece in resp.streaming_content)
    self.assertEqual(list(resp), [b"ABC", b"DEF"])

    # Coercing a streaming response to bytes yields only the headers, not a
    # complete HTTP message like a regular response would...
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    self.assertEqual(six.binary_type(resp), b"Content-Type: text/html; charset=utf-8")
    # ...and doing so does not consume the content.
    self.assertEqual(list(resp), [b"hello", b"world"])

    # Additional content cannot be written to the response...
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    with self.assertRaises(Exception):
        resp.write("!")
    # ...and the current position cannot be reported.
    with self.assertRaises(Exception):
        resp.tell()

    # getvalue() concatenates every chunk into a single bytestring.
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    self.assertEqual(resp.getvalue(), b"helloworld")
def test_streaming_response(self):
    """StreamingHttpResponse contract (py3): one-shot iteration, str→bytes
    chunk encoding, streaming_content, bytes coercion, write()/tell()."""
    resp = StreamingHttpResponse(iter(["hello", "world"]))

    # Iterating over the response itself yields bytestring chunks.
    parts = list(resp)
    self.assertEqual(parts, [b"hello", b"world"])
    for part in parts:
        self.assertIsInstance(part, bytes)

    # The response can only be iterated once.
    self.assertEqual(list(resp), [])

    # Even a sequence that could be iterated many times (a list) is
    # consumed only once when used as content.
    resp = StreamingHttpResponse(["abc", "def"])
    self.assertEqual(list(resp), [b"abc", b"def"])
    self.assertEqual(list(resp), [])

    # Plain strings are still yielded as bytestring chunks.
    resp.streaming_content = iter(["hello", "café"])
    parts = list(resp)
    # '\xc3\xa9' == unichr(233).encode()
    self.assertEqual(parts, [b"hello", b"caf\xc3\xa9"])
    for part in parts:
        self.assertIsInstance(part, bytes)

    # Streaming responses have no `content` attribute...
    self.assertFalse(hasattr(resp, "content"))
    # ...and assigning to `content` raises.
    with self.assertRaises(AttributeError):
        resp.content = "xyz"

    # They do expose `streaming_content`, so callers can detect streaming
    # responses and wrap or replace the content iterator.
    self.assertTrue(hasattr(resp, "streaming_content"))
    resp.streaming_content = iter(["abc", "def"])
    resp.streaming_content = (piece.upper() for piece in resp.streaming_content)
    self.assertEqual(list(resp), [b"ABC", b"DEF"])

    # Coercing a streaming response to bytes yields only the headers, not a
    # complete HTTP message like a regular response would...
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    self.assertEqual(bytes(resp), b"Content-Type: text/html; charset=utf-8")
    # ...and doing so does not consume the content.
    self.assertEqual(list(resp), [b"hello", b"world"])

    # Additional content cannot be written to the response...
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    with self.assertRaises(Exception):
        resp.write("!")
    # ...and the current position cannot be reported.
    with self.assertRaises(Exception):
        resp.tell()

    # getvalue() concatenates every chunk into a single bytestring.
    resp = StreamingHttpResponse(iter(["hello", "world"]))
    self.assertEqual(resp.getvalue(), b"helloworld")
def export_data_full(self):
    """Export the full dataset, in one of three forms selected by GET params.

    - `stream` present: dump the JSON export to a local file (reusing an
      existing dump) and stream it back as an attachment.
    - `f=zip`: build an in-memory ZIP with one JSON entry per dataset and
      return it as an attachment.
    - otherwise: return the data directly as a JSON response.

    NOTE: `self` here is the request object (the code reads `self.GET`).
    """
    # @todo: use cache (already existing files)
    # @todo: serve ZIP file as StreamingHttpResponse
    data = _export_data(
        limit=int(self.GET.get('limit', 999999999)),
        since=self.GET.get('since', None),
        to=self.GET.get('to', None))

    if self.GET.get('stream', None) is not None:
        # Save data to a local file named after the export window, and
        # reuse an existing dump rather than regenerating it.
        data_export = json.dumps(data, default=_json_serial)
        export_filename = 'updates_{}_{}_{}.json'.format(
            data['export_since'].strftime('%s'),
            data['export_to'].strftime('%s'),
            data['last_update'].strftime('%s'))
        export_fp = 'media/data/export/{}'.format(export_filename)
        if not os.path.isfile(export_fp):
            with open(export_fp, 'w', encoding="utf8") as f:
                f.write(data_export)

        # Prepare and serve the streamed response. The open file object is
        # handed to StreamingHttpResponse, which closes it when exhausted.
        response = StreamingHttpResponse(
            open('media/data/export/{}'.format(export_filename)),
            status=200,
            content_type='application/octet-stream')
        response['Cache-Control'] = 'no-cache'
        response['Content-Disposition'] = 'attachment; filename={};'.format(
            export_filename)
        return response

    if self.GET.get('f', '').lower() == 'zip':
        exp_filename = "patrowlhears_datadump_{}".format(
            datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")[:-3])
        in_memory = BytesIO()
        # `archive` (not `zip`) avoids shadowing the builtin; the context
        # manager guarantees the archive is finalized before it is read.
        with ZipFile(in_memory, "a") as archive:
            archive.writestr(
                "all.json",
                json.dumps(data, sort_keys=True, default=_json_serial))
            # One entry per top-level dataset key, each wrapped in a dict
            # keyed by its own name — identical payloads to the previous
            # nine hand-written writestr calls.
            for key in ('vulns', 'exploits', 'threats', 'kb_cwe', 'kb_cpe',
                        'kb_cve', 'kb_vendor', 'kb_product',
                        'kb_product_version'):
                archive.writestr(
                    "{}.json".format(key),
                    json.dumps({key: data[key]}, sort_keys=True,
                               default=_json_serial))
            # fix for Linux zip files read in Windows
            for entry in archive.filelist:
                entry.create_system = 0

        response = HttpResponse(content_type="application/zip")
        response["Content-Disposition"] = "attachment; filename={}.zip".format(
            exp_filename)
        in_memory.seek(0)
        response.write(in_memory.read())
        return response

    # Default: plain JSON payload (safe=False allows non-dict top level).
    return JsonResponse(data, safe=False)