def test_csv(self):
    """Exercise counter-CSV saving, CSV round-tripping and matrix indexing.

    Counts capital letters in this source file, saves them as a CSV, then
    reads/writes an in-memory CSV fixture with int and float conversion.
    """
    # Count uppercase letters; dict.get replaces the original's bare
    # try/except counting idiom (a bare except also swallowed KeyboardInterrupt).
    counts = dict()
    with open(__file__) as f:
        for c in re.findall('[A-Z]', f.read()):
            counts[c] = counts.get(c, 0) + 1
    path = PathSpliter(__file__).dirname
    save_counter_as_csv(counts, path + 'counter.csv', ['KEY', 'NUM'])
    # Fixture: header row plus six data rows (rows separated by newlines —
    # reconstructed from the collapsed original; verify against VCS history).
    b = b'''a,b,c,d,e,f,g,h,j,k
1,2,3,4,5,6,7,8,9,0
0,9,8,7,6,5,4,3,2,1
1,2,3,4,5,6,7,8,9,0
0,9,8,7,6,5,4,3,2,1
1,2,3,4,5,6,7,8,9,0
6,6,6,6,6,6,6,6,6,6'''
    csv = CSV()
    csv.read(BytesIO(b), withhead=1, convert=int)
    csv.show()
    csv.read(BytesIO(b), withhead=1, convert=float)
    print(csv.to_matrix())
    csv.write(path + 'test.csv', delim=',', body_format=','.join(['%.2f'] * 10))
    m = numpy.array([['a', 'b', 'c'], ['b', 'c', 'd'], ['d', 'e', 'f'],
                     ['a', 'f', 'c'], ['g', 'd', 'e']])
    print(indexed_all(m))
def get(self, request, transaction_number, challan_number):
    """Render a sales transaction either as a PDF challan or an HTML line view.

    Fetches the transaction from the sales service; users with purchase-record
    update access get a generated PDF attachment, others get the HTML view.
    Falls back to an error template when the upstream call fails.
    """
    r = requests.get(url=SALES_TRANSACTION,
                     params={'transaction_number': transaction_number})
    # FIX: `is 200` compared object identity, not value — works only by
    # accident of CPython's small-int caching. Use ==.
    if r.status_code == 200:
        json_data = r.json()
        if hasUpdatePurchaseRecordAccess(request.user):
            # Gather the reference data needed by the PDF template.
            item_list = json.loads(requests.get(SALES_ITEM_LIST).text)
            uom = json.loads(requests.get(UNIT_OF_MEASURE).text)
            po_line_statuses = json.loads(
                requests.get(PURCHASE_ORDER_LINES_STATUS).text)
            po_header_statuses = json.loads(
                requests.get(PURCHASE_ORDER_HEADER_STATUS).text)
            po_type = json.loads(requests.get(PURCHASE_ORDER_TYPE).text)
            supplier_list = json.loads(requests.get(SUPPLIER_LIST).text)
            data = {
                'user': request.user.username,
                'po_type': po_type['purchaseOrderType'],
                'supplier_list': supplier_list['supplierLists'],
                'item_list': item_list['itemDetailsList'],
                'uom': uom['UnitOfMeasure'],
                'header_status': po_header_statuses['purchaseOrderHeaderStatus'],
                'line_status': po_line_statuses['purchaseOrderLineStatus'],
                'details': json_data['sales_trx_details'][0]
            }
            template = jinja_template.get_template(
                'pdf-templates/sales-challan.html')
            html = template.render(request, data=data)
            response = BytesIO()
            # pisa consumes Latin-1 encoded HTML here.
            pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), response)
            if not pdf.err:
                resp = HttpResponse(response.getvalue(),
                                    content_type='application/pdf')
                resp['Content-Disposition'] = 'attachment; filename="SalesChallan.pdf"'
                return resp
            else:
                return HttpResponse("Error Rendering PDF", status=400)
        else:
            template = jinja_template.get_template('sales/sales-line-view.html')
            return HttpResponse(
                template.render(request,
                                data=json_data['purchase_trx_details'][0]))
    else:
        template = jinja_template.get_template('internal_server_error.html')
        return HttpResponse(template.render(request))
def content(self):
    """Return the first member of this object's zip archive as a byte stream.

    A missing archive or an empty archive yields an empty BytesIO.
    """
    from _io import BytesIO
    if isfile(self.file_path):
        with ZipFile(self.file_path, 'r') as archive:
            names = archive.namelist()
            if names:
                # ZipFile.read avoids juggling an explicit member handle.
                return BytesIO(archive.read(names[0]))
    return BytesIO(b'')
def _create_dataset():
    """Build in-memory CSV example/label streams from the iris dataset.

    Returns a (examples_file, label_file) pair of rewound BytesIO objects
    whose .name attributes mimic uploaded CSV files.
    """
    iris = load_iris()
    examples_file = BytesIO()
    examples_file.name = 'examples.csv'
    label_file = BytesIO()
    label_file.name = 'labels.csv'
    numpy.savetxt(examples_file, iris.data, delimiter=',')
    numpy.savetxt(label_file, iris.target, delimiter=',')
    # Rewind so callers read from the start.
    examples_file.seek(0)
    label_file.seek(0)
    return examples_file, label_file
def __init__(self, database, language="english", filename="english_lang_cds.txt"):
    """Extract and decode the "lang_<language>.cds" table from *database*.

    Every decompressed entry's string is written, one per line, to
    *filename* (UTF-8). Decompression is delegated to a native DLL because
    the original ASM routine was too complex to reverse engineer in Python.
    """
    self.database = database
    self.language = language
    # Load the native decompression DLL.
    decompDLL = ctypes.CDLL("riftdecomp.dll")
    stream = BytesIO()
    self.database.extractByNameToMemory("lang_" + language + ".cds", stream)
    # Seek to start before parsing.
    stream.seek(0)
    dis = LittleEndianDataInputStream(stream)
    entryCount = dis.read_int()
    # 1 KiB frequency table consumed by the native decompressor.
    frequencyTable = stream.read(1024)
    print("entryCount:" + str(entryCount))
    # Skip a (key, LEB128 value) pair per entry — purpose unknown.
    for i in range(0, entryCount):
        key = stream.read(4)
        value = readUnsignedLeb128(stream)
    # FIX: manage the output file with a context manager so it is closed
    # even when decompression of an entry raises.
    with open(filename, "w", encoding='UTF-8') as f:
        for i in range(0, entryCount):
            compressedSize = readUnsignedLeb128(stream)
            uncompressedSize = readUnsignedLeb128(stream)
            entryData = stream.read(compressedSize)
            # Buffer the DLL decompresses into.
            outputData = ctypes.create_string_buffer(uncompressedSize)
            decompDLL.decompressData(frequencyTable, entryData, compressedSize,
                                     outputData, uncompressedSize)
            buffer = BytesIO(outputData.raw)
            # First 10 bytes: unknown header, apparently constant across files.
            buffer.read(10)
            # LEB128 string length, then the UTF-8 string itself.
            strLength = readUnsignedLeb128(buffer)
            finalString = buffer.read(strLength).decode("utf-8")
            print(finalString, file=f)
def decompress(frequencyTable, valueData):
    """Decompress one CDS value blob via the native riftdecomp DLL.

    *valueData* starts with a LEB128 uncompressed size; the remainder is
    the compressed payload. Returns the decompressed bytes as a BytesIO.
    """
    stream = BytesIO(valueData)
    expandedSize = readUnsignedLeb128(stream)
    payload = stream.read(len(valueData) - stream.tell())
    # Buffer for the DLL to decompress into.
    target = ctypes.create_string_buffer(expandedSize)
    decompDLL.decompressData(frequencyTable, payload, len(payload),
                             target, expandedSize)
    return BytesIO(target.raw)
def content(self):
    """Return the first file inside this document's zip archive as a byte stream.

    The archive lives under the user's "documents" path, keyed by this
    object's id. Missing or empty archives yield an empty BytesIO.
    """
    from _io import BytesIO
    file_path = get_user_path("documents",
                              "document_%s" % six.text_type(self.id))
    if isfile(file_path):
        with ZipFile(file_path, 'r') as archive:
            names = archive.namelist()
            if names:
                return BytesIO(archive.read(names[0]))
    return BytesIO(b'')
def bytesio_helper():
    """Return ten BytesIO streams holding the prefixes of b'abcdefghi'.

    Lengths run from 0 (empty) through 9 characters.
    """
    alphabet = b'abcdefghi'
    return tuple(BytesIO(bytearray(alphabet[:n]))
                 for n in range(len(alphabet) + 1))
def test_issue25862():
    """Regression test for CPython issue #25862.

    tell() used to hit assertion failures when called after read() and
    after an interleaved write(); both sequences must complete cleanly.
    """
    from _io import TextIOWrapper, BytesIO
    # read(1) then read-to-end, then tell().
    wrapper = TextIOWrapper(BytesIO(b'test'), encoding='ascii')
    wrapper.read(1)
    wrapper.read()
    wrapper.tell()
    # read(1) then a write, then tell().
    wrapper = TextIOWrapper(BytesIO(b'test'), encoding='ascii')
    wrapper.read(1)
    wrapper.write('x')
    wrapper.tell()
async def __call__(self, scope: Scope, receive: Receive, send: Send):
    """ASGI entry point that bridges to a WSGI app run on a thread pool.

    Lifespan events create/tear down the executor; HTTP requests are
    dispatched to self.run_wsgi on the pool while self.run_wsgi_feeder
    streams the request body into input_io.
    """
    if scope["type"] == "lifespan":
        if (await receive())["type"] == "lifespan.startup":
            self.executor = ThreadPoolExecutor(max_workers=20, )
            await send({'type': 'lifespan.startup.complete'})
        if (await receive())["type"] == "lifespan.shutdown":
            # shutdown() blocks until workers finish, so run it on its own
            # thread and await a concurrent.futures future instead.
            fut = ConcFuture()

            def _shutdown():
                try:
                    self.executor.shutdown()
                finally:
                    fut.set_result(None)

            Thread(target=_shutdown).start()
            await asyncio.wrap_future(fut)
            await send({'type': 'lifespan.shutdown.complete'})
        return
    # Only HTTP scopes are handled beyond this point.
    if scope["type"] != "http":
        return
    input_io = BytesIO()
    loop = get_running_loop()
    # Worker threads call this to push ASGI messages back onto the loop
    # synchronously (blocks until the coroutine completes).
    send_sync = lambda data: asyncio.run_coroutine_threadsafe(
        send(data), loop=loop).result()
    server = scope.get("server", ["localhost", 80])
    # Minimal WSGI environ built from the ASGI scope (PEP 3333 keys).
    environ = {
        "REQUEST_METHOD": scope["method"],
        "SERVER_NAME": server[0],
        "SERVER_PORT": server[1],
        "SCRIPT_NAME": "",
        "PATH_INFO": scope["path"],
        "QUERY_STRING": scope["query_string"].decode("ascii"),
        "SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"],
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": scope.get("scheme", "http"),
        "wsgi.input": input_io,
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": True,
        "wsgi.multiprocess": True,
        "wsgi.run_once": False,
    }
    # Run the WSGI app on the pool while the feeder task streams the body.
    fut = get_running_loop().run_in_executor(
        self.executor, lambda: self.run_wsgi(environ, send_sync))
    task = get_running_loop().create_task(
        self.run_wsgi_feeder(scope, receive, fut, input_io))
    await fut
    if not task.done():
        # App finished before the body was fully read: cancel the feeder.
        task.cancel()
        try:
            await task
        except CancelledError:
            pass
def test_send_with_files(self):
    """Send a mail with two attachments and verify both round-trip intact.

    Attachment 1 is an in-memory text payload, attachment 2 a real PNG
    from the test static files; the captured SMTP message is checked for
    content type, transfer encoding, payloads and attachment headers.
    """
    # NOTE: '\b' in the literal is a backspace character, not a backslash;
    # the same escape is used in the assertion below, so they stay consistent.
    file1 = BytesIO(get_binay('blablabla\blabla.'))
    file2 = open(join(dirname(__file__), 'static', 'lucterios.mailing',
                      'images', 'config_mail.png'), mode='rb')
    try:
        configSMTP('localhost', 1025)
        self.assertEqual(0, self.server.count())
        self.assertEqual(True, will_mail_send())
        send_email('*****@*****.**', 'send with files', '2 files sent!',
                   [('filename1.txt', file1), ('filename2.png', file2)])
        self.assertEqual(1, self.server.count())
        self.assertEqual('*****@*****.**', self.server.get(0)[1])
        self.assertEqual(['*****@*****.**'], self.server.get(0)[2])
        # Expect the body plus the two attachments (3 MIME parts).
        msg, msg_f1, msg_f2 = self.server.check_first_message(
            'send with files', 3)
        self.assertEqual('text/plain', msg.get_content_type())
        self.assertEqual('base64', msg.get('Content-Transfer-Encoding', ''))
        self.assertEqual('2 files sent!', decode_b64(msg.get_payload()))
        self.assertEqual(None, self.server.smtp.auth_params)
        self.assertTrue('filename1.txt' in msg_f1.get('Content-Type', ''),
                        msg_f1.get('Content-Type', ''))
        self.assertEqual('blablabla\blabla.',
                         decode_b64(msg_f1.get_payload()))
        self.assertTrue('filename2.png' in msg_f2.get('Content-Type', ''),
                        msg_f2.get('Content-Type', ''))
        # Compare the decoded attachment size against the file size on disk.
        file2.seek(0, SEEK_END)
        self.assertEqual(file2.tell(), len(b64decode(msg_f2.get_payload())))
    finally:
        file1.close()
        file2.close()
def parse_data(src):
    """Parse a product-listing HTML page into a list of product dicts.

    Each product carries image/title/blurb fields plus a "goods_classify"
    list of SKU variants with title, code, sale price, image URL and the
    variant image's dominant color as a '#rrggbb' string.
    """

    def _first(values, default=""):
        # xpath() returns a list; take the first hit or a default.
        return values[0] if values else default

    def _dominant_color(url):
        # Shrink the SKU swatch to a single averaged pixel and format it
        # as a hex color. getcolors() yields e.g. [(1, (223, 218, 212))].
        # SECURITY NOTE: verify=False disables TLS certificate checking
        # (kept from the original — confirm this is intentional).
        img_bytes = requests.get(url, verify=False).content
        bio = BytesIO(img_bytes)
        try:
            with Image.open(bio) as pimg:
                pimg.thumbnail((1, 1))
                r, g, b = pimg.getcolors(pimg.size[0] * pimg.size[1])[0][1]
        finally:
            bio.close()
        return '#%02x%02x%02x' % (r, g, b)

    et = html.fromstring(src)
    product_items_list = et.xpath(
        "//div[@class='list-product']//div[@class='plp-slide']")
    final_list = []
    for item in product_items_list:
        data = {}
        data["img_box_src"] = _first(
            item.xpath(".//div[@class='img-box']//img/@lazysrc"))
        data["goods_tit"] = _first(
            item.xpath(".//p[@class='goods-tit']/a/text()"))
        data["goods_introudce"] = _first(
            item.xpath(".//p[@class='goods-introudce']/a/text()"))
        gc_list = data["goods_classify"] = []
        for gc in item.xpath(".//div[@class='goods-classify']//span"):
            dgc = {}
            # Normalize non-breaking spaces in the variant title.
            dgc["title"] = _first(gc.xpath("./img/@title")).replace('\xa0', ' ')
            dgc["code"] = _first(gc.xpath("./@data-code"))
            dgc["saleprice"] = _first(gc.xpath("./@data-saleprice"))
            dgc["img_src"] = _first(gc.xpath("./img/@src"))
            dgc["img_color"] = (_dominant_color(dgc["img_src"])
                                if dgc["img_src"] else "")
            gc_list.append(dgc)
        final_list.append(data)
    return final_list
def create():
    """Create a fresh TOTP secret and return it alongside a QR-code data URI.

    The QR code encodes the provisioning URI so an authenticator app can
    enroll the secret directly.
    """
    totp_ = totp.TOTP(random_base32())
    uri = totp_.provisioning_uri("*****@*****.**",
                                 issuer_name="Cinara-Lyca Network Co.LTD")
    png_buffer = BytesIO()
    make(uri).save(png_buffer)
    encoded = standard_b64encode(png_buffer.getvalue()).decode()
    return jsonify({"qr_code_url": "data:image/png;base64," + encoded,
                    "auth_vendor_id": totp_.secret})
def read_tick(filename):
    """Read tick records from a gzip source and return the selected rows.

    *filename* may be an open BytesIO, raw gzip bytes, or a filesystem path.
    """
    if isinstance(filename, (BytesIO, bytes)):
        # In-memory input: wrap raw bytes in a stream if needed.
        stream = filename if isinstance(filename, BytesIO) else BytesIO(filename)
        source = GzipFile(fileobj=stream)
    else:
        source = GzipFile(filename)
    return list(select(source))
def open_as_bytes_stream(filename) -> "BytesIO | _io.BufferedReader":
    """Open *filename* for binary reading, buffering small files in memory.

    Files below MAX_FILE_SIZE_32BIT are loaded wholly into a BytesIO (and
    the on-disk handle is closed); larger files get a buffered reader the
    caller must close. FIX: the original annotation claimed a
    BufferedReader was always returned, but the small-file path returns a
    BytesIO.
    """
    filesize = os.path.getsize(filename)
    if filesize < MAX_FILE_SIZE_32BIT:
        with open(filename, 'rb') as f:
            return BytesIO(f.read(filesize))
    else:
        return open(filename, 'rb', buffering=BEST_IO_BUFFER_SIZE)
def test_read_string():
    """Pull three successive strings out of one shared byte stream."""
    payload = b" test1 test2"
    with BytesIO(payload) as sp:
        # Each call advances the shared stream position.
        for label in ("first time", "second time", "third time"):
            print(label, utils.read_string(sp))
def get(self, request, transaction_number, challan_number):
    """Render a purchase challan PDF for the given transaction/challan.

    Pulls item, unit-of-measure, header-status and receipt data from the
    upstream services, renders the challan template and returns it as a
    PDF attachment (HTTP 400 on render failure).
    """

    def _fetch(url):
        # GET a service endpoint and decode its JSON body.
        return json.loads(requests.get(url).text)

    item_list = _fetch(PURCHASE_ITEM_LIST)
    uom = _fetch(UNIT_OF_MEASURE)
    po_receipt_statuses = _fetch(PURCHASE_ORDER_HEADER_STATUS)
    receipt_details = _fetch(
        RECEIPT_SEARCH + 'challan_number=' + challan_number)
    # Keep only the date portion of "YYYY-MM-DD HH:MM:SS" challan dates.
    first_receipt = receipt_details['receipt_details'][0]
    if first_receipt['challan_date']:
        first_receipt['challan_date'] = \
            first_receipt['challan_date'].split(' ')[0]
    data = {
        'transaction_number': transaction_number,
        'item_list': item_list['itemDetailsList'],
        'uom': uom['UnitOfMeasure'],
        'po_receipt_statuses':
            po_receipt_statuses['purchaseOrderHeaderStatus'],
        'details': receipt_details['receipt_details'][0]
    }
    template = jinja_template.get_template(
        'pdf-templates/purchase_challan.html')
    html = template.render(request, data=data)
    response = BytesIO()
    pdf = pisa.pisaDocument(BytesIO(html.encode("utf-8")), response)
    if not pdf.err:
        resp = HttpResponse(response.getvalue(),
                            content_type='application/pdf')
        resp['Content-Disposition'] = 'attachment; filename="PurchaseChallan.pdf"'
        return resp
    else:
        return HttpResponse("Error Rendering PDF", status=400)
def test_dataset_post_dataset_length_mismatch(self):
    """POSTing examples and labels of different lengths must yield HTTP 400."""
    examples_file = BytesIO()
    examples_file.name = 'examples.csv'
    label_file = BytesIO()
    label_file.name = 'labels.csv'
    # iris examples (150 rows) paired with breast-cancer labels (569 rows):
    # a deliberate length mismatch.
    numpy.savetxt(examples_file, load_iris().data, delimiter=',')
    numpy.savetxt(label_file, load_breast_cancer().target, delimiter=',')
    examples_file.seek(0)
    label_file.seek(0)
    client = DjangoClient()
    response = client.post(reverse('datasets'),
                           data={'dataset': 'TEST',
                                 'file[0]': examples_file,
                                 'file[1]': label_file})
    self.assertEqual(400, response.status_code)
    self.assertEqual(b'"Examples and labels are not the same length"',
                     response.content)
def get(self, request, *args, **kwargs):
    """Generate a GIF captcha, record its code per device, and return the image.

    The device key falls back to the 'username' query parameter when no
    device can be derived from the request.
    """
    params = request.query_params.copy()
    device = app_device(request)
    if not device:
        device = params.get('username', '')
    img, code = check_code.create_validate_code()
    # Store the upper-cased code keyed by device for later verification.
    DeivceVcode.objects.update_or_create(device=device,
                                         defaults={'vcode': upper(code)})
    buf = BytesIO()
    img.save(buf, 'GIF')
    return HttpResponse(buf.getvalue(), content_type='image/gif')
def test4atom_no_match_missing_value_string_set(self):
    """
    This test case sets up a set of values, which are all expected to be
    matched. The missing value string is set to a value, so when a string
    does not match this value is used instead.
    """
    description = "Test4MatchValueStreamWriter"
    output_stream = BytesIO()
    match_context = MatchContext(
        b'25537Euro 25538Euro 25539Euro 25540Pfund ')
    integer_me = DecimalIntegerValueModelElement(
        'd1', DecimalIntegerValueModelElement.SIGN_TYPE_NONE,
        DecimalIntegerValueModelElement.PAD_TYPE_NONE)
    fixed_dme = FixedDataModelElement('s1', self.euro)
    sequence_model_element = SequenceModelElement(
        'sequence', [integer_me, fixed_dme])
    writer = MatchValueStreamWriter(
        output_stream,
        [self.match_sequence_d1, self.match_sequence_s1], b';', b'-')
    self.analysis_context.register_component(writer, description)
    # Three complete "<int>Euro " sequences feed the writer normally.
    for _ in range(3):
        match_element = sequence_model_element.get_match_element(
            'match', match_context)
        log_atom = LogAtom(match_context.match_data,
                           ParserMatch(match_element), 1, writer)
        writer.receive_atom(log_atom)
    # The fourth entry ("25540Pfund ") matches only the integer part; the
    # absent string value must be replaced by the b'-' missing-value marker.
    match_element = integer_me.get_match_element('match', match_context)
    match_element.path = self.match_sequence_d1
    log_atom = LogAtom(match_context.match_data,
                       ParserMatch(match_element), 1, writer)
    writer.receive_atom(log_atom)
    self.assertEqual(output_stream.getvalue().decode(),
                     '25537;Euro \n25538;Euro \n25539;Euro \n25540;-\n')
def __GetIcon(self):
    """Pick the best icon from the device description and load it as a bitmap.

    Preference order: PNG >= 32px wide, then JPEG >= 32px, then the widest
    remaining entry. The chosen icon is downloaded, scaled to 32x32 and
    stored in self.__icon; failures fall back to a blank white bitmap.
    """
    icon_png = None
    icon_jpg = None
    icon_fallback = None
    try:
        iconList = self.FindInDescription('d:device/d:iconList')
        for icon in iconList:
            width = int(self.FindInDescription('d:width', icon).text)
            mimetype = self.FindInDescription('d:mimetype', icon).text
            if width >= 32 and mimetype == 'image/png':
                icon_png = icon
                break
            elif width >= 32 and mimetype == 'image/jpeg':
                icon_jpg = icon
            else:
                # Track the widest non-preferred entry as a fallback.
                if icon_fallback:
                    fallback_width = int(
                        self.FindInDescription('d:width', icon_fallback).text)
                    if fallback_width < width:
                        icon_fallback = icon
                else:
                    icon_fallback = icon
    except Exception as e:
        self._logger.warning("{}: {}".format(self.name, str(e)))
        self.__icon = None
        return
    if icon_png:
        icon = icon_png
    elif icon_jpg:
        icon = icon_jpg
    elif icon_fallback:
        icon = icon_fallback
    else:
        # FIX: the original left `icon` unbound here and relied on the
        # NameError below being caught. Use the blank fallback explicitly.
        self.__icon = wx.Bitmap.FromRGBA(32, 32, 255, 255, 255, 255)
        return
    try:
        icon_url = self.FindInDescription('d:url', icon).text
        icon_url = self.path_to_url(icon_url)
        # FIX: close the HTTP response instead of leaking the connection.
        with closing(urlopen(icon_url)) as resp:
            icon_data = resp.read()
        sbuf = BytesIO(icon_data)
        image = wx.Image(sbuf)
        image = image.Scale(32, 32, wx.IMAGE_QUALITY_BICUBIC)
        self.__icon = image.ConvertToBitmap()
    except Exception as e:
        self._logger.warning("{}: {}".format(self.name, str(e)))
        self.__icon = wx.Bitmap.FromRGBA(32, 32, 255, 255, 255, 255)
    return
def sideeffect(url_, timeout=None):
    """Mock urlopen side effect returning canned station/citation payloads.

    Dispatches on the URL: geofon requests get ZE.network.xml (or the
    configured override), doi.org requests get a citation string, anything
    else gets other_stations.xml. Configured Exception values are raised.
    """
    try:
        url = url_.get_full_url()
    except AttributeError:
        url = url_
    if "geofon" in url:
        if isinstance(geofon_retval, Exception):
            raise geofon_retval  # pylint: disable=raising-bad-type
        if geofon_retval is None:
            # FIX: open in binary mode — BytesIO rejects str payloads.
            with open(os.path.join(DATADIR, "ZE.network.xml"), "rb") as opn:
                return BytesIO(opn.read())
        else:
            return BytesIO(geofon_retval)
    elif 'doi.org' in url:
        if isinstance(doicit_retval, Exception):
            raise doicit_retval  # pylint: disable=raising-bad-type
        if doicit_retval is None:
            # FIX: encode the formatted string; the original interpolated
            # url.encode('utf8') (a bytes repr) into a str and passed that
            # str to BytesIO, raising TypeError.
            return BytesIO(
                ("Marc Smith (2002): A Paper. %s" % url).encode('utf8'))
        return BytesIO(doicit_retval)
    else:
        if isinstance(others_retval, Exception):
            raise others_retval  # pylint: disable=raising-bad-type
        if others_retval is None:
            # FIX: binary mode, as above.
            with open(os.path.join(DATADIR, "other_stations.xml"), "rb") as opn:
                return BytesIO(opn.read())
        else:
            return BytesIO(others_retval)
def test2all_atoms_match_no_seperator(self):
    """
    This test case sets up a set of values, which are all expected to be
    matched. The seperator string is None, so all values are expected to
    be one string.
    """
    description = "Test2MatchValueStreamWriter"
    output_stream = BytesIO()
    match_context = MatchContext(
        b'25537Euro 25538Euro 25539Euro 25540Euro ')
    integer_me = DecimalIntegerValueModelElement(
        'd1', DecimalIntegerValueModelElement.SIGN_TYPE_NONE,
        DecimalIntegerValueModelElement.PAD_TYPE_NONE)
    fixed_dme = FixedDataModelElement('s1', self.euro)
    sequence_model_element = SequenceModelElement(
        'sequence', [integer_me, fixed_dme])
    writer = MatchValueStreamWriter(
        output_stream,
        [self.match_sequence_d1, self.match_sequence_s1], b'', b'-')
    self.analysis_context.register_component(writer, description)
    # Feed all four "<int>Euro " sequences through the writer.
    for _ in range(4):
        match_element = sequence_model_element.get_match_element(
            'match', match_context)
        log_atom = LogAtom(match_context.match_data,
                           ParserMatch(match_element), 1, writer)
        writer.receive_atom(log_atom)
    self.assertEqual(output_stream.getvalue().decode(),
                     '25537Euro \n25538Euro \n25539Euro \n25540Euro \n')
def show_map():
    """Display map.png centered in the window and hide the GIF label.

    Also clears the module-level check_gif flag so the GIF animation loop
    stops.
    """
    # FIX: the original leaked the file handle; use a context manager.
    with open("map.png", "rb") as f:
        img = ImageTk.PhotoImage(Image.open(BytesIO(f.read())))
    maplabel.config(image=img)
    # Keep a reference on the widget so Tk doesn't garbage-collect the image.
    maplabel.img = img
    maplabel.place(x=window.winfo_width() / 2,
                   y=window.winfo_height() / 2,
                   anchor="center")
    giflabel.place_forget()
    global check_gif
    check_gif = False  # GIF is no longer being played
async def filter(ctx, arg):
    """Apply the named OpenCV filter to each attached image and post the result.

    Supported filters: blur, gray, edge, reflect, bright, 70s. Unknown
    filters — or a message with no attachment — get a usage reply.
    """
    usage = ('Improper Usage\nFilters: **blur, gray, edge, reflect, bright, '
             '70s**\nFilter Usage Example: *&filter edge*')

    def _edge(img):
        # Gray -> light blur -> Canny edge detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.Canny(cv2.GaussianBlur(gray, (5, 5,), 0), 85, 85)

    def _bright(img):
        kernel = np.array([[0.01, 0.54, 0.9],
                           [0.4, 0.01, 0.4],
                           [0.01, 0.2, 0.01]])
        return cv2.filter2D(img, -1, kernel)

    # Dispatch table: filter name -> image transform.
    transforms = {
        'blur': lambda img: cv2.GaussianBlur(img, (33, 33,), 0),
        'gray': lambda img: cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
        'edge': _edge,
        'reflect': lambda img: cv2.flip(img, 1),
        'bright': _bright,
        '70s': lambda img: cv2.cvtColor(img, cv2.COLOR_BGR2HSV),
    }
    attached = False
    for file in ctx.message.attachments:
        attached = True
        link = requests.get(file.url)
        # Round-trip through disk so OpenCV can read the attachment.
        Image.open(BytesIO(link.content)).save('image.jpg')
        image = cv2.imread('image.jpg')
        filename = 'filtered.jpg'
        if arg in transforms:
            cv2.imwrite(filename, transforms[arg](image))
            await ctx.message.channel.send(file=discord.File(filename))
        else:
            await ctx.message.channel.send(usage)
    if not attached:
        await ctx.message.channel.send(usage)
def history_tick_content(contract, date):
    """Download the gzipped history-tick payload for *contract* on *date*.

    Streams the response in 64 KiB chunks behind a progress bar; raises
    IOError with the status code on any non-200 response.
    """
    response = requests.get(hist_tick_url(contract, date), stream=True)
    if response.status_code != 200:
        raise IOError(response.status_code)
    disposition = response.headers['Content-Disposition']
    buf = BytesIO(b"")
    with click.progressbar(response.iter_content(2 ** 16),
                           label=disposition) as bar:
        for piece in bar:
            buf.write(piece)
    buf.seek(0)
    return buf.read()
def func4():
    """Demonstrate StringIO and BytesIO basics.

    StringIO reads/writes str in memory; a write at position 0 overwrites
    the initializer text. BytesIO is the binary-data counterpart.
    """
    f = StringIO("可以这样初始化#\t#\t")
    # A subsequent write starts at position 0 and overwrites the initial text.
    f.write("HelloWorld!")
    # getvalue() returns everything written so far.
    print(f.getvalue())
    # StringIO only handles str; binary data needs BytesIO
    # (e.g. BytesIO(b'\xe4\xb8\xad\xe6\x96\x87') to seed it with bytes).
    fb = BytesIO()
    fb.write("测试中文".encode(encoding='utf_8'))
    print(fb.getvalue())
def decompress(self, input):
    """Decompress an LZ4-chunked save stream into a seekable memory stream.

    Reads the header and chunk table from *input*, pre-sizes an output
    buffer (header bytes + total decompressed size), then writes each
    decompressed chunk after the header region. The returned stream is
    positioned just past the header.
    """
    header = lz4Header().read(input)
    table = lz4Table().read(input, header.chunkCount)
    input.seek(header.headerSize, 0)
    # Pre-size the output with zero bytes so seeks past the end are valid.
    total = header.headerSize + sum(
        chunk.decompressedChunkSize for chunk in table)
    out = BytesIO(bytes(total))
    out.seek(header.headerSize, 0)
    for chunk in table:
        out.write(chunk.read(input))
    out.seek(header.headerSize, 0)
    return out
def getXMLDictFromGZCache(filename, documentName):
    """Load a gzip-compressed cached XML document and parse it into a dict.

    The cache path is derived from *filename* (stripped at ".txt") and
    *documentName*. Raises FileNotFoundException (with just the basename)
    when the cache entry is absent.
    """
    base = filename[0:filename.find(".txt")]
    finalFileName = Constant.CACHE_FOLDER + base + "/" + documentName + ".gz"
    logging.getLogger(Constant.LOGGER_GENERAL).debug(
        "XML - Processing filename " + finalFileName.replace("//", "/"))
    cached = getBinaryFileFromCache(finalFileName)
    if cached is None:
        shortFileName = finalFileName[
            finalFileName.rfind("/") + 1:len(finalFileName)]
        raise FileNotFoundException(shortFileName)
    with gzip.open(BytesIO(cached), 'rb') as f:
        text = f.read().decode("ISO-8859-1")
    return xmltodict.parse(text)
def pdf_generation(request, proyID):
    """Render the business-model PDF for one of the user's finished projects.

    404s when the project does not belong to the user (or does not exist)
    or is still a draft ('borrador'). Returns the PDF as an attachment
    named after the submission date and project id.
    """
    proyID = int(proyID)
    us = request.user
    loadP = is_mine(us.uuid, proyID)
    # Not the user's project / nonexistent, or an incomplete draft: no PDF.
    if not loadP or loadP.status == 'borrador':
        raise Http404
    fileName = loadP.fecha_envio_a_revision.strftime("%Y%m%d") + str(loadP.id)
    # HttpResponse with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = \
        'attachment; filename="' + fileName + '.pdf"'
    buffer = BytesIO()
    report = PdfPrint(buffer, 'Letter')
    response.write(report.report('POSiBLE - Modelo de Negocio',
                                 fileName, loadP))
    return response