def zipper(response):
    """Gzip-compress a Flask response body in place when the client
    accepts gzip.

    Skips non-2xx responses and responses that already carry a
    Content-Encoding header. Logs how long compression took.
    """
    accept_encoding = request.headers.get('Accept-Encoding', '')
    if 'gzip' not in accept_encoding.lower():
        return response
    response.direct_passthrough = False
    if (response.status_code < 200 or response.status_code >= 300 or
            'Content-Encoding' in response.headers):
        return response
    nbytes = len(response.data)
    start_time = time.time()
    gzip_buffer = IO()
    # compresslevel=1: favour speed over ratio for per-request compression.
    gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer, compresslevel=1)
    gzip_file.write(response.data)
    gzip_file.close()
    response.data = gzip_buffer.getvalue()
    duration = int(1000 * (time.time() - start_time))
    # Fixed: the original used the Python 2 print *statement*, which is a
    # SyntaxError under Python 3 (the rest of this file is Python 3).
    print('{duration}ms to gzip {nbytes} bytes'.format(
        duration=duration, nbytes=nbytes))
    response.headers['Content-Encoding'] = 'gzip'
    response.headers['Vary'] = 'Accept-Encoding'
    response.headers['Content-Length'] = len(response.data)
    return response
def fetch_structure(pdbid, biounit=False):
    """Download the structure in PDB format from the RCSB PDB website.

    Generator: yields the decoded (utf-8) lines of the PDB file.

    Parameters
    ----------
    pdbid : str
        Alpha-numeric PDB identifier.
    biounit : bool
        Download the biological-unit (.pdb1) variant instead of .pdb.
    """
    base_url = 'https://files.rcsb.org/download/'
    pdb_type = '.pdb1' if biounit else '.pdb'
    pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
    try:
        request = Request(pdb_url)
        opener = build_opener()
        url_data = opener.open(request).read()
    except HTTPError as e:
        emsg = '[!] Error fetching structure: ({0}) {1}\n'
        sys.stderr.write(emsg.format(e.code, e.msg))
        return
    gz_handle = None
    try:
        buf = IO(url_data)
        gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
        for line in gz_handle:
            yield line.decode('utf-8')
    except IOError as e:
        # Fixed: IOError has no .code/.msg attributes — the original
        # formatted e.code/e.msg here and would raise AttributeError
        # while reporting the error. Report the exception itself.
        emsg = '[!] Error decompressing structure: {0}\n'
        sys.stderr.write(emsg.format(e))
        return
    finally:
        # Guard: gz_handle stays None if GzipFile construction failed,
        # in which case the original raised NameError here.
        if gz_handle is not None:
            gz_handle.close()
def zipper(response):
    """Gzip-encode the response body when the ?compress query flag is set
    and the client advertises gzip support.

    Non-2xx responses and responses that already have a Content-Encoding
    are returned untouched.
    """
    if not request.args.get("compress"):
        return response
    if 'gzip' not in request.headers.get('Accept-Encoding', '').lower():
        return response
    response.direct_passthrough = False
    outside_2xx = response.status_code < 200 or response.status_code >= 300
    if outside_2xx or 'Content-Encoding' in response.headers:
        return response
    buf = IO()
    compressor = gzip.GzipFile(mode='wb', fileobj=buf)
    compressor.write(response.data)
    compressor.close()
    response.data = buf.getvalue()
    response.headers['Content-Encoding'] = 'gzip'
    response.headers['Vary'] = 'Accept-Encoding'
    response.headers['Content-Length'] = len(response.data)
    return response
def _fetch_structure(pdbid, biounit=False):
    """Download a structure in PDB format from the RCSB PDB website.

    Generator: yields the decoded (utf-8) lines of the PDB file.
    Errors are reported on stderr and the generator simply stops.
    """
    base_url = 'https://files.rcsb.org/download/'
    pdb_type = '.pdb1' if biounit else '.pdb'
    pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
    try:
        request = Request(pdb_url)
        opener = build_opener()
        url_data = opener.open(request).read()
    except HTTPError as e:
        print('[!] Error fetching structure: ({0}) {1}'.format(e.code, e.msg),
              file=sys.stderr)
        return
    gz_handle = None
    try:
        buf = IO(url_data)
        gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
        for line in gz_handle:
            yield line.decode('utf-8')
    except IOError as e:
        # Fixed: IOError carries no .msg attribute — the original accessed
        # e.msg and would crash with AttributeError while reporting.
        print('[!] Error fetching structure: {0}'.format(e), file=sys.stderr)
        return
    finally:
        # Guard against gz_handle being unbound if GzipFile() itself raised.
        if gz_handle is not None:
            gz_handle.close()
def wrapper(*args, **kwargs):
    """Invoke the wrapped callable ``f``, translating requests-level
    failures into ``transport.TransportError``.

    HTTP errors carry the real status code and response body; other
    request failures carry status 0 and the formatted traceback.
    """
    try:
        return f(*args, **kwargs)
    except requests.HTTPError as e:
        buf = IO(e.response.content)
        # Chain the cause so the original requests traceback is preserved.
        raise transport.TransportError(
            'Error in requests\n' + traceback.format_exc(),
            e.response.status_code,
            buf,
        ) from e
    except requests.RequestException as e:
        buf = IO(traceback.format_exc())
        # Fixed: the original used the confusing literal 000; plain 0 means
        # "no HTTP status available" (connection error, timeout, ...).
        raise transport.TransportError(
            'Error in requests\n' + traceback.format_exc(),
            0,
            buf,
        ) from e
def deserialize(self, value):
    """Decode a base64 image data URI into a PIL image, converting it to
    this type's PIL mode when necessary.
    """
    # Strip the "data:image/...;base64," prefix (everything up to the comma).
    image = value[value.find(",") + 1:]
    # Fixed: base64.decodestring was deprecated since 3.1 and removed in
    # Python 3.9; decodebytes is the drop-in replacement.
    image = base64.decodebytes(image.encode('utf8'))
    buffer = IO(image)
    deserialized_image = Image.open(buffer)
    if deserialized_image.mode != self.get_pil_mode():
        deserialized_image = deserialized_image.convert(
            self.get_pil_mode())
    return deserialized_image
def serialize(self, value):
    """Serialize a numpy array or PIL image to a base64 JPEG data URI."""
    if type(value) is np.ndarray:
        pil_img = Image.fromarray(value)
    elif issubclass(type(value), Image.Image):
        pil_img = value
    else:
        raise InvalidArgumentError('value is not a PIL or numpy image')
    out = IO()
    pil_img.save(out, format='JPEG')
    encoded = base64.b64encode(out.getvalue()).decode('utf8')
    return 'data:image/jpeg;base64,' + encoded
def deserialize(self, value):
    """Decode a base64 image data URI into a PIL image; colormapped RGB
    images are converted back to a segmentation mask.

    Raises InvalidArgumentError (chained to the underlying cause) when
    the payload cannot be parsed.
    """
    try:
        image = value[value.find(",") + 1:]
        # Fixed: base64.decodestring was removed in Python 3.9;
        # decodebytes is the drop-in replacement.
        image = base64.decodebytes(image.encode('utf8'))
        buffer = IO(image)
        img = Image.open(buffer)
        if img.mode.startswith('RGB'):
            return self.colormap_to_segmentation(img)
        else:
            return img
    except Exception as e:
        # Fixed: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed and chained for debuggability.
        msg = 'unable to parse expected base64-encoded image'
        raise InvalidArgumentError(msg) from e
def zip_response(json_data):
    """Build a gzip-encoded application/json HttpResponse from
    pre-serialized JSON data.
    """
    compressed = IO()
    with gzip.GzipFile(mode='wb', fileobj=compressed) as zf:
        zf.write(json_data)
    response = HttpResponse(content_type='application/json')
    response.data = compressed.getvalue()
    response.headers['Content-Encoding'] = 'gzip'
    response.headers['Vary'] = 'Accept-Encoding'
    response.headers['Content-Length'] = len(response.data)
    return response
def serialize(self, value):
    """Serialize a numpy array or PIL image to a base64 PNG data URI.

    Single-channel ('L') images are first rendered through the
    segmentation colormap.
    """
    if type(value) is np.ndarray:
        pil_img = Image.fromarray(value)
    elif issubclass(type(value), Image.Image):
        pil_img = value
    else:
        raise InvalidArgumentError(self.name, 'value is not a PIL or numpy image')
    if pil_img.mode == 'L':
        pil_img = self.segmentation_to_colormap(pil_img)
    out = IO()
    pil_img.save(out, format='PNG')
    payload = base64.b64encode(out.getvalue()).decode('utf8')
    return 'data:image/png;base64,' + payload
def run(pdbid, biounit=False):
    """
    Download the structure in PDB format from the RCSB PDB website.

    This function is a generator.

    Parameters
    ----------
    pdbid : str
        The alpha-numeric code of the PBDID.
    biounit : bool
        Whether to download biounit version.

    Yield
    -----
    str (line-by-line)
        The original PBD data.
    """
    base_url = 'https://files.rcsb.org/download/'
    pdb_type = '.pdb1' if biounit else '.pdb'
    pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
    try:
        request = Request(pdb_url)
        opener = build_opener()
        url_data = opener.open(request).read()
    except HTTPError as e:
        emsg = '[!] Error fetching structure: ({0}) {1}\n'
        sys.stderr.write(emsg.format(e.code, e.msg))
        return
    gz_handle = None
    try:
        buf = IO(url_data)
        gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
        for line in gz_handle:
            yield line.decode('utf-8')
    except IOError as e:
        # Fixed: IOError has no .code/.msg attributes — the original would
        # raise AttributeError while formatting the message here.
        emsg = '[!] Error decompressing structure: {0}\n'
        sys.stderr.write(emsg.format(e))
        return
    finally:
        # Guard: gz_handle is None if GzipFile construction itself failed.
        if gz_handle is not None:
            gz_handle.close()
def serialize(self, value):
    """Serialize a numpy array or PIL image to a data URI using this
    type's default output format and PIL mode.
    """
    if type(value) is np.ndarray:
        pil_img = Image.fromarray(value.astype(np.uint8))
    elif issubclass(type(value), Image.Image):
        pil_img = value
    else:
        raise InvalidArgumentError(self.name, 'value is not a PIL or numpy image')
    out = IO()
    if pil_img.mode != self.get_pil_mode():
        pil_img = pil_img.convert(self.get_pil_mode())
    pil_img.save(out, format=self.default_output_format)
    body = base64.b64encode(out.getvalue()).decode('utf8')
    return 'data:image/{format};base64,{body}'.format(
        format=self.default_output_format.lower(), body=body)
def zipper(response):
    """Gzip-compress the response body (with spaces stripped) in place.

    NOTE(review): the Accept-Encoding check is commented out, so gzip is
    served even to clients that did not advertise support — confirm this
    is intentional.
    """
    # accept_encoding = request.headers.get('Accept-Encoding', '')
    # if 'gzip' not in accept_encoding.lower():
    #     return response
    response.direct_passthrough = False
    if (response.status_code < 200 or response.status_code >= 300 or
            'Content-Encoding' in response.headers):
        return response
    # Fixed: Flask's response.data is bytes, and bytes.replace(' ', '')
    # raises TypeError — the replacement pattern must match the body type.
    body = response.data
    if isinstance(body, bytes):
        body = body.replace(b' ', b'')
    else:
        body = body.replace(' ', '')
    gzip_buffer = IO()
    with gzip.GzipFile(mode='wb', fileobj=gzip_buffer) as gzip_file:
        gzip_file.write(body)
    response.data = gzip_buffer.getvalue()
    response.headers['Content-Encoding'] = 'gzip'
    response.headers['Vary'] = 'Accept-Encoding'
    response.headers['Content-Length'] = len(response.data)
    return response
def test_image_serialize_and_deserialize():
    """Round-trip PIL and numpy images through the image serializer and
    check modes/channel counts survive.
    """
    directory = os.path.dirname(os.path.realpath(__file__))
    img = Image.open(os.path.join(directory, 'test_image.jpg'))
    serialized_pil = image().serialize(img)
    deserialized_pil = image().deserialize(serialized_pil)
    assert issubclass(type(deserialized_pil), Image.Image)
    serialize_np_img = image().serialize(np.asarray(img))
    deserialize_np_img = image().deserialize(serialize_np_img)
    assert issubclass(type(deserialize_np_img), Image.Image)
    serialize_np_img = image(channels=1).serialize(np.asarray(img))
    img = serialize_np_img[serialize_np_img.find(",") + 1:]
    # Fixed: base64.decodestring was removed in Python 3.9; decodebytes
    # is the drop-in replacement.
    img = base64.decodebytes(img.encode('utf8'))
    buffer = IO(img)
    deserialized_image = Image.open(buffer)
    assert (deserialized_image.mode == 'L')
    deserialize_np_img = image(channels=4).deserialize(serialize_np_img)
    assert (deserialize_np_img.mode == 'RGBA')
    assert (np.array(deserialize_np_img).shape[2] == 4)
def zipper(response):
    """Gzip the response body when it is worth compressing (2xx, large
    enough, not passthrough) and the client accepts gzip.
    """
    accept_encoding = request.headers.get('Accept-Encoding', '')
    is_2xx = 200 <= response.status_code < 300
    skip = (not is_2xx
            or response.direct_passthrough
            or len(response.data) < minimum_size
            or 'gzip' not in accept_encoding.lower()
            or 'Content-Encoding' in response.headers)
    if skip:
        return response
    buf = IO()
    zf = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=buf)
    zf.write(response.data)
    zf.close()
    response.data = buf.getvalue()
    response.headers['Content-Encoding'] = 'gzip'
    response.headers['Vary'] = 'Accept-Encoding'
    response.headers['Content-Length'] = len(response.data)
    return response
def __init__(self, sizepage=A4, list_xml=None, recibo=True,
             orientation='portrait', logo=None):
    """Render one DANFE (the printed representation of a Brazilian NF-e
    invoice) per XML tree in *list_xml* onto an in-memory PDF canvas
    (``self.oPDF_IO``).

    Parameters: sizepage — reportlab page size; list_xml — iterable of
    parsed NF-e XML trees; recibo — whether to draw the delivery-receipt
    block on the first page; orientation — only 'portrait' is
    implemented; logo — issuer logo forwarded to the drawing helpers.
    """
    # Page geometry in millimetres (A4: 21 x 29,7 cm).
    self.width = 210  # 21 x 29,7cm
    self.height = 297
    self.nLeft = 10
    self.nRight = 10
    self.nTop = 7
    self.nBottom = 15
    # Current vertical line cursor, starts at the top margin.
    self.nlin = self.nTop
    self.logo = logo
    # Freight-responsibility codes -> labels printed in the transport box.
    self.oFrete = {'0': '0 - Emitente', '1': '1 - Dest/Remet',
                   '2': '2 - Terceiros', '9': '9 - Sem Frete'}
    self.oPDF_IO = IO()
    if orientation == 'landscape':
        raise NameError('Rotina não implementada')
    else:
        size = sizepage
    self.canvas = canvas.Canvas(self.oPDF_IO, pagesize=size)
    self.canvas.setTitle('DANFE')
    self.canvas.setStrokeColor(black)
    for oXML in list_xml:
        oXML_cobr = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}cobr")
        self.NrPages = 1
        self.Page = 1
        # Compute the total lines used by item descriptions. With an
        # invoice (fatura) block, only 29 item lines fit on page one.
        nNr_Lin_Pg_1 = 34 if oXML_cobr is None else 29
        # Paginator entries: [ rec_ini , rec_fim , lines , limit_lines ]
        oPaginator = [[0, 0, 0, nNr_Lin_Pg_1]]
        el_det = oXML.findall(".//{http://www.portalfiscal.inf.br/nfe}det")
        if el_det is not None:
            list_desc = []
            nPg = 0
            for nId, item in enumerate(el_det):
                el_prod = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}prod")
                infAdProd = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}infAdProd")
                # Product description (plus any additional info) wrapped
                # to the 51-character column width.
                list_ = wrap(tagtext(oNode=el_prod, cTag='xProd'), 51)
                if infAdProd is not None:
                    list_.extend(wrap(infAdProd.text, 51))
                list_desc.append(list_)
                # Number of lines needed for this item's description.
                nLin_Itens = len(list_)
                if (oPaginator[nPg][2] + nLin_Itens) >= oPaginator[nPg][3]:
                    # Item does not fit: open a new continuation page
                    # (77 item lines available there).
                    oPaginator.append([0, 0, 0, 77])
                    nPg += 1
                    oPaginator[nPg][0] = nId
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] = nLin_Itens
                else:
                    # +1 because the end bound is exclusive (range-style).
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] += nLin_Itens
        self.NrPages = len(oPaginator)  # computing the page count
        if recibo:
            self.recibo_entrega(oXML=oXML)
        self.ide_emit(oXML=oXML)
        self.destinatario(oXML=oXML)
        if oXML_cobr is not None:
            self.faturas(oXML=oXML_cobr)
        self.impostos(oXML=oXML)
        self.transportes(oXML=oXML)
        self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPaginator[0],
                      list_desc=list_desc)
        self.adicionais(oXML=oXML)
        # Render the remaining pages of this XML: continuation pages
        # carry only the header block and the product table.
        for oPag in oPaginator[1:]:
            self.newpage()
            self.ide_emit(oXML=oXML)
            self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPag,
                          list_desc=list_desc, nHeight=77)
        self.newpage()
    self.canvas.save()
def deserialize(self, value):
    """Decode a base64 image data URI into a PIL image."""
    # Strip the "data:image/...;base64," prefix (up to the first comma).
    image = value[value.find(",") + 1:]
    # Fixed: base64.decodestring was removed in Python 3.9; decodebytes
    # is the drop-in replacement.
    image = base64.decodebytes(image.encode('utf8'))
    buffer = IO(image)
    return Image.open(buffer)
def __enter__(self): self._stdout = sys.stdout sys.stdout = self._stringio = IO() return self
def __init__(self, sizepage=A4, list_xml=None, recibo=True,
             orientation='portrait', logo=None, cce_xml=None,
             timezone=None):
    """Render one DANFE (printed NF-e invoice) per XML tree in
    *list_xml* onto an in-memory PDF canvas (``self.oPDF_IO``), followed
    by an optional CC-e (correction letter) page per entry of *cce_xml*.

    timezone is forwarded to the date-formatting helpers; all other
    parameters as in the basic DANFE generator.
    """
    # Register the Nimbus Sans fonts shipped next to this module.
    path = os.path.join(os.path.dirname(__file__), 'fonts')
    pdfmetrics.registerFont(
        TTFont('NimbusSanL-Regu',
               os.path.join(path, 'NimbusSanL Regular.ttf')))
    pdfmetrics.registerFont(
        TTFont('NimbusSanL-Bold',
               os.path.join(path, 'NimbusSanL Bold.ttf')))
    # Page geometry in millimetres (A4: 21 x 29,7 cm).
    self.width = 210  # 21 x 29,7cm
    self.height = 297
    self.nLeft = 10
    self.nRight = 10
    self.nTop = 7
    self.nBottom = 8
    # Current vertical line cursor, starts at the top margin.
    self.nlin = self.nTop
    self.logo = logo
    # Freight-responsibility codes -> labels printed in the transport box.
    self.oFrete = {
        '0': '0 - Contratação por conta do Remetente (CIF)',
        '1': '1 - Contratação por conta do Destinatário (FOB)',
        '2': '2 - Contratação por conta de Terceiros',
        '3': '3 - Transporte Próprio por conta do Remetente',
        '4': '4 - Transporte Próprio por conta do Destinatário',
        '9': '9 - Sem Ocorrência de Transporte'
    }
    self.oPDF_IO = IO()
    if orientation == 'landscape':
        raise NameError('Rotina não implementada')
    else:
        size = sizepage
    self.canvas = canvas.Canvas(self.oPDF_IO, pagesize=size)
    self.canvas.setTitle('DANFE')
    self.canvas.setStrokeColor(black)
    for oXML in list_xml:
        oXML_cobr = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}cobr")
        self.NrPages = 1
        self.Page = 1
        # Compute the total lines used by item descriptions. With an
        # invoice (fatura) block, only 26 item lines fit on page one.
        nNr_Lin_Pg_1 = 30 if oXML_cobr is None else 26
        # Paginator entries: [ rec_ini , rec_fim , lines , limit_lines ]
        oPaginator = [[0, 0, 0, nNr_Lin_Pg_1]]
        el_det = oXML.findall(".//{http://www.portalfiscal.inf.br/nfe}det")
        if el_det is not None:
            list_desc = []
            list_cod_prod = []
            nPg = 0
            for nId, item in enumerate(el_det):
                el_prod = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}prod")
                infAdProd = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}infAdProd")
                # Product description (plus any additional info) wrapped
                # to the 50-character column width.
                list_ = wrap(tagtext(oNode=el_prod, cTag='xProd'), 50)
                if infAdProd is not None:
                    list_.extend(wrap(infAdProd.text, 50))
                list_desc.append(list_)
                # Product code wrapped to the 14-character column width.
                list_cProd = wrap(tagtext(oNode=el_prod, cTag='cProd'), 14)
                list_cod_prod.append(list_cProd)
                # Number of lines needed for this item's description.
                nLin_Itens = len(list_)
                if (oPaginator[nPg][2] + nLin_Itens) >= oPaginator[nPg][3]:
                    # Item does not fit: open a new continuation page
                    # (77 item lines available there).
                    oPaginator.append([0, 0, 0, 77])
                    nPg += 1
                    oPaginator[nPg][0] = nId
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] = nLin_Itens
                else:
                    # +1 because the end bound is exclusive (range-style).
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] += nLin_Itens
        self.NrPages = len(oPaginator)  # computing the page count
        if recibo:
            self.recibo_entrega(oXML=oXML, timezone=timezone)
        self.ide_emit(oXML=oXML, timezone=timezone)
        self.destinatario(oXML=oXML, timezone=timezone)
        if oXML_cobr is not None:
            self.faturas(oXML=oXML_cobr, timezone=timezone)
        self.impostos(oXML=oXML)
        self.transportes(oXML=oXML)
        self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPaginator[0],
                      list_desc=list_desc, list_cod_prod=list_cod_prod)
        self.adicionais(oXML=oXML)
        # Render the remaining pages of this XML: continuation pages
        # carry only the header block and the product table.
        for oPag in oPaginator[1:]:
            self.newpage()
            self.ide_emit(oXML=oXML, timezone=timezone)
            self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPag,
                          list_desc=list_desc, nHeight=77,
                          list_cod_prod=list_cod_prod)
        self.newpage()
        if cce_xml:
            # Append one CC-e correction-letter page per CC-e XML.
            for xml in cce_xml:
                self._generate_cce(cce_xml=xml, oXML=oXML,
                                   timezone=timezone)
                self.newpage()
    self.canvas.save()
def open(self, request):
    """Fetch request.url through the shared session and return the
    response body wrapped in an in-memory buffer.

    Raises for non-success HTTP status codes.
    """
    response = self._session.get(request.url)
    response.raise_for_status()
    return IO(response.content)
def gzip_compress(data):
    """Gzip-compress *data* (bytes) and return the compressed bytes."""
    compressed_data = IO()
    # "with" guarantees close() runs — and thus the gzip trailer (CRC +
    # size) is written — even if write() raises; the original leaked the
    # GzipFile on error.
    with gzip.GzipFile(fileobj=compressed_data, mode='w') as g:
        g.write(data)
    return compressed_data.getvalue()
def gzip_decompress(data):
    """Decompress gzip-encoded *data* (bytes) and return the raw payload."""
    # gzip.decompress is the stdlib one-liner for in-memory data and
    # handles cleanup internally; the original left the GzipFile (and its
    # BytesIO) unclosed, relying on garbage collection.
    return gzip.decompress(data)
def gzip_context():
    """Generator-style context: yields (gzip_file, gzip_buffer) where
    gzip_file is a writable GzipFile whose output accumulates in the
    BytesIO gzip_buffer; the file is closed when the context ends.
    """
    gzip_buffer = IO()
    gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer)
    try:
        yield gzip_file, gzip_buffer
    finally:
        # Fixed: close in a finally so the gzip trailer is written even
        # when the consumer raises into the generator — the original
        # skipped close() on error, leaving the buffer truncated/corrupt.
        gzip_file.close()