def decompress(self, body):
    """Decompress a gzip-encoded iterable body and return the raw bytes."""
    buf = IO()
    
    # Collect the (possibly chunked) body into a single seekable buffer.
    for chunk in body:
        buf.write(chunk)
    
    buf.seek(0)
    
    gzfile = GzipFile(mode='rb', fileobj=buf)
    data = gzfile.read()
    gzfile.close()
    
    del buf
    del gzfile
    
    return data
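# A minimal round-trip sketch for the helper above (the ``filter_`` instance name
# is hypothetical; ``decompress`` touches no instance state, so any instance works):
#
#     from gzip import GzipFile
#     from io import BytesIO
#
#     raw = BytesIO()
#     with GzipFile(mode='wb', fileobj=raw) as gz:
#         gz.write(b'hello world')
#
#     payload = raw.getvalue()
#     chunks = [payload[:10], payload[10:]]  # simulate a chunked response body
#     assert filter_.decompress(chunks) == b'hello world'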
def __call__(self, request, status, headers, body):
    """Compress, if able, the response.
    
    This has the side effect that if your application does not declare a
    content-length, this filter will.
    """
    # TODO: Remove some of this debug logging; it'll slow things down and isn't really needed.
    
    if request.get('wsgi.compression', True) is False:
        log.debug("Bypassing compression at application's request.")
        return status, headers, body
    
    if request.get('wsgi.async') and hasattr(body, '__call__'):
        log.debug("Can not compress async responses, returning original response.")
        return status, headers, body
    
    if b'gzip' not in request.get('HTTP_ACCEPT_ENCODING', b''):
        log.debug("Browser support for GZip encoding not found, returning original response.")
        return status, headers, body
    
    def find(header):
        """Determine the value and index into headers for the given header, or (None, None) if not found."""
        for i, (name, value) in enumerate(headers):
            if name.lower() == header:
                return value, i
        
        return None, None
    
    if find(b'content-encoding')[0]:
        log.debug("Content encoding already defined, returning original response.")
        return status, headers, body
    
    ctype, ctypeidx = find(b'content-type')
    
    if not ctype or not ctype.startswith((b'text/', b'application/')) or b'zip' in ctype:
        log.debug("Encountered uncompressable Content-Type (%s), returning original response.", ctype)
        return status, headers, body
    
    clength, clengthidx = find(b'content-length')
    
    headers.append((b"Content-Encoding", b'gzip'))
    
    # We have to read the entire body into a buffer before we can compress it.
    # This is because we need to determine the final Content-Length.
    # TODO: If the Content-Length is > 4MiB, we should use a tmpfile on-disk instead!
    
    buf = IO()
    compressed = GzipFile(mode='wb', compresslevel=self.level, fileobj=buf)
    
    for chunk in body:
        compressed.write(chunk)
    
    compressed.close()
    del compressed
    
    length = buf.tell()
    buf.seek(0)
    
    if clength:
        clength = int(clength)
        log.debug("Content-Length: %d - Compressed: %d - Savings: %d (%d%%)",
                clength, length, clength - length, length * 100 // clength)
        
        if length > clength:
            log.warn("Compression increased size of response!")
    
    length = (b'Content-Length', unicode(length).encode('ascii'))
    
    if clength:
        headers[clengthidx] = length
    else:
        headers.append(length)
    
    # Stream the compressed buffer back out in 4 KiB chunks.
    return status, headers, iter(partial(buf.read, 4096), b'')
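# A minimal sketch of how the filter might be exercised directly (the
# ``CompressionFilter`` name and its ``level`` keyword are assumptions; only the
# ``__call__`` signature above comes from this module):
#
#     filter_ = CompressionFilter(level=6)
#     request = {'HTTP_ACCEPT_ENCODING': b'gzip, deflate'}
#     headers = [(b'Content-Type', b'text/html'), (b'Content-Length', b'11')]
#
#     status, headers, body = filter_(request, b'200 OK', headers, [b'Hello world'])
#
#     # headers now carry (b'Content-Encoding', b'gzip') and a recalculated
#     # Content-Length; body is an iterator producing the gzip-compressed chunks.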
def render(self, encoding='ascii'):
    """Incrementally render the tag stream, yielding encoded output at each flush and once at the end."""
    indentation = 0
    text = False
    stack = []
    buf = IO()
    
    for k, t in self:
        if k == 'enter':
            indent = getattr(t, 'indent', True)
            stack.append(t)
            
            if t.strip:
                continue
            
            if text and indent:
                buf.write('\n')
            
            if indent:
                buf.write(' ' * indentation)
            
            for element in t.enter():
                buf.write(element.encode(encoding))
            
            if indent:
                buf.write('\n')
                indentation += 1
            
            text = False
            continue
        
        if k == 'exit':
            indent = getattr(t, 'indent', True)
            stack.pop()
            
            if t.strip:
                continue
            
            if indent:
                indentation -= 1
            
            if not t.simple:
                if text and indent:
                    buf.write('\n')
                
                if indent:
                    buf.write(' ' * indentation)
            
            for element in t.exit():
                buf.write(element.encode(encoding))
            
            if not t.simple or t.children:
                buf.write('\n')
            
            text = False
            continue
        
        if k == 'text':
            indent = getattr(stack[-1], 'indent', True)
            
            if not text and indent:
                buf.write(' ' * indentation)
            
            t = t.encode(encoding)
            buf.write(t.replace('\n', '\n' + ' ' * indentation) if indent else t)
            
            text = True
        
        if k == 'flush':
            yield buf.getvalue()
            del buf
            buf = IO()
    
    yield buf.getvalue()
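# A hedged illustration of the (kind, token) event stream render() consumes; the
# ``Tag`` class below is hypothetical and only mirrors the attribute and method
# names the generator actually touches (indent, strip, simple, children,
# enter(), exit()):
#
#     class Tag(object):
#         indent = True      # participate in pretty-printed indentation
#         strip = False      # True would omit the tag's own markup
#         simple = False     # "simple" tags have no separate closing markup
#         children = True
#
#         def __init__(self, name):
#             self.name = name
#
#         def enter(self):
#             yield u'<' + self.name + u'>'
#
#         def exit(self):
#             yield u'</' + self.name + u'>'
#
# A stream such as ('enter', Tag(u'p')), ('text', u'Hello'), ('exit', Tag(u'p'))
# renders the paragraph with its text on an indented line of its own, while a
# ('flush', None) event causes the output buffered so far to be yielded
# immediately.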