def graphs( self ):
    """ Create dot format graphs from the current :class:`Animation` object.

        Each entry in the returned list is the dot source describing the
        state of the graph during one frame of the animation.

        Returns
        -------
        graphs: list of str
    """
    # Each step carries per-frame formatting (labels/highlighting); E and V
    # are the unions of edges and nodes over all animation steps.
    steps, E, V = self.edges_nodes
    frames = []
    for n, step in enumerate( steps ):
        # One digraph per animation frame.
        lines = [ 'digraph G {' ]
        for v in V:
            lines.append( '"{}" {};'.format( quote( str( v ) ), step.node_format( v ) ) )
        for e in E:
            lines.append( '"{}" -> "{}" {};'.format( quote( str( e[ 0 ] ) ),
                                                     quote( str( e[ 1 ] ) ),
                                                     step.edge_format( e ) ) )
        lines.append( '}' )
        frames.append( '\n'.join( lines ) )
    return frames
def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
    """
    Set a cookie.

    Args:
      name: the cookie name.
      value: the cookie value.
      max_age: optional, seconds of cookie's max age.
      expires: optional, unix timestamp, datetime or date object that
        indicate an absolute time of the expiration time of cookie. Note
        that if expires specified, the max_age will be ignored.
      path: the cookie path, default to '/'.
      domain: the cookie domain, default to None.
      secure: if the cookie secure, default to False.
      http_only: if the cookie is for http only, default to True for
        better safety (client-side script cannot access cookies with
        HttpOnly flag).

    >>> r = Response()
    >>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
    >>> r._cookies
    {'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
    """
    # Lazily create the cookie store on first use.
    if not hasattr(self, '_cookies'):
        self._cookies = {}
    parts = ['%s=%s' % (eutils.quote(name), eutils.quote(value))]
    if expires is not None:
        # An absolute expiry wins over max_age.  Numeric values are taken
        # as unix timestamps; date/datetime objects are converted to UTC.
        if isinstance(expires, (float, int, long)):
            parts.append('Expires=%s' % datetime.datetime.fromtimestamp(
                expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
        if isinstance(expires, (datetime.date, datetime.datetime)):
            parts.append('Expires=%s' % expires.astimezone(UTC_0).strftime(
                '%a, %d-%b-%Y %H:%M:%S GMT'))
    elif isinstance(max_age, (int, long)):
        parts.append('Max-Age=%d' % max_age)
    parts.append('Path=%s' % path)
    if domain:
        parts.append('Domain=%s' % domain)
    if secure:
        parts.append('Secure')
    if http_only:
        parts.append('HttpOnly')
    self._cookies[name] = '; '.join(parts)
def graphs(self):
    """Return one dot-format digraph string per animation step."""
    steps = self.steps()
    # Union of every node and edge seen in any step; elements absent from
    # a given step are still emitted and styled by that step's formatter.
    all_nodes, all_edges = set(), set()
    for step in steps:
        all_nodes |= step.V
        all_edges |= step.E
    rendered = []
    for step in steps:
        lines = ['digraph G {']
        lines.extend('"{}" {};'.format(quote(str(v)), step.node_format(v))
                     for v in all_nodes)
        lines.extend('"{}" -> "{}" {};'.format(quote(str(e[0])), quote(str(e[1])),
                                               step.edge_format(e))
                     for e in all_edges)
        lines.append('}')
        rendered.append('\n'.join(lines))
    return rendered
def getBody(msg):
    """Extract the plain-text body and attachment filenames from *msg*.

    Walks every MIME part: text/plain parts are decoded (using their
    declared charset, falling back to utf-8 on unknown charsets) and
    joined into one body string; other non-container parts contribute
    their filenames to the attachment list.

    Returns:
        (body, attchs): the decoded body text and the attachment names.
    """
    body = []
    attchs = []
    for part in msg.walk():
        # fix that, now only explicitly written content type text/plain
        # is passed to ES
        # check for base64? to be sure its attch? --> if none then plain?
        missing = object()
        value = part.get('content-type', missing)
        if value is not missing:
            if part.get_content_type() == 'text/plain':
                charset = part.get_param('charset')
                if charset:
                    charset = quote(charset)
                else:
                    charset = quote('utf-8')
                # create unicode representation of DATA
                try:
                    body.append(part.get_payload().decode(charset, 'ignore'))
                except LookupError:
                    # BUG FIX: append the fallback decoding; the original
                    # replaced the accumulator list with a plain string,
                    # which broke the final ''.join(body).
                    body.append(part.get_payload().decode('utf-8', 'ignore'))
            # find attachments names
            if (part.get_content_type() != 'text/plain' and
                    part.get_content_type() != 'text/html' and
                    part.get_content_maintype() != 'multipart' and
                    part.get_content_maintype() != 'message' and
                    part.get_content_type() != 'message/rfc822'):
                fileName = part.get_filename(None)
                if fileName is None:
                    fileName = ''
                else:
                    if type(fileName) is unicode:
                        fileName = fileName.encode('utf8', 'ignore')
                    # BUG FIX: always record the filename; the original
                    # encoded unicode names but never appended them.
                    attchs.append(fileName)
    body = ''.join(body)
    return (body, attchs)
def _formatparam(param, value=None, quote=True): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. If value is a three tuple (charset, language, value), it will be encoded according to RFC2231 rules. If it contains non-ascii characters it will likewise be encoded according to RFC2231 rules, using the utf-8 charset and a null language. """ if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) return '%s=%s' % (param, value) else: try: value.encode('ascii') except UnicodeEncodeError: param += '*' value = utils.encode_rfc2231(value, 'utf-8', '') return '%s=%s' % (param, value) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def sign(self, method, url, headers, expires=None): parsed = urlparse(url) path = parsed.path sig_headers = sorted([[k.lower(), v] for k, v in headers.items() if k.lower().startswith('x-amz-')], key=lambda i: i[0]) sig_headers = "\n".join(( ":".join((key, value)) for key, value in sig_headers )) parts = [method, headers.get('Content-MD5', ''), headers.get('Content-Type', ''), # date provided in x-amz-date if not from Expires str(expires) if expires is not None else ""] if sig_headers: parts.append(sig_headers) parts.append(quote(path)) string_to_sign = "\n".join(parts) return base64.b64encode(hmac.new( self.secret.encode(), string_to_sign.encode(), digestmod=hashlib.sha1, ).digest()).decode()
def _formatparam(param, value=None, quote=True): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. If value is a three tuple (charset, language, value), it will be encoded according to RFC2231 rules. If it contains non-ascii characters it will likewise be encoded according to RFC2231 rules, using the utf-8 charset and a null language. """ if value is not None and len(value) > 0: # A tuple is used for RFC 2231 encoded parameter values where items # are (charset, language, value). charset is a string, not a Charset # instance. RFC 2231 encoded values are never quoted, per RFC. if isinstance(value, tuple): # Encode as per RFC 2231 param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) return '%s=%s' % (param, value) else: try: value.encode('ascii') except UnicodeEncodeError: param += '*' value = utils.encode_rfc2231(value, 'utf-8', '') return '%s=%s' % (param, value) # BAW: Please check this. I think that if quote is set it should # force quoting even if not necessary. if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def graphs(self, use_cases, actors, uc_diagram):
    """Build one dot graph per animation step for a use-case diagram.

    Actors are split into two halves pinned to the min/max ranks so they
    frame the use cases; clusters, nodes and edges come from the union of
    all steps, with per-step formatting deciding visibility/highlighting.

    Returns
    -------
    graphs: list of str
    """
    steps = self.steps()
    C, V, E = dict(), set(), set()
    for step in steps:
        C.update(step.C)
        V |= step.V
        E |= step.E
    graphs = []
    # BUG FIX: use floor division — under Python 3 `len(actors) / 2` is a
    # float and slicing with it raises TypeError (identical result on 2.x).
    half = len(actors) // 2
    actors_first_half, actors_second_half = actors[:half], actors[half:]
    for n, s in enumerate(steps):
        graph = [
            'digraph G {',
            'fontpath="%s"' % font_path,
            'fontname="%s"' % font_name,
            'bgcolor=%s' % BACKGROUND_COLOR,
            'pad=".25"',
            'ranksep=".75"',
            'nodesep=".75"',
            'rankdir="LR"' if uc_diagram else '',
            'node[style=filled,fillcolor=%s,width=2,height=2,fontpath="%s",fontname="%s"]' % (NODE_INSIDE_COLOR, font_path, font_name),
            'edge[dir=back, arrowtail=vee, fontpath="%s", fontname="%s"]' % (font_path, font_name),
            '{rank=min;%s}' % "".join(list(map(lambda x: '"%s";' % x, actors_first_half))),
            '{rank=same;%s}' % "".join(list(map(lambda x: '"%s";' % x, use_cases))),
            '{rank=max;%s}' % "".join(list(map(lambda x: '"%s";' % x, actors_second_half)))
        ]
        counter = 0
        # for each cluster handle contents
        for key in s.C:
            counter = self.handle_cluster(s.C, key, counter, graph, s)
        # NOTE(review): the .encode('utf-8') below is a Python 2 idiom —
        # under Python 3 it injects a bytes repr (b'...') into the node
        # line; confirm the target interpreter before changing it.
        for v in V:
            graph.append(u'"{}" {};'.format(
                quote(str(v)).encode('utf-8'),
                s.node_format(v, hide=v not in s.V)))
        for e in E:
            graph.append('"{}" -> "{}" {};'.format(
                quote(str(e[0])), quote(str(e[1])),
                s.edge_format(e, hide=e not in s.E)))
        graph.append('}')
        graphs.append('\n'.join(graph))
    return graphs
def generate_header(value, params=()):
    """
    Given unicode *value* and parameters *params* return a string
    suitable for use as a value of a header.

    Usage examples:

    >>> generate_header('value')
    'value'
    >>> generate_header('value', {'param': 'val'})
    'value; param="val"'

    :param value: 'Main' value of the header
    :param params: A dict or mapping of unicode strings (optional).
    """
    # BUG FIX: the doctest above calls generate_header('value') with no
    # params, but the parameter previously had no default and the call
    # raised TypeError; an empty default makes the documented usage work.
    parts = [quote(value)]
    for key in params:
        parts.append('%s="%s"' % (key, quote(params[key])))
    return '; '.join(parts)
def rawEmail(msg, envelope, metaData):
    """Decode a non-multipart message payload and wrap it for indexing.

    Uses the message's declared charset (falling back to utf-8, also when
    the declared charset is unknown) and builds the elasticEmail record
    with an empty attachment list.
    """
    declared = msg.get_param('charset')
    charset = quote(declared) if declared else quote('utf-8')
    try:
        body = msg.get_payload().decode(charset, 'ignore')
    except LookupError:
        # Unknown/garbled charset name: decode as utf-8 instead.
        body = msg.get_payload().decode('utf-8', 'ignore')
    return elasticEmail(envelope, metaData, [], body)
def encode_header_value(value: Union[str, datetime, Number],
                        params: Dict[str, Union[str, datetime, Number]], *,
                        encoding: str = 'ascii',
                        always_quote: bool = False) -> bytes:
    """
    Encode a structured header value for transmission over the network.

    If a parameter value cannot be encoded to the given encoding, the
    :rfc:`5987` method is used: an alternate ``key*=`` field carries the
    :rfc:`2231` encoding while the original field gets an urlencoded
    version of the value.

    Datetimes (in *value* or any parameter value) are formatted per
    :rfc:`822`; numeric values are converted to strings.

    NOTE(review): the original docs also described special handling for
    ``True``/``False``/``None`` parameter values, but the code below
    stringifies every value — confirm against the callers.

    :param value: the main value of the header
    :param params: a dictionary of parameter names and values
    :param encoding: the character encoding to use (either ``ascii`` or
        ``iso-8859-1``)
    :param always_quote: always enclose the parameter values in quotes,
        even if it's unnecessary
    :return: the encoded bytestring
    """
    def stringify(val):
        # Strings pass through; datetimes become RFC 822 GMT strings;
        # everything else is str()-converted.
        if isinstance(val, str):
            return val
        elif isinstance(val, datetime):
            return format_datetime(val.astimezone(timezone.utc), usegmt=True)
        else:
            return str(val)

    assert check_argument_types()
    buffer = stringify(value).encode(encoding)
    for key, param_value in params.items():
        key = key.encode(encoding)
        buffer += b'; ' + key
        text = stringify(param_value)
        quoted_value = quote(text)
        add_quotes = always_quote or quoted_value != text
        try:
            encoded = quoted_value.encode(encoding)
        except UnicodeEncodeError:
            # Value not representable in the target charset: emit both an
            # urlencoded plain field and an RFC 2231 `key*=` field.
            ascii_value = urllib_quote(quoted_value).encode('ascii')
            rfc2231_value = encode_rfc2231(quoted_value, 'utf-8').encode('utf-8')
            if add_quotes:
                ascii_value = b'"' + ascii_value + b'"'
                rfc2231_value = b'"' + rfc2231_value + b'"'
            buffer += b'=' + ascii_value + b'; ' + key + b'*=' + rfc2231_value
        else:
            if add_quotes:
                encoded = b'"' + encoded + b'"'
            buffer += b'=' + encoded
    return buffer
def content_disposition(disposition, filename):
    """
    Yield the name/value pair for a Content-Disposition header with the
    given *disposition* and *filename*.  The filename needs to be the base
    name of the path, i.e. instead of ``~/file.txt`` you need to pass in
    ``file.txt``.  The filename is automatically quoted.
    """
    yield 'Content-Disposition'
    yield '{}; filename="{}"'.format(disposition, quote(filename))
def node_format( self, v ):
    """Return the dot attribute string for node *v* ('' when default).

    A label is taken from ``self.lV``; a highlight colour from ``self.hV``;
    nodes absent from the current frame's ``self.V`` (and unhighlighted)
    are rendered invisible so graph layout stays stable across frames.
    """
    attrs = []
    if v in self.lV:
        attrs.append( 'label="{}"'.format( quote( str( self.lV[ v ] ) ) ) )
    if v in self.hV:
        attrs.append( 'color={}'.format( self.hV[ v ] ) )
    elif v not in self.V:
        attrs.append( 'style=invis' )
    return '[{}]'.format( ', '.join( attrs ) ) if attrs else ''
def _formatparam(param, value=None, quote=True): if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def _formatparam(param, value = None, quote = True): if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def edge_format(self, e):
    """Return the dot attribute string for edge *e* ('' when default).

    A label comes from ``self.lE``; a highlight colour from ``self.hE``;
    edges missing from the current frame's ``self.E`` (and unhighlighted)
    are drawn invisible so the layout does not jump between frames.
    """
    attrs = []
    if e in self.lE:
        attrs.append('label="{}"'.format(quote(str(self.lE[e]))))
    if e in self.hE:
        attrs.append('color={}'.format(self.hE[e]))
    elif e not in self.E:
        attrs.append('style=invis')
    return '[{}]'.format(', '.join(attrs)) if attrs else ''
def graphs(self, layout='neato'):
    """Render one dot graph per animation step using the given layout engine."""
    steps = self.steps()
    nodes, edges = set(), set()
    for step in steps:
        nodes |= step.V
        edges |= step.E
    rendered = []
    for frame_no, frame in enumerate(steps):
        edge_symbol = "--"
        lines = ['strict graph G {']
        # NOTE(review): this tests the local list `lines`, which is never a
        # Graph instance, so the digraph branch is unreachable — the check
        # most likely should inspect the animated graph object; confirm the
        # intent before changing it (behavior preserved here).
        if isinstance(lines, Graph):
            edge_symbol = "->"
            lines = ['digraph G {']
        lines.append('layout="{}"'.format(layout))
        lines.append('ordering=out;')
        for v in nodes:
            lines.append('"{}" {};'.format(quote(v.getNodeName()),
                                           frame.node_format(v)))
        for e in edges:
            lines.append('"{}" {} "{}" {};'.format(quote(e.frm.getNodeName()),
                                                   edge_symbol,
                                                   quote(e.to.getNodeName()),
                                                   frame.edge_format(e)))
        lines.append('}')
        # Debug output preserved from the original implementation.
        print('\n'.join(lines))
        rendered.append('\n'.join(lines))
    return rendered
def graphs(self):
    """Return a dot digraph per step, honouring rankdir and rank groups."""
    steps = self.steps()
    nodes, edges = set(), set()
    for step in steps:
        nodes |= step.V
        edges |= step.E
    rendered = []
    for step in steps:
        # Header: graph opener, rank direction, then any same-rank groups.
        lines = ['digraph G {', 'rankdir=' + self._rankdir]
        for group in self._rank_same:
            lines.append('{{rank = same; {};}};'.format('; '.join(group)))
        for v in nodes:
            lines.append('"{}" {};'.format(quote(str(v)), step.node_format(v)))
        for e in edges:
            lines.append('"{}" -> "{}" {};'.format(quote(str(e[0])),
                                                   quote(str(e[1])),
                                                   step.edge_format(e)))
        lines.append('}')
        rendered.append('\n'.join(lines))
    return rendered
def _formatparam(param, value=None, quote=True): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. """ if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def _formatparam(param, value=None, quote=True): if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) return '%s=%s' % (param, value) try: value.encode('ascii') except UnicodeEncodeError: param += '*' value = utils.encode_rfc2231(value, 'utf-8', '') return '%s=%s' % (param, value) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) return '%s=%s' % (param, value) else: return param
def _formatparam(param, value=None, quote=True): """This is _formatparam from Python 2.7""" if value is not None and len(value) > 0: # A tuple is used for RFC 2231 encoded parameter values where items # are (charset, language, value). charset is a string, not a Charset # instance. if isinstance(value, tuple): # Encode as per RFC 2231 param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) # BAW: Please check this. I think that if quote is set it should # force quoting even if not necessary. if quote or message.tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def _formatparam(param, value = None, quote = True): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. If value is a three tuple (charset, language, value), it will be encoded according to RFC2231 rules. """ if value is not None and len(value) > 0: if isinstance(value, tuple): param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param return
def handle_cluster(self, C, key, counter, graph, s):
    """Emit a dot ``subgraph cluster_N`` for *key*, recursing into nested
    dict entries, and return the next free cluster counter."""
    graph.append('subgraph cluster_%i {' % counter)
    for node in C[key]:
        if isinstance(node, dict):
            # Nested cluster: recurse for each sub-key, threading the counter.
            for subkey in node:
                counter = self.handle_cluster(node, subkey, counter, graph, s)
        else:
            graph.append('"{}" {};'.format(quote(str(node)), s.node_format(node)))
    # Colour the cluster: visible-and-unhighlighted, highlighted, or default.
    if key in s.cV and key not in s.hC:
        graph.append('color=%s' % CLUSTER_COLOR)
    elif key in s.hC:
        graph.append('color=%s' % CLUSTER_HIGHLIGHT_COLOR)
    else:
        graph.append('color=%s' % CLUSTER_DEF_COLOR)
    graph.append('}')
    return counter + 1
def set_cookie(self, name: str, value, *, domain: str = None, path: str = None,
               max_age: Union[int, timedelta] = None, expires: datetime = None,
               secure: bool = False, httponly: bool = False) -> None:
    """
    Set a cookie in the response headers.

    :param name: name of the cookie
    :param value: value for the cookie
    :param domain: the domain the cookie applies to
    :param path: the path the cookie applies to
    :param max_age: maximum age of this cookie (in seconds or as a timedelta)
    :param expires: expiration date of the cookie (must be timezone aware)
    :param secure: ``True`` if the cookie should only be sent across secure
        (HTTPS) connections
    :param httponly: ``True`` if the cookie should not be accessible from
        client-side scripts
    """
    assert check_argument_types()
    # NOTE(review): value is passed to quote() as-is — the original docs
    # claimed str conversion; confirm quote() accepts non-str values.
    cookie = '%s="%s"' % (name, quote(value))
    self.add_header('set-cookie', cookie, domain=domain, path=path,
                    maxAge=max_age, expires=expires, secure=secure,
                    httponly=httponly)
def _formatparam(param, value=None, quote=True): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. """ if value is not None and len(value) > 0: # A tuple is used for RFC 2231 encoded parameter values where items # are (charset, language, value). charset is a string, not a Charset # instance. if isinstance(value, tuple): # Encode as per RFC 2231 param += '*' value = utils.encode_rfc2231(value[2], value[0], value[1]) # BAW: Please check this. I think that if quote is set it should # force quoting even if not necessary. if quote or tspecials.search(value): return '%s="%s"' % (param, utils.quote(value)) else: return '%s=%s' % (param, value) else: return param
def _generate_multipart_form(self, parts):
    """Generate a multipart/form-data message.

    This is very loosely based on the email module in the Python standard
    library, which mangles embedded binary data in version-dependent
    ways; the tiny MIME subset needed here is therefore implemented
    directly.

    :return: a tuple of two elements: the Content-Type of the message,
        and the entire encoded message as a byte string.
    """
    # Render every subpart first so a safe boundary can be derived from
    # the actual payload bytes.
    rendered = []
    for is_binary, name, value in parts:
        part_buf = io.BytesIO()
        if is_binary:
            ctype = 'application/octet-stream'
            # RFC 7578 says that the filename parameter isn't mandatory
            # in our case, but without it cgi.FieldStorage tries to
            # decode as text on Python 3.
            cdisp = 'form-data; name="%s"; filename="%s"' % (
                quote(name), quote(name))
        else:
            ctype = 'text/plain; charset="utf-8"'
            cdisp = 'form-data; name="%s"' % quote(name)
        self._write_headers(part_buf, [
            ('MIME-Version', '1.0'),
            ('Content-Type', ctype),
            ('Content-Disposition', cdisp),
        ])
        if is_binary:
            if not isinstance(value, bytes):
                raise TypeError('bytes payload expected: %s' % type(value))
            part_buf.write(value)
        else:
            if not isinstance(value, str):
                raise TypeError('str payload expected: %s' % type(value))
            # Normalise every newline flavour to CRLF, leaving the final
            # line unterminated.
            lines = re.split(r'\r\n|\r|\n', value)
            for line in lines[:-1]:
                part_buf.write(line.encode('UTF-8'))
                part_buf.write(b'\r\n')
            part_buf.write(lines[-1].encode('UTF-8'))
        rendered.append(part_buf.getvalue())
    # Create a boundary that cannot collide with the rendered payloads.
    boundary = self._make_boundary(b'\r\n'.join(rendered))
    # Write the multipart headers, followed by all the parts.
    buf = io.BytesIO()
    ctype = 'multipart/form-data; boundary="%s"' % quote(boundary)
    self._write_headers(buf, [
        ('MIME-Version', '1.0'),
        ('Content-Type', ctype),
    ])
    for encoded_part in rendered:
        self._write_boundary(buf, boundary)
        buf.write(encoded_part)
        buf.write(b'\r\n')
    self._write_boundary(buf, boundary, closing=True)
    return ctype, buf.getvalue()
def handler(env):
    # Serve a local file (or delegate to the front server) for the request
    # matched by the enclosing dispatcher rule.  All configuration names
    # (rewriteonly, errorpage, memorycache, gzip, etag, allowrange, ...)
    # are closed over from the enclosing scope.
    currenttime = time()
    if rewriteonly:
        if not env.rewritefrom:
            for m in env.error(404):
                yield m
            env.exit()
    if not errorpage and checkreferer:
        try:
            referer = env.headerdict.get(b'referer')
            if referer is None:
                referer_host = None
            else:
                referer_host = urlsplit(referer).netloc
            if not ((refererallowlocal and referer_host == env.host)
                    or referer_host in refererallows):
                for m in env.error(403, showerror = False):
                    yield m
                env.exit()
        except:
            for m in env.error(403, showerror = False):
                yield m
            env.exit()
    localpath = env.path_match.expand(expand)
    realpath = env.getrealpath(relativeroot, localpath)
    filename = os.path.basename(realpath)
    if xsendfile or xlighttpdsendfile or xaccelredirect:
        # Apache send a local file
        env.startResponse(200)
        if contenttype:
            env.header('Content-Type', contenttype)
        else:
            mime = self.mimetypedatabase.guess_type(filename, mimestrict)
            if mime[1]:
                # There should not be a content-encoding here, maybe the file itself is compressed
                # set mime to application/octet-stream
                mime_type = 'application/octet-stream'
            elif not mime[0]:
                mime_type = 'application/octet-stream'
            else:
                mime_type = mime[0]
            env.header('Content-Type', mime_type, False)
        if not errorpage and contentdisposition:
            env.header('Content-Disposition', contentdisposition + '; filename=' + quote(filename))
        if xsendfile:
            env.header('X-Sendfile', realpath)
        if xaccelredirect:
            env.header(b'X-Accel-Redirect',
                       urljoin(xaccelredirect_root,
                               self.dispatcher.expand(env.path_match, expand)))
        if xlighttpdsendfile:
            env.header(b'X-LIGHTTPD-send-file', realpath)
        env.exit()
    use_gzip = False
    if gzip:
        if realpath.endswith('.gz'):
            # GZIP files are preserved for gzip encoding
            for m in env.error(403, showerror = False):
                yield m
            env.exit()
        encodings = _parseacceptencodings(env)
        if b'gzip' in encodings or b'x-gzip' in encodings:
            use_gzip = True
    use_etag = etag and not errorpage
    # First time cache check
    if memorycache:
        # Cache data: (data, headers, cachedtime, etag)
        cv = self._cache.get((realpath, use_gzip))
        if cv and cv[2] + max(0 if maxage is None else maxage, 3) > currenttime:
            # Cache is valid
            if use_etag:
                if _checketag(env, cv[3]):
                    env.startResponse(304, cv[1])
                    env.exit()
            size = len(cv[0])
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, cv[3], size)
            if rng is not None:
                env.startResponse(206, cv[1])
                _generaterange(env, rng, size)
                env.output(MemoryStream(cv[0][rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.startResponse(int(m.group()), cv[1])
                    else:
                        # Show 200-OK is better than 500
                        env.startResponse(200, cv[1])
                else:
                    env.startResponse(200, cv[1])
                env.output(MemoryStream(cv[0]), use_gzip)
            env.exit()
    # Test file
    if use_gzip:
        try:
            stat_info = os.stat(realpath + '.gz')
            if not stat.S_ISREG(stat_info.st_mode):
                raise ValueError('Not regular file')
            realpath += '.gz'
        except:
            # No precompressed variant; fall back to the plain file.
            try:
                stat_info = os.stat(realpath)
                if not stat.S_ISREG(stat_info.st_mode):
                    raise ValueError('Not regular file')
                use_gzip = False
            except:
                for m in env.error(404, showerror = False):
                    yield m
                env.exit()
    else:
        try:
            stat_info = os.stat(realpath)
            if not stat.S_ISREG(stat_info.st_mode):
                raise ValueError('Not regular file')
            use_gzip = False
        except:
            for m in env.error(404, showerror = False):
                yield m
            env.exit()
    newetag = _createetag(stat_info)
    # Second memory cache test
    if memorycache:
        # use_gzip may change
        cv = self._cache.get((realpath, use_gzip))
        if cv and cv[3] == newetag:
            # Cache is valid
            if use_etag:
                if _checketag(env, cv[3]):
                    env.startResponse(304, cv[1])
                    env.exit()
            self._cache[(realpath, use_gzip)] = (cv[0], cv[1], currenttime, newetag)
            size = len(cv[0])
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, cv[3], size)
            if rng is not None:
                env.startResponse(206, cv[1])
                _generaterange(env, rng, size)
                env.output(MemoryStream(cv[0][rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.startResponse(int(m.group()), cv[1])
                    else:
                        # Show 200-OK is better than 500
                        env.startResponse(200, cv[1])
                else:
                    env.startResponse(200, cv[1])
                env.output(MemoryStream(cv[0]), use_gzip)
            env.exit()
        elif cv:
            # Cache is invalid, remove it to prevent another hit
            del self._cache[(realpath, use_gzip)]
    # No cache available, get local file
    # Create headers
    if contenttype:
        env.header('Content-Type', contenttype)
    else:
        mime = self.mimetypedatabase.guess_type(filename, mimestrict)
        if mime[1]:
            # There should not be a content-encoding here, maybe the file itself is compressed
            # set mime to application/octet-stream
            mime_type = 'application/octet-stream'
        elif not mime[0]:
            mime_type = 'application/octet-stream'
        else:
            mime_type = mime[0]
        env.header('Content-Type', mime_type, False)
    if use_etag:
        env.header(b'ETag', b'"' + newetag + b'"', False)
    if maxage is not None:
        env.header('Cache-Control', 'max-age=' + str(maxage), False)
    if use_gzip:
        env.header(b'Content-Encoding', b'gzip', False)
    if not errorpage and contentdisposition:
        env.header('Content-Disposition', contentdisposition + '; filename=' + quote(filename))
    if allowrange:
        env.header(b'Accept-Ranges', b'bytes')
    if extraheaders:
        env.sent_headers.extend(extraheaders)
    if use_etag:
        if _checketag(env, newetag):
            env.startResponse(304, clearheaders = False)
            env.exit()
    if memorycache and stat_info.st_size <= memorycachelimit:
        # Cache
        cache = True
        if len(self._cache) >= self.memorycacheitemlimit:
            if not self._clearcache(currenttime):
                cache = False
        if cache:
            with open(realpath, 'rb') as fobj:
                data = fobj.read()
            self._cache[(realpath, use_gzip)] = (data, env.sent_headers[:], currenttime, newetag)
            size = len(data)
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, newetag, size)
            if rng is not None:
                env.startResponse(206, clearheaders = False)
                _generaterange(env, rng, size)
                env.output(MemoryStream(data[rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.startResponse(int(m.group()), clearheaders = False)
                    else:
                        # Show 200-OK is better than 500
                        env.startResponse(200, clearheaders = False)
                else:
                    env.startResponse(200, clearheaders = False)
                env.output(MemoryStream(data), use_gzip)
            env.exit()
    size = stat_info.st_size
    # BUG FIX: initialize rng so the non-range paths (errorpage set, or
    # allowrange disabled, with memorycache off) cannot hit an unbound
    # local; on paths where the assignment below runs this is a no-op.
    rng = None
    if not errorpage and allowrange:
        rng = _checkrange(env, newetag, size)
    if rng is not None:
        env.startResponse(206, clearheaders = False)
        _generaterange(env, rng, size)
        fobj = open(realpath, 'rb')
        try:
            fobj.seek(rng[0])
        except:
            fobj.close()
            raise
        else:
            env.output(FileStream(fobj, isunicode=False, size=rng[1] - rng[0]), use_gzip)
    else:
        if errorpage:
            m = statusname.match(filename)
            if m:
                env.startResponse(int(m.group()), clearheaders = False)
            else:
                # Show 200-OK is better than 500
                env.startResponse(200, clearheaders = False)
        else:
            env.startResponse(200, clearheaders = False)
        env.output(FileStream(open(realpath, 'rb'), isunicode = False), use_gzip)
async def handler(env):
    # Async variant of the static-file handler; configuration names
    # (rewriteonly, errorpage, memorycache, gzip, etag, allowrange, ...)
    # are closed over from the enclosing scope.
    currenttime = time()
    if rewriteonly:
        if not env.rewritefrom:
            await env.error(404)
            return
    if not errorpage and checkreferer:
        try:
            referer = env.headerdict.get(b'referer')
            if referer is None:
                referer_host = None
            else:
                referer_host = urlsplit(referer).netloc
            if not ((refererallowlocal and referer_host == env.host)
                    or referer_host in refererallows):
                await env.error(403, showerror=False)
                return
        except Exception:
            await env.error(403, showerror=False)
            return
    localpath = env.path_match.expand(expand)
    realpath = env.getrealpath(relativeroot, localpath)
    filename = os.path.basename(realpath)
    if xsendfile or xlighttpdsendfile or xaccelredirect:
        # Apache send a local file
        env.start_response(200)
        if contenttype:
            env.header('Content-Type', contenttype)
        else:
            mime = self.mimetypedatabase.guess_type(filename, mimestrict)
            if mime[1]:
                # There should not be a content-encoding here, maybe the file itself is compressed
                # set mime to application/octet-stream
                mime_type = 'application/octet-stream'
            elif not mime[0]:
                mime_type = 'application/octet-stream'
            else:
                mime_type = mime[0]
            env.header('Content-Type', mime_type, False)
        if not errorpage and contentdisposition:
            env.header('Content-Disposition',
                       contentdisposition + '; filename=' + quote(filename))
        if xsendfile:
            env.header('X-Sendfile', realpath)
        if xaccelredirect:
            env.header(b'X-Accel-Redirect',
                       urljoin(xaccelredirect_root,
                               self.dispatcher.expand(env.path_match, expand)))
        if xlighttpdsendfile:
            env.header(b'X-LIGHTTPD-send-file', realpath)
        return
    use_gzip = False
    if gzip:
        if realpath.endswith('.gz'):
            # GZIP files are preserved for gzip encoding
            # NOTE(review): unlike the sync variant there is no `return`
            # after this error — execution falls through unless
            # env.error raises; confirm this is intended.
            await env.error(403, showerror=False)
        encodings = _parseacceptencodings(env)
        if b'gzip' in encodings or b'x-gzip' in encodings:
            use_gzip = True
    use_etag = etag and not errorpage
    # First time cache check
    if memorycache:
        # Cache data: (data, headers, cachedtime, etag)
        cv = self._cache.get((realpath, use_gzip))
        if cv and cv[2] + max(0 if maxage is None else maxage, 3) > currenttime:
            # Cache is valid
            if use_etag:
                if _checketag(env, cv[3]):
                    env.start_response(304, cv[1])
                    return
            size = len(cv[0])
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, cv[3], size)
            if rng is not None:
                env.start_response(206, cv[1])
                _generaterange(env, rng, size)
                env.output(MemoryStream(cv[0][rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.start_response(int(m.group()), cv[1])
                    else:
                        # Show 200-OK is better than 500
                        env.start_response(200, cv[1])
                else:
                    env.start_response(200, cv[1])
                env.output(MemoryStream(cv[0]), use_gzip)
            return
    # Test file
    if use_gzip:
        try:
            stat_info = os.stat(realpath + '.gz')
            if not stat.S_ISREG(stat_info.st_mode):
                raise ValueError('Not regular file')
            realpath += '.gz'
        except Exception:
            # No precompressed variant; fall back to the plain file.
            try:
                stat_info = os.stat(realpath)
                if not stat.S_ISREG(stat_info.st_mode):
                    raise ValueError('Not regular file')
                use_gzip = False
            except Exception:
                await env.error(404, showerror=False)
                return
    else:
        try:
            stat_info = os.stat(realpath)
            if not stat.S_ISREG(stat_info.st_mode):
                raise ValueError('Not regular file')
            use_gzip = False
        except Exception:
            await env.error(404, showerror=False)
            return
    newetag = _createetag(stat_info)
    # Second memory cache test
    if memorycache:
        # use_gzip may change
        cv = self._cache.get((realpath, use_gzip))
        if cv and cv[3] == newetag:
            # Cache is valid
            if use_etag:
                if _checketag(env, cv[3]):
                    env.start_response(304, cv[1])
                    return
            self._cache[(realpath, use_gzip)] = (cv[0], cv[1], currenttime, newetag)
            size = len(cv[0])
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, cv[3], size)
            if rng is not None:
                env.start_response(206, cv[1])
                _generaterange(env, rng, size)
                env.output(MemoryStream(cv[0][rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.start_response(int(m.group()), cv[1])
                    else:
                        # Show 200-OK is better than 500
                        env.start_response(200, cv[1])
                else:
                    env.start_response(200, cv[1])
                env.output(MemoryStream(cv[0]), use_gzip)
            return
        elif cv:
            # Cache is invalid, remove it to prevent another hit
            del self._cache[(realpath, use_gzip)]
    # No cache available, get local file
    # Create headers
    if contenttype:
        env.header('Content-Type', contenttype)
    else:
        mime = self.mimetypedatabase.guess_type(filename, mimestrict)
        if mime[1]:
            # There should not be a content-encoding here, maybe the file itself is compressed
            # set mime to application/octet-stream
            mime_type = 'application/octet-stream'
        elif not mime[0]:
            mime_type = 'application/octet-stream'
        else:
            mime_type = mime[0]
        env.header('Content-Type', mime_type, False)
    if use_etag:
        env.header(b'ETag', b'"' + newetag + b'"', False)
    if maxage is not None:
        env.header('Cache-Control', 'max-age=' + str(maxage), False)
    if use_gzip:
        env.header(b'Content-Encoding', b'gzip', False)
    if not errorpage and contentdisposition:
        env.header('Content-Disposition',
                   contentdisposition + '; filename=' + quote(filename))
    if allowrange:
        env.header(b'Accept-Ranges', b'bytes')
    if extraheaders:
        env.sent_headers.extend(extraheaders)
    if use_etag:
        if _checketag(env, newetag):
            env.start_response(304, clearheaders=False)
            return
    if memorycache and stat_info.st_size <= memorycachelimit:
        # Cache
        cache = True
        if len(self._cache) >= self.memorycacheitemlimit:
            if not self._clearcache(currenttime):
                cache = False
        if cache:
            with open(realpath, 'rb') as fobj:
                data = fobj.read()
            self._cache[(realpath, use_gzip)] = (data, env.sent_headers[:], currenttime, newetag)
            size = len(data)
            rng = None
            if not errorpage and allowrange:
                rng = _checkrange(env, newetag, size)
            if rng is not None:
                env.start_response(206, clearheaders=False)
                _generaterange(env, rng, size)
                env.output(MemoryStream(data[rng[0]:rng[1]]), use_gzip)
            else:
                if errorpage:
                    m = statusname.match(filename)
                    if m:
                        env.start_response(int(m.group()), clearheaders=False)
                    else:
                        # Show 200-OK is better than 500
                        env.start_response(200, clearheaders=False)
                else:
                    env.start_response(200, clearheaders=False)
                env.output(MemoryStream(data), use_gzip)
            return
    size = stat_info.st_size
    # BUG FIX: initialize rng so the non-range paths (errorpage set, or
    # allowrange disabled, with memorycache off) cannot hit an unbound
    # local; on paths where the assignment below runs this is a no-op.
    rng = None
    if not errorpage and allowrange:
        rng = _checkrange(env, newetag, size)
    if rng is not None:
        env.start_response(206, clearheaders=False)
        _generaterange(env, rng, size)
        fobj = open(realpath, 'rb')
        try:
            fobj.seek(rng[0])
        except Exception:
            fobj.close()
            raise
        else:
            env.output(FileStream(fobj, isunicode=False, size=rng[1] - rng[0]), use_gzip)
    else:
        if errorpage:
            m = statusname.match(filename)
            if m:
                env.start_response(int(m.group()), clearheaders=False)
            else:
                # Show 200-OK is better than 500
                env.start_response(200, clearheaders=False)
        else:
            env.start_response(200, clearheaders=False)
        env.output(FileStream(open(realpath, 'rb'), isunicode=False), use_gzip)
def fix_name(name):
    """Return *name* quoted and wrapped in double quotes for dot output."""
    return '"%s"' % quote(name)