def do_GET(self):
    """
    Serve an HTTP GET request: map the URL path onto a file under
    HTML_DIR and stream it back, or reply with a 404 page.

    Side effects: stores the parsed path and query parameters on
    ``self.url`` / ``self.params``, writes the full response (status,
    headers, body) to ``self.wfile``.
    """
    path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
    params = {}
    content = None

    if query:
        params.update(_urllib.parse.parse_qs(query))

        # parse_qs() yields lists; keep only the last value per key
        for key in params:
            if params[key]:
                params[key] = params[key][-1]

    self.url, self.params = path, params

    if path == '/':
        path = "index.html"

    path = path.strip('/')
    path = path.replace('/', os.path.sep)
    path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()

    # allow extension-less references to *.html pages
    if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
        path = "%s.html" % path

    # relpath check prevents directory traversal outside HTML_DIR
    if ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and not path.endswith(DISABLED_CONTENT_EXTENSIONS):
        # context manager closes the handle deterministically (original leaked it)
        with open(path, "rb") as f:
            content = f.read()

        self.send_response(_http_client.OK)
        self.send_header(HTTP_HEADER.CONNECTION, "close")
        self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
    else:
        content = ("<!DOCTYPE html><html lang=\"en\"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>" % self.path.split('?')[0]).encode(UNICODE_ENCODING)
        self.send_response(_http_client.NOT_FOUND)
        self.send_header(HTTP_HEADER.CONNECTION, "close")

    if content is not None:
        # expand "<!NAME!>" placeholders via self._name() helper methods;
        # raw bytes pattern avoids invalid '\!'/'\w' escapes, and group(1)
        # is bytes under Python 3 so it must be decoded before getattr()
        for match in re.finditer(rb"<\!(\w+)\!>", content):
            name = match.group(1).decode(UNICODE_ENCODING)
            _ = getattr(self, "_%s" % name.lower(), None)
            if _:
                content = self._format(content, **{name: _()})

        # default "" so a missing Accept-Encoding header doesn't raise TypeError
        if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING, ""):
            self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
            _ = six.BytesIO()
            compress = gzip.GzipFile("", "w+b", 9, _)
            compress._stream = _
            compress.write(content)
            compress.flush()
            compress.close()
            content = compress._stream.getvalue()

        self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))

    self.end_headers()

    if content:
        try:
            self.wfile.write(content)
            self.wfile.flush()
        except Exception:  # client may disconnect mid-write (e.g. broken pipe)
            pass
def do_GET(self):
    """
    Serve an HTTP GET request.

    Resolution order:
      1. "virtual" paths handled by a ``self._<path>(params)`` method;
      2. static files under HTML_DIR (with If-Modified-Since caching,
         Content-Security-Policy, cache-control headers and an injected
         ``IP_ALIASES`` table for ``/js/main.js``);
      3. otherwise a 404 page.

    Writes the full response (status, headers, body) to ``self.wfile``;
    supports gzip content encoding when the client accepts it.
    """
    path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
    params = {}
    content = None
    skip = False

    # POST body parameters, when a previous handler stored them on self.data
    if hasattr(self, "data"):
        params.update(_urllib.parse.parse_qs(self.data))

    if query:
        params.update(_urllib.parse.parse_qs(query))

    # parse_qs() yields lists; keep only the last value per key
    for key in params:
        if params[key]:
            params[key] = params[key][-1]

    if path == '/':
        path = "index.html"

    path = path.strip('/')
    extension = os.path.splitext(path)[-1].lower()

    if hasattr(self, "_%s" % path):
        # "virtual" path served by a dedicated handler method
        content = getattr(self, "_%s" % path)(params)
    else:
        path = path.replace('/', os.path.sep)
        path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()

        # allow extension-less references to *.html pages
        if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
            path = "%s.html" % path

        if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
            # inject configured "<ip>:<alias>" pairs into the main script
            # (context manager closes the handle; original leaked it)
            with open(path, 'r') as f:
                content = f.read()
            content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)

        # relpath check prevents directory traversal outside HTML_DIR
        if ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
            mtime = time.gmtime(os.path.getmtime(path))
            if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)

            if if_modified_since and extension not in (".htm", ".html"):
                try:
                    if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                    if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                        self.send_response(_http_client.NOT_MODIFIED)
                        self.send_header(HTTP_HEADER.CONNECTION, "close")
                        skip = True
                except (IndexError, ValueError):
                    # malformed If-Modified-Since header -> serve full content
                    # (original crashed the handler here)
                    pass

            if not skip:
                if not content:
                    with open(path, "rb") as f:
                        content = f.read()
                last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                self.send_response(_http_client.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
                # For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
                self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src * blob:; script-src 'self' 'unsafe-eval' https://stat.ripe.net; frame-src *; object-src 'none'; block-all-mixed-content;")

                if os.path.basename(path) == "index.html":
                    # strip the demo-mode script from the landing page
                    # (raw bytes pattern avoids the invalid '\s' escape)
                    content = re.sub(rb'\s*<script[^>]+src="js/demo.js"></script>', b'', content)

                if extension not in (".htm", ".html"):
                    self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")  # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                    self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                else:
                    self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
        else:
            self.send_response(_http_client.NOT_FOUND)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            # build as text and let the encode below produce bytes: bytes
            # %-formatting with a str operand raises TypeError on Python 3
            content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]

    if content is not None:
        if isinstance(content, six.text_type):
            content = content.encode(UNICODE_ENCODING)

        # expand "<!NAME!>" placeholders via self._name() helper methods
        for match in re.finditer(rb"<\!(\w+)\!>", content):
            name = match.group(1).decode(UNICODE_ENCODING)
            _ = getattr(self, "_%s" % name.lower(), None)
            if _:
                content = self._format(content, **{name: _()})

        if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING, ""):
            self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
            _ = six.BytesIO()
            compress = gzip.GzipFile("", "w+b", 9, _)
            compress._stream = _
            compress.write(content)
            compress.flush()
            compress.close()
            content = compress._stream.getvalue()

        self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))

    self.end_headers()

    if content:
        try:
            self.wfile.write(content)
            self.wfile.flush()
        except Exception:  # client may disconnect mid-write (e.g. broken pipe)
            pass