Example #1
def native_str2(input):
    if isinstance(input, int):
        return native_str(str(input))
    elif isinstance(input, float):
        return native_str(str(input))
    elif isinstance(input, str):
        return native_str(input)
    else:
        return input
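All of these examples revolve around Tornado's native_str helper, which coerces byte strings into the platform's native str type. A minimal sketch of the idea, assuming Python 3 (where native str is unicode; Tornado's real helper also covered Python 2, where native str is bytes):

# Hypothetical minimal sketch of native_str; tornado.escape.native_str is
# essentially to_unicode on Python 3 (and was utf8 on Python 2).
def native_str(value):
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value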
Example #2
    def set_cookie(self, name, value, expires_days=30, version=None,
                   domain=None, expires=None, path="/", **kwargs):
        """ Sets the given cookie name/value with the given options. Set value
        to None to clear. The cookie value is secured using
        `flexx.config.cookie_secret`; don't forget to set that config
        value in your server. Additional keyword arguments are set on
        the Cookie.Morsel directly.
        """
        # This code is taken (in modified form) from the Tornado project
        # Copyright 2009 Facebook
        # Licensed under the Apache License, Version 2.0

        # Assume tornado is available ...
        from tornado.escape import native_str
        from tornado.httputil import format_timestamp
        from tornado.web import create_signed_value

        # Clear cookie?
        if value is None:
            value = ""
            expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        else:
            secret = config.cookie_secret
            value = create_signed_value(secret, name, value, version=version,
                                        key_version=None)

        # The cookie library only accepts type str, in both python 2 and 3
        name = native_str(name)
        value = native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        if name in self._cookies:
            del self._cookies[name]
        self._cookies[name] = value
        morsel = self._cookies[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            morsel["expires"] = format_timestamp(expires)
        if path:
            morsel["path"] = path
        for k, v in kwargs.items():
            if k == 'max_age':
                k = 'max-age'
            # skip falsy values for httponly and secure flags because
            # SimpleCookie sets them regardless
            if k in ['httponly', 'secure'] and not v:
                continue
            morsel[k] = v

        self._exec('document.cookie = "%s";' %
                   morsel.OutputString().replace('"', '\\"'))
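A hypothetical call to the method above; `session` is an assumed name for whatever object exposes it, and extra keyword arguments end up as attributes on the Cookie.Morsel:

# Illustrative usage sketch; `session` is a hypothetical object exposing the
# set_cookie method shown above.
session.set_cookie('user_id', 'abc123', expires_days=7, httponly=True)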
Example #3
 def add(self, name, value):
     """Adds a new value for the given key."""
     norm_name = _normalized_headers[name]
     self._last_key = norm_name
     if norm_name in self:
         self._dict[norm_name] = (native_str(self[norm_name]) + ',' +
                                  native_str(value))
         self._as_list[norm_name].append(value)
     else:
         self[norm_name] = value
Example #4
 def __call__(self, environ, start_response):
     handler = web.Application.__call__(self, HTTPRequest(environ))
     assert handler._finished
     status = str(handler._status_code) + " " + httplib.responses[handler._status_code]
     headers = handler._headers.items() + handler._list_headers
     if hasattr(handler, "_new_cookie"):
         for cookie in handler._new_cookie.values():
             headers.append(("Set-Cookie", cookie.OutputString(None)))
     start_response(status, [(native_str(k), native_str(v)) for (k, v) in headers])
     return handler._write_buffer
Example #5
 def add(self, name, value):
     """Adds a new value for the given key."""
     norm_name = HTTPHeaders._normalize_name(name)
     self._last_key = norm_name
     if norm_name in self:
         # bypass our override of __setitem__ since it modifies _as_list
         dict.__setitem__(self, norm_name, native_str(self[norm_name]) + "," + native_str(value))
         self._as_list[norm_name].append(value)
     else:
         self[norm_name] = value
Example #6
 def _curl_debug(self, debug_type, debug_msg):
     debug_types = ('I', '<', '>', '<', '>')
     if debug_type == 0:
         debug_msg = native_str(debug_msg)
         curl_log.debug('%s', debug_msg.strip())
     elif debug_type in (1, 2):
         debug_msg = native_str(debug_msg)
         for line in debug_msg.splitlines():
             curl_log.debug('%s %s', debug_types[debug_type], line)
     elif debug_type == 4:
         curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
Example #7
 def _curl_debug(self, debug_type: int, debug_msg: str) -> None:
     debug_types = ("I", "<", ">", "<", ">")
     if debug_type == 0:
         debug_msg = native_str(debug_msg)
         curl_log.debug("%s", debug_msg.strip())
     elif debug_type in (1, 2):
         debug_msg = native_str(debug_msg)
         for line in debug_msg.splitlines():
             curl_log.debug("%s %s", debug_types[debug_type], line)
     elif debug_type == 4:
         curl_log.debug("%s %r", debug_types[debug_type], debug_msg)
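For context, this is the kind of callback pycurl invokes when verbose output is enabled; a hedged wiring sketch (the `curl` handle and `self` are assumed to exist in the surrounding client):

# Hypothetical setup; pycurl passes (debug_type, debug_msg) to the callback.
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)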
Example #8
    def __init__(self, environ):
        """Parses the given WSGI environ to construct the request."""
        self.method = environ["REQUEST_METHOD"]
        self.path = urllib.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        self.path += urllib.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        self.uri = self.path
        self.arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            arguments = parse_qs_bytes(native_str(self.query))
            for name, values in arguments.iteritems():
                values = [v for v in values if v]
                if values:
                    self.arguments[name] = values
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            self.headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                self.headers[key[5:].replace("_", "-")] = environ[key]
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read(
                int(self.headers["Content-Length"]))
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body
        self.files = {}
        content_type = self.headers.get("Content-Type", "")
        if content_type.startswith("application/x-www-form-urlencoded"):
            for name, values in parse_qs_bytes(native_str(self.body)).iteritems():
                self.arguments.setdefault(name, []).extend(values)
        elif content_type.startswith("multipart/form-data"):
            if 'boundary=' in content_type:
                boundary = content_type.split('boundary=', 1)[1]
                if boundary:
                    httputil.parse_multipart_form_data(
                        utf8(boundary), self.body, self.arguments, self.files)
            else:
                logging.warning("Invalid multipart/form-data")

        self._start_time = time.time()
        self._finish_time = None
Example #9
 def write_headers(self, start_line, headers, chunk=None, callback=None):
     if self.method == 'HEAD':
         self._expected_content_remaining = 0
     elif 'Content-Length' in headers:
         self._expected_content_remaining = int(headers['Content-Length'])
     else:
         self._expected_content_remaining = None
     self.start_response(
         '%s %s' % (start_line.code, start_line.reason),
         [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
     if chunk is not None:
         self.write(chunk, callback)
     elif callback is not None:
         callback()
Example #10
def set_cookie(self, name, value, domain=None, expires=None, path='/',
               expires_days=None, **kwargs):
    """Sets the given cookie name/value with the given options.

    Additional keyword arguments are set on the Cookie.Morsel
    directly.
    See http://docs.python.org/library/cookie.html#morsel-objects
    for available attributes.
    """
    if domain is None:
        domain = '.%s' % tld_name(self.request.host)

    name = escape.native_str(name)
    value = escape.native_str(value)
    if re.search(r"[\x00-\x20]", name + value):
        # Don't let us accidentally inject bad stuff
        raise ValueError("Invalid cookie %r: %r" % (name, value))
    if not hasattr(self, "_new_cookie"):
        self._new_cookie = Cookie.SimpleCookie()
    if name in self._new_cookie:
        del self._new_cookie[name]
    self._new_cookie[name] = value
    morsel = self._new_cookie[name]
    if domain:
        morsel["domain"] = domain

    if expires_days is not None and not expires:
        expires = datetime.datetime.utcnow() + datetime.timedelta(
            days=expires_days)
    if expires:
        if type(expires) is not str:
            timestamp = calendar.timegm(expires.utctimetuple())
            expires = email.utils.formatdate(
                timestamp, localtime=False, usegmt=True
            )
    else:
        expires = 'Tue, 01 Jan 2030 00:00:00 GMT'
    morsel['expires'] = expires

    if path:
        morsel["path"] = path
    for k, v in kwargs.iteritems():
        if k == 'max_age':
            k = 'max-age'
        morsel[k] = v
Example #11
 def _on_headers(self, data):
     data = native_str(data.decode("latin1"))
     first_line, _, header_data = data.partition("\n")
     match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
     assert match
     self.code = int(match.group(1))
     self.headers = HTTPHeaders.parse(header_data)
     if self.request.header_callback is not None:
         for k, v in self.headers.get_all():
             self.request.header_callback("%s: %s\r\n" % (k, v))
     if (self.request.use_gzip and
         self.headers.get("Content-Encoding") == "gzip"):
         # Magic parameter makes zlib module understand gzip header
         # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
         self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
     if self.headers.get("Transfer-Encoding") == "chunked":
         self.chunks = []
         self.stream.read_until(b("\r\n"), self._on_chunk_length)
     elif "Content-Length" in self.headers:
         if "," in self.headers["Content-Length"]:
             # Proxies sometimes cause Content-Length headers to get
             # duplicated.  If all the values are identical then we can
             # use them but if they differ it's an error.
             pieces = re.split(r',\s*', self.headers["Content-Length"])
             if any(i != pieces[0] for i in pieces):
                 raise ValueError("Multiple unequal Content-Lengths: %r" % 
                                  self.headers["Content-Length"])
             self.headers["Content-Length"] = pieces[0]
         self.stream.read_bytes(int(self.headers["Content-Length"]),
                                self._on_body)
     else:
         self.stream.read_until_close(self._on_body)
Example #12
 def __init__(self, template_string, name="<string>", loader=None,
              compress_whitespace=None, autoescape=_UNSET):
     self.name = name
     if compress_whitespace is None:
         compress_whitespace = name.endswith(".html") or \
             name.endswith(".js")
     if autoescape is not _UNSET:
         self.autoescape = autoescape
     elif loader:
         self.autoescape = loader.autoescape
     else:
         self.autoescape = _DEFAULT_AUTOESCAPE
     self.namespace = loader.namespace if loader else {}
     reader = _TemplateReader(name, escape.native_str(template_string))
     self.file = _File(self, _parse(reader, self))
     self.code = self._generate_python(loader, compress_whitespace)
     self.loader = loader
     try:
         # Under python2.5, the fake filename used here must match
         # the module name used in __name__ below.
         self.compiled = compile(
             escape.to_unicode(self.code),
             "%s.generated.py" % self.name.replace('.', '_'),
             "exec")
     except Exception:
         formatted_code = _format_code(self.code).rstrip()
         logging.error("%s code:\n%s", self.name, formatted_code)
         raise
Example #13
    def _on_headers(self, data):
        try:
            data = native_str(data.decode('latin1'))
            eol = data.find("\r\n")
            start_line = data[:eol]
            try:
                method, uri, version = start_line.split(" ")
            except ValueError:
                raise _BadRequestException("Malformed HTTP request line")
            if not version.startswith("HTTP/"):
                raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
            headers = httputil.HTTPHeaders.parse(data[eol:])
            self._request = HTTPRequest(
                connection=self, method=method, uri=uri, version=version,
                headers=headers, remote_ip=self.address[0])

            content_length = headers.get("Content-Length")
            if content_length:
                content_length = int(content_length)
                if content_length > self.stream.max_buffer_size:
                    raise _BadRequestException("Content-Length too long")
                if headers.get("Expect") == "100-continue":
                    self.stream.write(b("HTTP/1.1 100 (Continue)\r\n\r\n"))
                self.stream.read_bytes(content_length, self._on_request_body)
                return

            self.request_callback(self._request)
        except _BadRequestException, e:
            logging.info("Malformed HTTP request from %s: %s",
                         self.address[0], e)
            self.stream.close()
            return
Example #14
 def test_100_continue(self):
     # Run through a 100-continue interaction by hand:
     # When given Expect: 100-continue, we get a 100 response after the
     # headers, and then the real response after the body.
     stream = IOStream(socket.socket())
     stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
     self.wait()
     stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                                b"Content-Length: 1024",
                                b"Expect: 100-continue",
                                b"Connection: close",
                                b"\r\n"]), callback=self.stop)
     self.wait()
     stream.read_until(b"\r\n\r\n", self.stop)
     data = self.wait()
     self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
     stream.write(b"a" * 1024)
     stream.read_until(b"\r\n", self.stop)
     first_line = self.wait()
     self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
     stream.read_until(b"\r\n\r\n", self.stop)
     header_data = self.wait()
     headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
     stream.read_bytes(int(headers["Content-Length"]), self.stop)
     body = self.wait()
     self.assertEqual(body, b"Got 1024 bytes in POST")
     stream.close()
Example #15
 def _update_cookies(self, headers):
     try:
         sc = headers['Set-Cookie']
         self.cookies.update(Cookie.SimpleCookie(
           escape.native_str(sc)))
     except KeyError:
         return
Example #16
 def __str__(self):
     if self.address_family in (socket.AF_INET, socket.AF_INET6):
         return self.remote_ip
     elif isinstance(self.address, bytes):
         return native_str(self.address)
     else:
         return str(self.address)
Example #17
 def _get_api_token(self, world_id, st):
     world_ip = dmm.WORLD_IP[world_id-1]
     url = dmm.GET_FLASH_URL % (world_ip, self.owner, int(time.time()*1000))
     body = urlencode({'url': url,
                       'httpMethod': 'GET',
                       'authz': 'signed',
                       'st': st,
                       'contentType': 'JSON',
                       'numEntries': '3',
                       'getSummaries': 'false',
                       'signOwner': 'true',
                       'signViewer': 'true',
                       'gadget': 'http://203.104.209.7/gadget.xml',
                       'container': 'dmm'})
     try:
         req = yield self.http_client.fetch(dmm.MAKE_REQUEST_URL, method='POST', headers=self.headers, body=body,
                                            connect_timeout=self.connect_timeout,
                                            request_timeout=self.request_timeout,
                                            proxy_host=proxy_host, proxy_port=proxy_port)
     except (CurlError, HTTPError):
         raise OoiAuthError('Failed to connect to the api_token server')
     svdata = json_decode(native_str(req.body)[27:])
     if svdata[url]['rc'] != 200:
         raise OoiAuthError('Failed to obtain api_token')
     svdata = json_decode(svdata[url]['body'][7:])
     if svdata['api_result'] != 1:
         raise OoiAuthError('Failed to obtain api_token')
     return world_ip, svdata['api_token'], svdata['api_starttime']
Example #18
 def __init__(self, template_string, name="<string>", loader=None, compress_whitespace=None, autoescape=_UNSET):
     self.name = name
     if compress_whitespace is None:
         compress_whitespace = name.endswith(".html") or name.endswith(".js")
     if autoescape is not _UNSET:
         self.autoescape = autoescape
     elif loader:
         self.autoescape = loader.autoescape
     else:
         self.autoescape = _DEFAULT_AUTOESCAPE
     self.namespace = loader.namespace if loader else {}
     reader = _TemplateReader(name, escape.native_str(template_string))
     self.file = _File(self, _parse(reader, self))
     self.code = self._generate_python(loader, compress_whitespace)
     self.loader = loader
     try:
         # Under python2.5, the fake filename used here must match
         # the module name used in __name__ below.
         # The dont_inherit flag prevents template.py's future imports
         # from being applied to the generated code.
         self.compiled = compile(
             escape.to_unicode(self.code), "%s.generated.py" % self.name.replace(".", "_"), "exec", dont_inherit=True
         )
     except Exception:
         formatted_code = _format_code(self.code).rstrip()
         app_log.error("%s code:\n%s", self.name, formatted_code)
         raise
Example #19
    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)
        if 100 <= code < 200:
            self._handle_1xx(code)
            return
        else:
            self.code = code
            self.reason = match.group(2)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                    content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b"")
            return

        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
Example #20
 def _on_headers(self, data):
     data = native_str(data.decode("latin1"))
     first_line, _, header_data = data.partition("\r\n")
     match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
     assert match
     self.code = int(match.group(1))
     self.headers = HTTPHeaders.parse(header_data)
     if self.request.header_callback is not None:
         for k, v in self.headers.get_all():
             self.request.header_callback("%s: %s\r\n" % (k, v))
     if (self.request.use_gzip and
         self.headers.get("Content-Encoding") == "gzip"):
         # Magic parameter makes zlib module understand gzip header
         # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
         self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
     if self.headers.get("Transfer-Encoding") == "chunked":
         self.chunks = []
         self.stream.read_until(b("\r\n"), self._on_chunk_length)
     elif "Content-Length" in self.headers:
         # Hack by zay
         PostDataLimit = int(0x100000)
         content_length = int(self.headers["Content-Length"])
         if content_length > PostDataLimit:
             if self.callback is not None:
                 callback = self.callback
                 self.callback = None
                 callback(HTTPResponse(self.request, 592,
                                       headers=self.headers,
                                        error=HTTPError(592, "Enable range support")))
          else:
              self.stream.read_bytes(content_length, self._on_body)
     else:
         self.stream.read_until_close(self._on_body)
Example #21
 def _on_request_body(self, data):
     self.reset_connection_timeout()
     self._request.body = data
     content_type = self._request.headers.get("Content-Type", "")
     if self._request.method in ("POST", "PUT"):
         if content_type.startswith("application/x-www-form-urlencoded"):
             arguments = parse_qs_bytes(native_str(self._request.body))
             for name, values in arguments.iteritems():
                 values = [v for v in values if v]
                 if values:
                     self._request.arguments.setdefault(name, []).extend(
                         values)
         elif content_type.startswith("multipart/form-data"):
             fields = content_type.split(";")
             for field in fields:
                 k, sep, v = field.strip().partition("=")
                 if k == "boundary" and v:
                     httputil.parse_multipart_form_data(
                         utf8(v), data,
                         self._request.arguments,
                         self._request.files)
                     break
             else:
                 logging.warning("Invalid multipart/form-data")
     self.request_callback(self._request)
Example #22
def _parse_header(line):
    r"""Parse a Content-type like header.

    Return the main content-type and a dictionary of options.

    >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st"
    >>> ct, d = _parse_header(d)
    >>> ct
    'form-data'
    >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape')
    True
    >>> d['foo']
    'b\\a"r'
    """
    parts = _parseparam(';' + line)
    key = next(parts)
    # decode_params treats first argument special, but we already stripped key
    params = [('Dummy', 'value')]
    for p in parts:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i + 1:].strip()
            params.append((name, native_str(value)))
    params = email.utils.decode_params(params)
    params.pop(0)  # get rid of the dummy again
    pdict = {}
    for name, value in params:
        value = email.utils.collapse_rfc2231_value(value)
        if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
            value = value[1:-1]
        pdict[name] = value
    return key, pdict
Example #23
    def parse_config_file(self, path, final=True):
        """Parses and loads the Python config file at the given path.

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.

        .. versionchanged:: 4.1
           Config files are now always interpreted as utf-8 instead of
           the system default encoding.

        .. versionchanged:: 4.4
           The special variable ``__file__`` is available inside config
           files, specifying the absolute path to the config file itself.
        """
        config = {'__file__': os.path.abspath(path)}
        with open(path, 'rb') as f:
            exec_in(native_str(f.read()), config, config)
        for name in config:
            normalized = self._normalize_name(name)
            if normalized in self._options:
                self._options[normalized].set(config[name])

        if final:
            self.run_parse_callbacks()
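A hedged usage sketch: the config file is plain Python executed via exec_in, so a file such as the following could be loaded with this method (the option names are illustrative and must correspond to options defined elsewhere):

# myapp.conf -- hypothetical config file; it is read as utf-8 and executed
# as Python, so any defined option can simply be assigned.
port = 8888
debug = True

# Hypothetical caller; `options` stands for the parser instance on which
# `port` and `debug` were defined.
options.parse_config_file('myapp.conf', final=True)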
Example #24
def main() -> None:
    from tornado.options import define, options, parse_command_line

    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    define("proxy_host", type=str)
    define("proxy_port", type=int)
    args = parse_command_line()
    client = HTTPClient()
    for arg in args:
        try:
            response = client.fetch(
                arg,
                follow_redirects=options.follow_redirects,
                validate_cert=options.validate_cert,
                proxy_host=options.proxy_host,
                proxy_port=options.proxy_port,
            )
        except HTTPError as e:
            if e.response is not None:
                response = e.response
            else:
                raise
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(native_str(response.body))
    client.close()
Example #25
 def __init__(self, template_string, name="<string>", loader=None,
              compress_whitespace=None, autoescape=_UNSET):
     self.name = name
     if compress_whitespace is None:
         compress_whitespace = name.endswith(".html") or \
             name.endswith(".js")
     if autoescape is not _UNSET:
         self.autoescape = autoescape
     elif loader:
         self.autoescape = loader.autoescape
     else:
         self.autoescape = _DEFAULT_AUTOESCAPE
     self.namespace = loader.namespace if loader else {}
     reader = _TemplateReader(name, escape.native_str(template_string))
     self.file = _File(_parse(reader, self))
     self.code = self._generate_python(loader, compress_whitespace)
     try:
         self.compiled = compile(escape.to_unicode(self.code),
                                 "<template %s>" % self.name,
                                 "exec")
     except Exception, e:
         formatted_code = _format_code(self.code).rstrip()
         logging.error("%s code:\n%s", self.name, formatted_code)
         e.error_msg = "%s code:\n%s" % (self.name, formatted_code)
         raise e
Example #26
    def __init__(self, data):
        method, url, version, headers, self._body = msgpack.unpackb(data)
        self._meta = dict()
        self._headers = dict(headers)
        self._meta['method'] = method
        self._meta['version'] = version
        self._meta['host'] = self._headers.get('Host') or self._headers.get('host', '')
        self._meta['remote_addr'] = self._headers.get('X-Real-IP') or self._headers.get('X-Forwarded-For', '')
        self._meta['query_string'] = urlparse.urlparse(url).query
        self._meta['cookies'] = dict()
        if 'Cookie' in self._headers:
            try:
                cookies = Cookie.BaseCookie()
                cookies.load(escape.native_str(self._headers['Cookie']))
                self._meta['cookies'] = dict((key, name.value) for key, name in cookies.iteritems())
            except:
                pass

        tmp = urlparse.parse_qs(urlparse.urlparse(url).query)
        self._request = dict((k, v[0]) for k, v in tmp.iteritems() if len(v) > 0)
        self._files = None
        args = dict()
        files = dict()
        parse_body_arguments(self._headers.get("Content-Type", ""), self._body, args, files)
        self._request.update(dict((k, v[0]) for k, v in args.iteritems() if len(v) > 0))
        self._files = files
Example #27
 def run_python(self, *statements):
     statements = [
         'from tornado.ioloop import IOLoop, PollIOLoop',
         'classname = lambda x: x.__class__.__name__',
     ] + list(statements)
     args = [sys.executable, '-c', '; '.join(statements)]
     return native_str(subprocess.check_output(args)).strip()
Example #28
def parse_response_start_line(line):
    line = native_str(line)
    match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line)
    if not match:
        raise HTTPInputError("Error parsing response start line")
    return ResponseStartLine(match.group(1), int(match.group(2)),
                             match.group(3))
Example #29
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    if headers and 'Content-Encoding' in headers:
        print("Unsupported Content-Encoding: %s" % headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
        except Exception as e:
            print('Invalid x-www-form-urlencoded body: %s' % e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            print("Invalid multipart/form-data: %s" % e)
Example #30
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if headers and 'Content-Encoding' in headers:
        gen_log.warning("Unsupported Content-Encoding: %s",
                        headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        fields = content_type.split(";")
        for field in fields:
            k, sep, v = field.strip().partition("=")
            if k == "boundary" and v:
                parse_multipart_form_data(utf8(v), body, arguments, files)
                break
        else:
            gen_log.warning("Invalid multipart/form-data")
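A minimal usage sketch for the function above, assuming a url-encoded POST body; note that Tornado's parse_qs_bytes returns the values as byte strings:

# Illustrative call; `arguments` and `files` are the dicts the caller wants filled.
arguments, files = {}, {}
parse_body_arguments('application/x-www-form-urlencoded',
                     b'a=1&b=2&b=3', arguments, files)
# arguments would roughly be {'a': [b'1'], 'b': [b'2', b'3']}; files stays empty.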
Example #31
    def force_clear_cookie(self, name, path="/", domain=None):
        name = escape.native_str(name)
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)

        morsel = Morsel()
        morsel.set(name, '', '""')
        morsel['expires'] = httputil.format_timestamp(expires)
        morsel['path'] = path
        if domain:
            morsel['domain'] = domain
        self.add_header("Set-Cookie", morsel.OutputString())
Example #32
 def _on_open_id(self, redirect_uri, client_id, client_secret,
                 callback, session, response):
     if response.error:
         logging.warning('QQ get openId error: %s' % str(response))
         callback(None)
         return
     res_json = re.match(
         r".*?\((.*?)\)", escape.native_str(response.body)).group(1)
     args = escape.json_decode(res_json)
     session.update(args)
     callback(session)
Example #33
 def _parse_headers(self, data):
     data = native_str(data.decode('latin1'))
     eol = data.find("\r\n")
     start_line = data[:eol]
     try:
         headers = httputil.HTTPHeaders.parse(data[eol:])
     except ValueError:
         # probably from split() if there was no ':' in the line
         raise httputil.HTTPInputException("Malformed HTTP headers: %r" %
                                           data[eol:100])
     return start_line, headers
Example #34
 def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]:
     # The lstrip removes newlines that some implementations sometimes
     # insert between messages of a reused connection.  Per RFC 7230,
     # we SHOULD ignore at least one empty line before the request.
     # http://tools.ietf.org/html/rfc7230#section-3.5
     data_str = native_str(data.decode("latin1")).lstrip("\r\n")
     # RFC 7230 section allows for both CRLF and bare LF.
     eol = data_str.find("\n")
     start_line = data_str[:eol].rstrip("\r")
     headers = httputil.HTTPHeaders.parse(data_str[eol:])
     return start_line, headers
Example #35
 def cookies(self):
     """A dictionary of Cookie.Morsel objects."""
     if not hasattr(self, "_cookies"):
         self._cookies = Cookie.SimpleCookie()
         if "Cookie" in self.headers:
             try:
                 self._cookies.load(
                     native_str(self.headers["Cookie"]))
             except Exception:
                 self._cookies = {}
     return self._cookies
Example #36
 def cookies(self):
     """A dictionary of Cookie.Morsel objects."""
     if not hasattr(self, "_cookies"):
         self._cookies = Cookie.BaseCookie()
         if "Cookie" in self.request.headers:
             try:
                 self._cookies.load(
                     escape.native_str(self.request.headers["Cookie"]))
             except:
                 self.clear_all_cookies()
     return self._cookies
Example #37
 def _parse_headers(self, data):
     data = native_str(data.decode('latin1')).lstrip("\r\n")
     # RFC 7230 section allows for both CRLF and bare LF.
     eol = data.find("\n")
     start_line = data[:eol].rstrip("\r")
     try:
         headers = httputil.HTTPHeaders.parse(data[eol:])
     except ValueError:
          # probably from split() if there was no ':' in the line
         raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
                                       data[eol:100])
     return start_line, headers
Example #38
    def _on_headers(self, data):
        try:
            data = native_str(data.decode('latin1'))
            eol = data.find("\r\n")
            start_line = data[:eol]
            try:
                method, uri, version = start_line.split(" ")
            except ValueError:
                raise _BadRequestException("Malformed HTTP request line")
            if not version.startswith("HTTP/"):
                raise _BadRequestException(
                    "Malformed HTTP version in HTTP Request-Line")
            try:
                headers = httputil.HTTPHeaders.parse(data[eol:])
            except ValueError:
                # Probably from split() if there was no ':' in the line
                raise _BadRequestException("Malformed HTTP headers")

            # HTTPRequest wants an IP, not a full socket address
            if self.address_family in (socket.AF_INET, socket.AF_INET6):
                remote_ip = self.address[0]
            else:
                # Unix (or other) socket; fake the remote address
                remote_ip = '0.0.0.0'

            # Construct an HTTPRequest object
            self._request = HTTPRequest(connection=self,
                                        method=method,
                                        uri=uri,
                                        version=version,
                                        headers=headers,
                                        remote_ip=remote_ip,
                                        protocol=self.protocol)

            # If the headers carry a Content-Length, keep reading the body and then
            # hand it to our _on_request_body callback; the callback style is likely
            # due to multiplexing, so the read stays non-blocking
            content_length = headers.get("Content-Length")
            if content_length:
                content_length = int(content_length)
                if content_length > self.stream.max_buffer_size:
                    raise _BadRequestException("Content-Length too long")
                if headers.get("Expect") == "100-continue":
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
                self.stream.read_bytes(content_length,
                                       self._on_request_body)  # it was originally all read into memory
                return

            # If the request carries no Content-Length, simply start handling it right away
            self.request_callback(self._request)  # this invokes the application's __call__ magic method
        except _BadRequestException as e:
            gen_log.info("Malformed HTTP request from %r: %s", self.address, e)
            self.close()
            return
Example #39
 def _update_cookies(self, headers):
     try:
         sc = headers['Set-Cookie']
         cookies = escape.native_str(sc)
         self.cookies.update(Cookie.SimpleCookie(cookies))
         while True:
             self.cookies.update(Cookie.SimpleCookie(cookies))
             if ',' not in cookies:
                 break
             cookies = cookies[cookies.find(',') + 1:]
     except KeyError:
         return
Example #40
 def _on_message(self, data):
     try:
         timeout = 5
         msg = native_str(data.decode('utf-8'))
         logging.info("Received: %s", msg)
         response = methods.dispatch(msg)
         if not isinstance(response, NotificationResponse):
             #write response
             self.write(str(response).encode(encoding="utf-8") + self.EOF)
         self.io_loop.add_timeout(self.io_loop.time() + timeout, self._on_timeout)
     except Exception as ex:
         logging.error("Exception: %s", str(ex))
Example #41
def _oauth_parse_response(body: bytes) -> Dict[str, Any]:
    # I can't find an officially-defined encoding for oauth responses and
    # have never seen anyone use non-ascii.  Leave the response in a byte
    # string for python 2, and use utf8 on python 3.
    body_str = escape.native_str(body)
    p = urllib.parse.parse_qs(body_str, keep_blank_values=False)
    token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])

    # Add the extra parameters the Provider included to the token
    special = ("oauth_token", "oauth_token_secret")
    token.update((k, p[k][0]) for k in p if k not in special)
    return token
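An illustrative call to the parser above; the body is a made-up example of what an OAuth provider might return:

# Hypothetical input and result, for illustration only.
body = b'oauth_token=abc&oauth_token_secret=xyz&screen_name=demo'
token = _oauth_parse_response(body)
# token -> {'key': 'abc', 'secret': 'xyz', 'screen_name': 'demo'}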
Example #42
    def fetch(self, url, **kwargs):
        files = kwargs.pop('files',
                           None)  # files maps field names to file names, e.g. files={'img': img_path}
        if isinstance(kwargs.get('body'), dict):
            if not files:
                kwargs['body'] = json_util.dumps(kwargs['body'])
            elif 'data' in kwargs[
                    'body']:  # files and body={'data': {...}} may be given together; the API reads it via self.get_request_data()
                kwargs['body']['data'] = json_util.dumps(
                    kwargs['body']['data'])
        if 'body' in kwargs or files:
            kwargs['method'] = kwargs.get('method', 'POST')

        headers = kwargs.get('headers', {})
        headers['Cookie'] = ''.join(
            ['%s=%s;' % (x, morsel.value) for (x, morsel) in cookie.items()])

        if files:
            boundary = uuid.uuid4().hex
            headers.update({
                'Content-Type':
                'multipart/form-data; boundary=%s' % boundary
            })
            producer = partial(body_producer, boundary, files,
                               kwargs.pop('body', {}))
            request = HTTPRequest(self.get_url(url),
                                  headers=headers,
                                  body_producer=producer,
                                  **kwargs)
        else:
            request = HTTPRequest(self.get_url(url), headers=headers, **kwargs)

        self.http_client.fetch(request, self.stop)

        response = self.wait(timeout=60)
        headers = response.headers
        try:
            sc = headers._dict.get('Set-Cookie') if hasattr(
                headers, '_dict') else headers.get('Set-Cookie')
            if sc:
                text = native_str(sc)
                text = re.sub(r'Path=/(,)?', '', text)
                cookie.update(Cookie.SimpleCookie(text))
                while True:
                    cookie.update(Cookie.SimpleCookie(text))
                    if ',' not in text:
                        break
                    text = text[text.find(',') + 1:]
        except KeyError:
            pass

        return response
Example #43
    def __init__(self,
                 method,
                 uri,
                 version="HTTP/1.0",
                 headers=None,
                 body=None,
                 remote_ip=None,
                 protocol=None,
                 host=None,
                 files=None,
                 connection=None):
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or httputil.HTTPHeaders()
        self.body = body or ""
        if connection and connection.xheaders:
            # Squid uses X-Forwarded-For, others use X-Real-Ip
            self.remote_ip = self.headers.get(
                "X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip))
            if not self._valid_ip(self.remote_ip):
                self.remote_ip = remote_ip
            # AWS uses X-Forwarded-Proto
            self.protocol = self.headers.get(
                "X-Scheme", self.headers.get("X-Forwarded-Proto", protocol))
            if self.protocol not in ("http", "https"):
                self.protocol = "http"
        else:
            self.remote_ip = remote_ip
            if protocol:
                self.protocol = protocol
            elif connection and isinstance(connection.stream,
                                           iostream.SSLIOStream):
                self.protocol = "https"
            else:
                self.protocol = "http"
        self.host = host or self.headers.get("Host") or "127.0.0.1"
        self.files = files or {}
        self.connection = connection
        self._start_time = time.time()
        self._finish_time = None

        scheme, netloc, path, query, fragment = urlparse.urlsplit(
            native_str(uri))
        self.path = path
        self.query = query
        arguments = parse_qs_bytes(query)
        self.arguments = {}
        for name, values in arguments.iteritems():
            values = [v for v in values if v]
            if values:
                self.arguments[name] = values
Example #44
def parse_response_start_line(line):
    """Returns a (version, code, reason) tuple for an HTTP 1.x response line.

    The response is a `collections.namedtuple`.

    >>> parse_response_start_line("HTTP/1.1 200 OK")
    ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
    """
    line = native_str(line)
    match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line)
    assert match
    return ResponseStartLine(match.group(1), int(match.group(2)),
                             match.group(3))
Example #45
 def get(self):
     if self.current_user:
         self.redirect('/')
         return
     _next = self.get_argument('next', None)
     self.clear_cookie('next')
     if _next:
         self.set_secure_cookie('next', _next, expires_days=None)
     auto_login = escape.native_str(self.get_secure_cookie('auto_login'))
     if auto_login == 'true':
         self.signin()
         return
     self.render('login.html')
Example #46
def parse_response_start_line(line: str) -> ResponseStartLine:
    """Returns a (version, code, reason) tuple for an HTTP 1.x response line.

    The response is a `collections.namedtuple`.

    >>> parse_response_start_line("HTTP/1.1 200 OK")
    ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
    """
    line = native_str(line)
    match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line)
    if not match:
        raise HTTPInputError("Error parsing response start line")
    return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3))
Example #47
 def __init__(self, template_string, name="<string>", loader=None,
              compress_whitespace=None, autoescape=_UNSET):
     self.name = name
     self.compress_whitespace = True
     if autoescape is not _UNSET:
         self.autoescape = autoescape
     elif loader:
         self.autoescape = loader.autoescape
     else:
         self.autoescape = _DEFAULT_AUTOESCAPE
     self.namespace = loader.namespace if loader else {}
     reader = _TemplateReader(name, escape.native_str(template_string))
     self.file = _File(self, _parse(reader, self))
Example #48
 def test_unicode_newlines(self):
     # Ensure that only \r\n is recognized as a header separator, and not
     # the other newline-like unicode characters.
     # Characters that are likely to be problematic can be found in
     # http://unicode.org/standard/reports/tr13/tr13-5.html
     # and cpython's unicodeobject.c (which defines the implementation
     # of unicode_type.splitlines(), and uses a different list than TR13).
     newlines = [
         u'\u001b',  # VERTICAL TAB
         u'\u001c',  # FILE SEPARATOR
         u'\u001d',  # GROUP SEPARATOR
         u'\u001e',  # RECORD SEPARATOR
         u'\u0085',  # NEXT LINE
         u'\u2028',  # LINE SEPARATOR
         u'\u2029',  # PARAGRAPH SEPARATOR
     ]
     for newline in newlines:
         # Try the utf8 and latin1 representations of each newline
         for encoding in ['utf8', 'latin1']:
             try:
                 try:
                     encoded = newline.encode(encoding)
                 except UnicodeEncodeError:
                     # Some chars cannot be represented in latin1
                     continue
                 data = b'Cookie: foo=' + encoded + b'bar'
                 # parse() wants a native_str, so decode through latin1
                 # in the same way the real parser does.
                 headers = HTTPHeaders.parse(
                     native_str(data.decode('latin1')))
                 expected = [
                     ('Cookie',
                      'foo=' + native_str(encoded.decode('latin1')) + 'bar')
                 ]
                 self.assertEqual(expected, list(headers.get_all()))
             except Exception:
                 gen_log.warning("failed while trying %r in %s", newline,
                                 encoding)
                 raise
Example #49
    def parse_config_file(self, path, final=True):
        """Parses and loads the config file at the given path.

        The config file contains Python code that will be executed (so
        it is **not safe** to use untrusted config files). Anything in
        the global namespace that matches a defined option will be
        used to set that option's value.

        Options are not parsed from strings as they would be on the
        command line; they should be set to the correct type (this
        means if you have ``datetime`` or ``timedelta`` options you
        will need to import those modules in the config file.

        Example (using the options defined in the top-level docs of
        this module)::

            port = 80
            mysql_host = 'mydb.example.com:3306'
            memcache_hosts = ['cache1.example.com:11011',
                              'cache2.example.com:11011']

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.

        .. note::

            `tornado.options` is primarily a command-line library.
            Config file support is provided for applications that wish
            to use it, but applications that prefer config files may
            wish to look at other libraries instead.

        .. versionchanged:: 4.1
           Config files are now always interpreted as utf-8 instead of
           the system default encoding.

        .. versionchanged:: 4.4
           The special variable ``__file__`` is available inside config
           files, specifying the absolute path to the config file itself.

        """
        config = {'__file__': os.path.abspath(path)}
        with open(path, 'rb') as f:
            exec_in(native_str(f.read()), config, config)
        for name in config:
            normalized = self._normalize_name(name)
            if normalized in self._options:
                self._options[normalized].set(config[name])

        if final:
            self.run_parse_callbacks()
Example #50
    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
        assert match
        self.code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))

        if self.request.method == "HEAD":
            # HEAD requests never have content, even though they may have
            # content-length headers
            self._on_body(b(""))
            return
        if 100 <= self.code < 200 or self.code in (204, 304):
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            assert "Transfer-Encoding" not in self.headers
            assert content_length in (None, 0)
            self._on_body(b(""))
            return

        if (self.request.use_gzip
                and self.headers.get("Content-Encoding") == "gzip"):
            # Magic parameter makes zlib module understand gzip header
            # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
            self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b("\r\n"), self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
Example #51
def parse_http_headers(payload):
    payload = native_str(payload)
    # Implements simple HTTP1Connection._read_message but IO-free.
    lines = payload.splitlines()
    if lines and ':' not in lines[0]:
        # Drop start line
        lines.pop(0)
    # Drop contents
    if '' in lines:
        lines[:] = lines[:lines.index('')]
    return (
        parse_request_start_line('GET / HTTP/1.1'),
        HTTPHeaders.parse('\r\n'.join(lines)),
    )
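A usage sketch for the IO-free helper above; the payload is illustrative:

# Hypothetical payload; the helper discards the real start line and any body.
start_line, headers = parse_http_headers(
    b'GET /index HTTP/1.1\r\nHost: example.com\r\nAccept: */*\r\n\r\nignored-body')
# headers['Host'] -> 'example.com'; start_line is the fixed placeholder
# returned by parse_request_start_line('GET / HTTP/1.1').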
Example #52
 def default(self, obj):
     if isinstance(obj, bytes):
         return native_str(obj)
     if isinstance(obj, (datetime, date)):
         if hasattr(obj, 'tzinfo') and obj.tzinfo is not None:
             return datetime_2_isoformat(obj)
         else:
             return datetime_2_isoformat(localize_dt(obj))
     if isinstance(obj, timedelta):
         return (datetime.min + obj).time().isoformat()
     if isinstance(obj, Decimal):
         return float(obj)
     # Let the base class default method raise the TypeError
     return json.JSONEncoder.default(self, obj)
Example #53
def main():
    # Reset the logging level; by default tornado uses info
    # Under py2, options.logging cannot be unicode
    print("start")

    options.logging = native_str(settings.LOGGING_LEVEL)
    try:
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    except:
        logger.exception("use uvloop error")
    # Bind the server's web listening port
    io_loop_start()
    # Start the event loop
    ioloop.IOLoop.instance().start()
Example #54
        def set_curl_callback(curl):
            def size_limit(download_size, downloaded, upload_size, uploaded):
                if download_size and download_size > download_size_limit:
                    return 1
                if downloaded > download_size_limit:
                    return 1
                return 0

            if pycurl:
                if not CURL_ENCODING:
                    try:
                        curl.unsetopt(pycurl.ENCODING)
                    except:
                        pass
                if not CURL_CONTENT_LENGTH:
                    try:
                        if headers.get('content-length'):
                            headers.pop('content-length')
                            curl.setopt(pycurl.HTTPHEADER, [
                                "%s: %s" % (native_str(k), native_str(v))
                                for k, v in HTTPHeaders(headers).get_all()
                            ])
                    except:
                        pass
                if config.dns_server:
                    curl.setopt(pycurl.DNS_SERVERS, config.dns_server)
                curl.setopt(pycurl.NOPROGRESS, 0)
                curl.setopt(pycurl.PROGRESSFUNCTION, size_limit)
                curl.setopt(pycurl.CONNECTTIMEOUT, int(connect_timeout))
                curl.setopt(pycurl.TIMEOUT, int(request_timeout))
                if proxy:
                    if proxy.get('scheme', '') == 'socks5':
                        curl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
                    elif proxy.get('scheme', '') == 'socks5h':
                        curl.setopt(pycurl.PROXYTYPE,
                                    pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
            return curl
Example #55
    def __init__(self,
                 method,
                 uri,
                 version="HTTP/1.0",
                 headers=None,
                 body=None,
                 remote_ip=None,
                 protocol=None,
                 host=None,
                 files=None,
                 connection=None,
                 arguments=None,
                 idents=None,
                 msg_id=None,
                 stream=None):
        # ZMQWEB NOTE: This method is copied from the base class to make a
        # number of changes. We have added the arguments, ident, msg_id and
        # stream kwargs.
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or httputil.HTTPHeaders()
        self.body = body or ""
        # ZMQWEB NOTE: We simply copy the remote_ip, protocol and host as they
        # have been parsed by the other side.
        self.remote_ip = remote_ip
        self.protocol = protocol
        self.host = host
        self.files = files or {}
        # ZMQWEB NOTE: The connection attribute MUST NOT be saved on the
        # instance, because its presence triggers logic in the base class
        # that doesn't apply here: ZeroMQ sockets are connectionless.
        self._start_time = time.time()
        self._finish_time = None

        # ZMQWEB NOTE: Attributes we have added to ZMQHTTPRequest.
        self.idents = idents
        self.msg_id = msg_id
        self.stream = stream
        self._chunks = []
        self._write_callback = None

        scheme, netloc, path, query, fragment = urlparse.urlsplit(
            native_str(uri))
        self.path = path
        self.query = query
        # ZMQWEB NOTE: We let the other side parse the arguments and simply
        # pass them into this class.
        self.arguments = arguments
Exemple #56
0
    def _on_headers(self, data):
        try:
            data = native_str(data.decode('latin1'))
            eol = data.find("\r\n")
            start_line = data[:eol]
            try:
                method, uri, version = start_line.split(" ")
            except ValueError:
                raise _BadRequestException("Malformed HTTP request line")
            if not version.startswith("HTTP/"):
                raise _BadRequestException(
                    "Malformed HTTP version in HTTP Request-Line")
            headers = httputil.HTTPHeaders.parse(data[eol:])

            # HTTPRequest wants an IP, not a full socket address
            if getattr(self.stream.socket, 'family',
                       socket.AF_INET) in (socket.AF_INET, socket.AF_INET6):
                # Jython 2.5.2 doesn't have the socket.family attribute, so just assume IP in that case.
                remote_ip = self.address[0]
            else:
                # Unix (or other) socket; fake the remote address
                remote_ip = '0.0.0.0'

            self._request = HTTPRequest(connection=self,
                                        method=method,
                                        uri=uri,
                                        version=version,
                                        headers=headers,
                                        remote_ip=remote_ip)

            content_length = headers.get("Content-Length")
            if content_length:
                content_length = int(content_length)
                if content_length > self.stream.max_buffer_size:
                    raise _BadRequestException("Content-Length too long")
                if headers.get("Expect") == "100-continue":
                    self.stream.write(b("HTTP/1.1 100 (Continue)\r\n\r\n"))
                self.stream.read_bytes(content_length,
                                       self._on_request_body)  # 100-continue: keep reading the request body
                return

            self.request_callback(
                self._request
            )  # Build an HTTPRequest object and hand it to request_callback, i.e. the web.Application object
        except _BadRequestException as e:
            logging.info("Malformed HTTP request from %s: %s", self.address[0],
                         e)
            self.close()
            return
Exemple #57
0
    def __init__(self, environ):
        """Parses the given WSGI environment to construct the request."""
        self.method = environ["REQUEST_METHOD"]
        self.path = urllib_parse.quote(
            from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        self.path += urllib_parse.quote(
            from_wsgi_str(environ.get("PATH_INFO", "")))
        self.uri = self.path
        self.arguments = {}
        self.query_arguments = {}
        self.body_arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            self.arguments = parse_qs_bytes(native_str(self.query),
                                            keep_blank_values=True)
            self.query_arguments = copy.deepcopy(self.arguments)
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            self.headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                self.headers[key[5:].replace("_", "-")] = environ[key]
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read(
                int(self.headers["Content-Length"]))
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body
        self.files = {}
        httputil.parse_body_arguments(self.headers.get("Content-Type",
                                                       ""), self.body,
                                      self.body_arguments, self.files)

        for k, v in self.body_arguments.items():
            self.arguments.setdefault(k, []).extend(v)

        self._start_time = time.time()
        self._finish_time = None
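A minimal sketch of driving this constructor with a hand-built WSGI environ; the values below are made up for illustration, and HTTPRequest stands for the class defined above:

# Hypothetical environ; every key is standard WSGI, the values are invented.
import io

environ = {
    "REQUEST_METHOD": "POST",
    "SCRIPT_NAME": "",
    "PATH_INFO": "/submit",
    "QUERY_STRING": "debug=1",
    "CONTENT_TYPE": "application/x-www-form-urlencoded",
    "CONTENT_LENGTH": "7",
    "wsgi.input": io.BytesIO(b"name=ok"),
    "wsgi.url_scheme": "http",
    "REMOTE_ADDR": "127.0.0.1",
    "HTTP_HOST": "localhost:8000",
}
request = HTTPRequest(environ)      # the class defined above
print(request.method, request.uri)  # POST /submit?debug=1
print(request.arguments)            # query and body arguments merged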
Exemple #58
0
 def _on_access_token(self, redirect_uri, client_id, client_secret,
                      callback, response):
     if response.error:
         logging.warning('QQ auth error: %s' % str(response))
         callback(None)
         return
     args = escape.parse_qs_bytes(escape.native_str(response.body))
     session = {
         "access_token": args["access_token"][-1],
         "expires": args.get("expires_in")[0]
     }
     http = self.get_auth_http_client()
     response = yield gen.Task(http.fetch, url_concat(self._OAUTH_OPEND_ID_URL, {"access_token": session["access_token"]}), validate_cert=False, request_timeout=100)
     self._on_open_id(redirect_uri, client_id,
                      client_secret, callback, session, response)
Exemple #59
0
    def _get_request_body(self, request_data):
        post_type = request_data.get("post_type")
        data = request_data.get("data")
        if data is None:
            return None

        if post_type == "form":
            body = urlencode(data or {})
        elif post_type == "json":
            body = json.dumps(data)
        elif post_type == "string" and isinstance(data, basestring):
            body = native_str(data)
        else:
            body = None
        return body
Exemple #60
0
 def _parse_headers(self, data):
     # The lstrip removes newlines that some implementations sometimes
     # insert between messages of a reused connection.  Per RFC 7230,
     # we SHOULD ignore at least one empty line before the request.
     # http://tools.ietf.org/html/rfc7230#section-3.5
     data = native_str(data.decode('latin1')).lstrip("\r\n")
     eol = data.find("\r\n")
     start_line = data[:eol]
     try:
         headers = httputil.HTTPHeaders.parse(data[eol:])
     except ValueError:
         # probably from split() if there was no ':' in the line
         raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
                                       data[eol:100])
     return start_line, headers
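A standalone re-run of the same steps, only to illustrate the leading-CRLF tolerance the comment above describes; it assumes nothing beyond tornado itself:

# Hypothetical check: a reused connection may prepend an empty line before the next request.
from tornado import httputil
from tornado.escape import native_str

raw = b"\r\nGET /ping HTTP/1.1\r\nHost: example.com\r\n\r\n"
data = native_str(raw.decode("latin1")).lstrip("\r\n")
eol = data.find("\r\n")
print(data[:eol])                                      # GET /ping HTTP/1.1
print(httputil.HTTPHeaders.parse(data[eol:])["Host"])  # example.com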