def _auth_future_to_callback(callback, future):
    try:
        result = future.result()
    except AuthError as e:
        gen_log.warning(str(e))
        result = None
    callback(result)
def connect(self, address, callback=None):
    """Connects the socket to a remote address without blocking.

    May only be called if the socket passed to the constructor was
    not previously connected.  The address parameter is in the
    same format as for socket.connect, i.e. a (host, port) tuple.
    If callback is specified, it will be called when the
    connection is completed.

    Note that it is safe to call IOStream.write while the
    connection is pending, in which case the data will be written
    as soon as the connection is ready.  Calling IOStream read
    methods before the socket is connected works on some platforms
    but is non-portable.
    """
    self._connecting = True
    try:
        self.socket.connect(address)
    except socket.error as e:
        # In non-blocking mode we expect connect() to raise an
        # exception with EINPROGRESS or EWOULDBLOCK.
        #
        # On freebsd, other errors such as ECONNREFUSED may be
        # returned immediately when attempting to connect to
        # localhost, so handle them the same way as an error
        # reported later in _handle_connect.
        if e.args[0] not in (errno.EINPROGRESS, errno.EWOULDBLOCK):
            gen_log.warning("Connect error on fd %d: %s",
                            self.socket.fileno(), e)
            self.close()
            return
def get_authenticated_user(self, callback, http_client=None):
    """Gets the OAuth authorized user and access token on callback.

    This method should be called from the handler for your registered
    OAuth Callback URL to complete the registration process.  We call
    callback with the authenticated user, which in addition to standard
    attributes like 'name' includes the 'access_key' attribute, which
    contains the OAuth access you can use to make authorized requests
    to this service on behalf of the user.
    """
    request_key = escape.utf8(self.get_argument("oauth_token"))
    oauth_verifier = self.get_argument("oauth_verifier", None)
    request_cookie = self.get_cookie("_oauth_request_token")
    if not request_cookie:
        gen_log.warning("Missing OAuth request token cookie")
        callback(None)
        return
    self.clear_cookie("_oauth_request_token")
    cookie_key, cookie_secret = [
        base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
    if cookie_key != request_key:
        gen_log.info((cookie_key, request_key, request_cookie))
        gen_log.warning("Request token does not match cookie")
        callback(None)
        return
    token = dict(key=cookie_key, secret=cookie_secret)
    if oauth_verifier:
        token["verifier"] = oauth_verifier
    if http_client is None:
        http_client = self.get_auth_http_client()
    http_client.fetch(self._oauth_access_token_url(token),
                      self.async_callback(self._on_access_token, callback))
def to_json(self, content):
    try:
        if content:
            content = json.loads(content)
    except Exception as e:
        gen_log.warning("HttpHelper to json Error: " + str(e))
    return content
def _do_ssl_handshake(self):
    # Based on code from test_ssl.py in the python stdlib
    try:
        self._handshake_reading = False
        self._handshake_writing = False
        self.socket.do_handshake()
    except ssl.SSLError as err:
        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
            self._handshake_reading = True
            return
        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
            self._handshake_writing = True
            return
        elif err.args[0] in (ssl.SSL_ERROR_EOF,
                             ssl.SSL_ERROR_ZERO_RETURN):
            return self.close()
        elif err.args[0] == ssl.SSL_ERROR_SSL:
            try:
                peer = self.socket.getpeername()
            except Exception:
                peer = '(not connected)'
            gen_log.warning("SSL Error on %d %s: %s",
                            self.socket.fileno(), peer, err)
            return self.close()
        raise
def _handle_events(self, fd, events):
    """This method is the actual handler for IOLoop, that gets called
    whenever an event on my socket is posted.  It dispatches to
    _handle_recv, etc."""
    if not self.socket:
        gen_log.warning("Got events for closed stream %s", fd)
        return
    try:
        # dispatch events:
        if events & IOLoop.ERROR:
            gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
            return
        if events & IOLoop.READ:
            self._handle_recv()
            if not self.socket:
                return
        if events & IOLoop.WRITE:
            self._handle_send()
            if not self.socket:
                return
        # rebuild the poll state
        self._rebuild_io_state()
    except Exception:
        gen_log.error("Uncaught exception, closing connection.",
                      exc_info=True)
        self.close()
        raise
def parse_body_arguments(content_type, body, arguments, files):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body),
                                           keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        fields = content_type.split(";")
        for field in fields:
            k, sep, v = field.strip().partition("=")
            if k == "boundary" and v:
                parse_multipart_form_data(utf8(v), body, arguments, files)
                break
        else:
            gen_log.warning("Invalid multipart/form-data")
def _verify_cert(self, peercert):
    """Returns True if peercert is valid according to the configured
    validation mode and hostname.

    The ssl handshake already tested the certificate for a valid
    CA signature; the only thing that remains is to check
    the hostname.
    """
    if isinstance(self._ssl_options, dict):
        verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
    elif isinstance(self._ssl_options, ssl.SSLContext):
        verify_mode = self._ssl_options.verify_mode
    assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED,
                           ssl.CERT_OPTIONAL)
    if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
        return True
    cert = self.socket.getpeercert()
    if cert is None and verify_mode == ssl.CERT_REQUIRED:
        gen_log.warning("No SSL certificate given")
        return False
    try:
        ssl_match_hostname(peercert, self._server_hostname)
    except SSLCertificateError:
        gen_log.warning("Invalid SSL certificate", exc_info=True)
        return False
    else:
        return True
def initialize(self, io_loop=None, max_clients=10):
    self.io_loop = io_loop
    self._multi = pycurl.CurlMulti()
    self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
    self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
    self._curls = [_curl_create() for i in range(max_clients)]
    self._free_list = self._curls[:]
    self._requests = collections.deque()
    self._fds = {}
    self._timeout = None

    try:
        self._socket_action = self._multi.socket_action
    except AttributeError:
        # socket_action is found in pycurl since 7.18.2 (it's been
        # in libcurl longer than that but wasn't accessible to
        # python).
        gen_log.warning("socket_action method missing from pycurl; "
                        "falling back to socket_all. Upgrading "
                        "libcurl and pycurl will improve performance")
        self._socket_action = \
            lambda fd, action: self._multi.socket_all()

    # libcurl has bugs that sometimes cause it to not report all
    # relevant file descriptors and timeouts to TIMERFUNCTION/
    # SOCKETFUNCTION.  Mitigate the effects of such bugs by forcing
    # a periodic scan of all active requests.
    self._force_timeout_callback = ioloop.PeriodicCallback(
        self._handle_force_timeout, 1000, io_loop=io_loop)
    self._force_timeout_callback.start()
def _on_friendfeed_request(self, callback, response):
    if response.error:
        gen_log.warning("Error response %s fetching %s", response.error,
                        response.request.url)
        callback(None)
        return
    callback(escape.json_decode(response.body))
def _do_ssl_handshake(self):
    # Based on code from test_ssl.py in the python stdlib
    try:
        self._handshake_reading = False
        self._handshake_writing = False
        self.socket.do_handshake()
    except ssl.SSLError as err:
        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
            self._handshake_reading = True
            return
        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
            self._handshake_writing = True
            return
        elif err.args[0] in (ssl.SSL_ERROR_EOF,
                             ssl.SSL_ERROR_ZERO_RETURN):
            return self.close(exc_info=True)
        elif err.args[0] == ssl.SSL_ERROR_SSL:
            try:
                peer = self.socket.getpeername()
            except Exception:
                peer = "(not connected)"
            gen_log.warning("SSL Error on %d %s: %s",
                            self.socket.fileno(), peer, err)
            return self.close(exc_info=True)
        raise
    except socket.error as err:
        if err.args[0] in (errno.ECONNABORTED, errno.ECONNRESET):
            return self.close(exc_info=True)
    else:
        self._ssl_accepting = False
        if self._ssl_connect_callback is not None:
            callback = self._ssl_connect_callback
            self._ssl_connect_callback = None
            self._run_callback(callback)
def _handle_read(self):
    try:
        try:
            # Pretend to have a pending callback so that an EOF in
            # _read_to_buffer doesn't trigger an immediate close
            # callback.  At the end of this method we'll either
            # establish a real pending callback via
            # _read_from_buffer or run the close callback.
            #
            # We need two try statements here so that
            # pending_callbacks is decremented before the `except`
            # clause below (which calls `close` and does need to
            # trigger the callback)
            self._pending_callbacks += 1
            while True:
                # Read from the socket until we get EWOULDBLOCK or equivalent.
                # SSL sockets do some internal buffering, and if the data is
                # sitting in the SSL object's buffer select() and friends
                # can't see it; the only way to find out if it's there is to
                # try to read it.
                if self._read_to_buffer() == 0:
                    break
        finally:
            self._pending_callbacks -= 1
    except Exception:
        gen_log.warning("error on read", exc_info=True)
        self.close()
        return
    if self._read_from_buffer():
        return
    else:
        self._maybe_run_close_callback()
def _handle_write(self):
    while self._write_buffer:
        try:
            if not self._write_buffer_frozen:
                # On windows, socket.send blows up if given a
                # write buffer that's too large, instead of just
                # returning the number of bytes it was able to
                # process.  Therefore we must not call socket.send
                # with more than 128KB at a time.
                _merge_prefix(self._write_buffer, 128 * 1024)
            num_bytes = self.write_to_fd(self._write_buffer[0])
            if num_bytes == 0:
                # With OpenSSL, if we couldn't write the entire buffer,
                # the very same string object must be used on the
                # next call to send.  Therefore we suppress
                # merging the write buffer after an incomplete send.
                # A cleaner solution would be to set
                # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
                # not yet accessible from python
                # (http://bugs.python.org/issue8240)
                self._write_buffer_frozen = True
                break
            self._write_buffer_frozen = False
            _merge_prefix(self._write_buffer, num_bytes)
            self._write_buffer.popleft()
        except socket.error as e:
            if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                self._write_buffer_frozen = True
                break
            else:
                gen_log.warning("Write error on %d: %s",
                                self.fileno(), e)
                self.close()
                return
def _on_authentication_verified(self, callback, response):
    if response.error or b"is_valid:true" not in response.body:
        gen_log.warning("Invalid OpenID response: %s", response.error or
                        response.body)
        callback(None)
        return

    # Make sure we got back at least an email from attribute exchange
    ax_ns = None
    for name in self.request.arguments.keys():
        if name.startswith("openid.ns.") and \
                self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
            ax_ns = name[10:]
            break

    def get_ax_arg(uri):
        if not ax_ns:
            return u""
        prefix = "openid." + ax_ns + ".type."
        ax_name = None
        for name in self.request.arguments.keys():
            if self.get_argument(name) == uri and name.startswith(prefix):
                part = name[len(prefix):]
                ax_name = "openid." + ax_ns + ".value." + part
                break
        if not ax_name:
            return u""
        return self.get_argument(ax_name, u"")

    email = get_ax_arg("http://axschema.org/contact/email")
    name = get_ax_arg("http://axschema.org/namePerson")
    first_name = get_ax_arg("http://axschema.org/namePerson/first")
    last_name = get_ax_arg("http://axschema.org/namePerson/last")
    username = get_ax_arg("http://axschema.org/namePerson/friendly")
    locale = get_ax_arg("http://axschema.org/pref/language").lower()
    user = dict()
    name_parts = []
    if first_name:
        user["first_name"] = first_name
        name_parts.append(first_name)
    if last_name:
        user["last_name"] = last_name
        name_parts.append(last_name)
    if name:
        user["name"] = name
    elif name_parts:
        user["name"] = u" ".join(name_parts)
    elif email:
        user["name"] = email.split("@")[0]
    if email:
        user["email"] = email
    if locale:
        user["locale"] = locale
    if username:
        user["username"] = username
    claimed_id = self.get_argument("openid.claimed_id", None)
    if claimed_id:
        user["claimed_id"] = claimed_id
    callback(user)
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`.
    """
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
def submit(self, fn, *args, **kwargs):
    gen_log.warning("submit() running synchronously; "
                    "consider using a real thread pool")
    future = TracebackFuture()
    try:
        future.set_result(fn(*args, **kwargs))
    except Exception:
        future.set_exc_info(sys.exc_info())
    return future
def log_exception(self, typ, value, tb):
    if isinstance(value, HTTPError):
        if value.log_message:
            info = "%d %s" % (value.status_code, value.log_message)
            gen_log.warning(info)
    else:
        app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                      self.request, exc_info=(typ, value, tb))
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`.
    """
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
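# How log_stack above is typically installed (per its docstring): a sketch
# assuming a pre-6.0 Tornado, where IOLoop.set_blocking_log_threshold is the
# convenience wrapper that passes log_stack to set_blocking_signal_threshold.
# The one-second threshold is an arbitrary example value.
from tornado import ioloop

loop = ioloop.IOLoop.current()
loop.set_blocking_log_threshold(1.0)  # SIGALRM fires and log_stack runs if a
                                      # callback blocks the loop for over 1s
loop.start()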
def _add_watching(path):
    try:
        last_modified = os.stat(path).st_mtime
    except OSError:
        gen_log.warning("failed to watch %s" % path)
        traceback.print_exc()
        return
    _watched_files[path] = last_modified
def start(io_loop=None, check_time=500):
    io_loop = io_loop or ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("watcher started more than once in the same process")
    scheduler = ioloop.PeriodicCallback(_check_files, check_time,
                                        io_loop=io_loop)
    scheduler.start()
def _on_access_token(self, callback, response):
    if response.error:
        gen_log.warning("Could not fetch access token")
        callback(None)
        return

    access_token = _oauth_parse_response(response.body)
    self._oauth_get_user(access_token, self.async_callback(
        self._on_oauth_get_user, access_token, callback))
def log_exception(self, typ, value, tb):
    if isinstance(value, errors.SMTPError):
        if value.log_message:
            _format = '%d %s: ' + value.log_message
            args = ([value.status_code, self._request_summary()] +
                    list(value.args))
            gen_log.warning(_format, *args)
    else:
        app_log.error('Uncaught exception %s', self._request_summary(),
                      exc_info=(typ, value, tb))
def log_exception(self, typ, value, tb):
    if isinstance(value, CommandError):
        if value.message:
            format = "%s: " + value.message
            args = [self._command_summary()] + list(value.args)
            gen_log.warning(format, *args)
    else:
        app_log.error("Uncaught exception %s\n%r", self._command_summary(),
                      self.command, exc_info=(typ, value, tb))
def _create_threadpool(cls, num_threads):
    pid = os.getpid()
    if cls._threadpool_pid != pid:
        # Threads cannot survive after a fork, so if our pid isn't what it
        # was when we created the pool then delete it.
        cls._threadpool = None
    if cls._threadpool is None:
        gen_log.warning("ThreadedResolver: creating shared ThreadPoolExecutor")
        from concurrent.futures import ThreadPoolExecutor
        cls._threadpool = ThreadPoolExecutor(num_threads)
        cls._threadpool_pid = pid
    return cls._threadpool
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`.

    .. deprecated:: 5.1

       This method will be removed in Tornado 6.0.
    """
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
def _handle_read(self):
    try:
        has_packet = self._unread_packets_loop()
    except Exception as e:
        gen_log.warning("error on read: %s", e)
        self.close(exc_info=True)
        return
    if has_packet:
        self._read_buffered_packet()
        return
    else:
        self._maybe_run_close_callback()
def connect(self, address, callback=None):
    self._connecting = True
    try:
        self._socket.connect(address)
    except socket.error as e:
        if (e.args[0] != errno.EINPROGRESS and
                e.args[0] not in _ERRNO_WOULDBLOCK):
            gen_log.warning("Connect error on fd %d: %s",
                            self.handle.fd, e)
            self.close()
            return
    self._on_connect_cb = stack_context.wrap(callback)
    self.handle.resume_writing()
def start(io_loop=None, check_time=500):
    """Begins watching source files for changes using the given `.IOLoop`."""
    io_loop = io_loop or ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once "
                        "in the same process")
    add_reload_hook(functools.partial(io_loop.close, all_fds=True))
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time,
                                        io_loop=io_loop)
    scheduler.start()
def handle_error(self, error):
    try:
        if self._error_cb is not None:
            callback = self._error_cb
            self._error_cb = None
            self._run_callback(callback, error)
        if error not in _ERRNO_CONNRESET:
            gen_log.warning("Error on stream(fd:%d) caught: %s",
                            self.handle.fd, errno.errorcode[error])
    finally:
        # On error, close the FD
        self.close()
def configurable_default(cls):
    if hasattr(select, "epoll") or sys.platform.startswith('linux'):
        try:
            from tornado.platform.epoll import EPollIOLoop
            return EPollIOLoop
        except ImportError:
            gen_log.warning("unable to import EPollIOLoop, "
                            "falling back to SelectIOLoop")
    if hasattr(select, "kqueue"):
        # Python 2.6+ on BSD or Mac
        from tornado.platform.kqueue import KQueueIOLoop
        return KQueueIOLoop
    from tornado.platform.select import SelectIOLoop
    return SelectIOLoop
def start(check_time=500):
    """Begins watching source files for changes.

    .. versionchanged:: 5.0

       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.
    """
    io_loop = ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning(
            "tornado.autoreload started more than once in the same process")
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time)
    scheduler.start()
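# A hedged usage sketch for the autoreload start() above (the Tornado 5.x
# signature with no io_loop argument); the watched config path is illustrative.
from tornado import autoreload, ioloop

autoreload.start(check_time=500)
autoreload.watch("settings.yaml")  # also reload when a non-module file changes
ioloop.IOLoop.current().start()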
def _handle_connect(self):
    err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if err != 0:
        self.error = socket.error(err, os.strerror(err))
        # IOLoop implementations may vary: some of them return
        # an error state before the socket becomes writable, so
        # in that case a connection failure would be handled by the
        # error path in _handle_events instead of here.
        gen_log.warning("Connect error on fd %d: %s",
                        self.socket.fileno(), errno.errorcode[err])
        self.close()
        return
    if self._connect_callback is not None:
        callback = self._connect_callback
        self._connect_callback = None
        self._run_callback(callback)
    self._connecting = False
def parse_multipart_form_data(
    boundary: bytes,
    data: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
) -> None:
    """Parses a ``multipart/form-data`` body.

    The ``boundary`` and ``data`` parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.

    .. versionchanged:: 5.1

       Now recognizes non-ASCII filenames in RFC 2231/5987
       (``filename*=``) format.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp).  I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b'"') and boundary.endswith(b'"'):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b"--" + boundary + b"--")
    if final_boundary_index == -1:
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
    for part in parts:
        if not part:
            continue
        eoh = part.find(b"\r\n\r\n")
        if eoh == -1:
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b"\r\n"):
            gen_log.warning("Invalid multipart/form-data")
            continue
        value = part[eoh + 4 : -2]
        if not disp_params.get("name"):
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(
                HTTPFile(
                    filename=disp_params["filename"], body=value,
                    content_type=ctype,
                )
            )
        else:
            arguments.setdefault(name, []).append(value)
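# An illustrative call to parse_multipart_form_data above, with a hand-built
# body (the boundary value and field name are arbitrary). It mirrors the wire
# format the parser expects: "--boundary", headers, blank line, value.
arguments, files = {}, {}
body = (b"--1234\r\n"
        b'Content-Disposition: form-data; name="field"\r\n'
        b"\r\n"
        b"value\r\n"
        b"--1234--\r\n")
parse_multipart_form_data(b"1234", body, arguments, files)
assert arguments["field"] == [b"value"]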
def start(io_loop=None, check_time=500):
    """Restarts the process automatically when a module is modified.

    We run on the I/O loop, and restarting is a destructive operation,
    so will terminate any pending requests.
    """
    io_loop = io_loop or ioloop.IOLoop.instance()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once "
                        "in the same process")
    add_reload_hook(functools.partial(io_loop.close, all_fds=True))
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time,
                                        io_loop=io_loop)
    scheduler.start()
def _handle_exception(self, typ, value, tb):
    if self.final_callback:
        self._remove_timeout()
        gen_log.warning("uncaught exception", exc_info=(typ, value, tb))
        self._run_callback(HTTPResponse(
            self.request, 599, error=value,
            request_time=self.io_loop.time() - self.start_time,
        ))
        if hasattr(self, "stream"):
            self.stream.close()
        return True
    else:
        # If our callback has already been called, we are probably
        # catching an exception that is not caused by us but rather
        # some child of our callback.  Rather than drop it on the floor,
        # pass it along.
        return False
def _do_ssl_handshake(self):
    # Based on code from test_ssl.py in the python stdlib
    try:
        self._handshake_reading = False
        self._handshake_writing = False
        self._socket.do_handshake()
    except ssl.SSLError as err:
        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
            self._handshake_reading = True
            return
        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
            self._handshake_writing = True
            return
        elif err.args[0] in (ssl.SSL_ERROR_EOF,
                             ssl.SSL_ERROR_ZERO_RETURN):
            self.close()
            return
        elif err.args[0] == ssl.SSL_ERROR_SSL:
            try:
                peer = self._socket.getpeername()
            except Exception:
                peer = '(not connected)'
            gen_log.warning("SSL Error on %d %s: %s",
                            self.handle.fd, peer, err)
            self.close()
            return
        raise
    except socket.error as err:
        if err.args[0] in _ERRNO_CONNRESET:
            self.close()
            return
    except AttributeError:
        # On Linux, if the connection was reset before the call to
        # wrap_socket, do_handshake will fail with an
        # AttributeError.
        self.close()
        return
    else:
        self._ssl_accepting = False
        if not self._verify_cert(self._socket.getpeercert()):
            self.close()
            return
        if self._ssl_on_connect_cb is not None:
            callback = self._ssl_on_connect_cb
            self._ssl_on_connect_cb = None
            self._run_callback(callback)
def start(io_loop=None, check_time=500):
    """Begins watching source files for changes.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    io_loop = io_loop or ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once "
                        "in the same process")
    if _has_execv:
        add_reload_hook(functools.partial(io_loop.close, all_fds=True))
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time,
                                        io_loop=io_loop)
    scheduler.start()
def _on_github_request(future, response):
    """Parse the JSON from the API."""
    if response.error:
        print(response.error)
        future.set_exception(
            AuthError("Error response %s fetching %s" %
                      (response.error, response.request.url)))
        return
    result = ObjectDict(code=response.code, headers=response.headers,
                        body=None)
    try:
        result.body = json_decode(response.body)
    except Exception:
        gen_log.warning("Invalid JSON from Github: %r", response.body)
        future.set_result(result)
        return
    future.set_result(result)
def connect(self, address, callback=None, server_hostname=None):
    """Connects the socket to a remote address without blocking.

    May only be called if the socket passed to the constructor was
    not previously connected.  The address parameter is in the
    same format as for `socket.connect <socket.socket.connect>`,
    i.e. a ``(host, port)`` tuple.  If ``callback`` is specified,
    it will be called when the connection is completed.

    If specified, the ``server_hostname`` parameter will be used
    in SSL connections for certificate validation (if requested in
    the ``ssl_options``) and SNI (if supported; requires
    Python 3.2+).

    Note that it is safe to call `IOStream.write
    <BaseIOStream.write>` while the connection is pending, in
    which case the data will be written as soon as the connection
    is ready.  Calling `IOStream` read methods before the socket is
    connected works on some platforms but is non-portable.
    """
    self._connecting = True
    try:
        self.socket.connect(address)
    except socket.error as e:
        # In non-blocking mode we expect connect() to raise an
        # exception with EINPROGRESS or EWOULDBLOCK.
        #
        # On freebsd, other errors such as ECONNREFUSED may be
        # returned immediately when attempting to connect to
        # localhost, so handle them the same way as an error
        # reported later in _handle_connect.
        if (e.args[0] != errno.EINPROGRESS and
                e.args[0] not in _ERRNO_WOULDBLOCK):
            gen_log.warning("Connect error on fd %s: %s",
                            self.socket.fileno(), e)
            self.close(exc_info=True)
            return
    if callback is not None:
        self._connect_callback = stack_context.wrap(callback)
        future = None
    else:
        future = self._connect_future = TracebackFuture()
    self._add_io_state(self.io_loop.WRITE)
    return future
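# A minimal callback-style sketch of the connect() documented above, assuming
# classic (pre-6.0) tornado.iostream.IOStream; the host and request line are
# placeholders.
import socket

from tornado import ioloop, iostream


def main():
    stream = iostream.IOStream(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM))

    def on_connect():
        pass  # connection established; reads are safe from here on

    stream.connect(("example.com", 80), callback=on_connect)
    # Per the docstring, writes may be queued while the connect is pending:
    stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")


main()
ioloop.IOLoop.current().start()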
def _handle_request_exception(self, e):
    close_connection(self)
    if isinstance(e, HTTPError):
        if e.log_message:
            error_format = "%d %s: " + e.log_message
            args = [e.status_code, self._request_summary()] + list(e.args)
            gen_log.warning(error_format, *args)
        if e.status_code not in httputil.responses and not e.reason:
            gen_log.error("Bad HTTP status code: %d", e.status_code)
            self.send_error(500, exc_info=sys.exc_info())
        else:
            self.send_error(e.status_code, exc_info=sys.exc_info())
    elif isinstance(e, NameError):
        app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                      self.request, exc_info=True)
        self.send_error(500, exc_info=sys.exc_info())
    elif isinstance(e, cx_Oracle.DatabaseError):
        try:
            msg = e.message.message
        except AttributeError:
            msg = e.message
        text = msg.split("%%")
        if len(text) > 2:
            msg = text[1]
        self.write({'error': msg})
        self.finish()
    else:
        app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                      self.request, exc_info=True)
        self.send_error(500, exc_info=sys.exc_info())
def fetch(self, url, method=None, headers=None, body=None):
    content = ''
    http_client = httpclient.HTTPClient()
    try:
        request = HTTPRequest(url, method=method, headers=headers, body=body)
        response = http_client.fetch(request)
        content = response.body
    except httpclient.HTTPError as e:
        # HTTPError is raised for non-200 responses; the response
        # can be found in e.response.
        gen_log.warning("HttpHelper fetch Error: " + str(e))
    except Exception as e:
        # Other errors are possible, such as IOError.
        gen_log.warning("HttpHelper fetch Error: " + str(e))
    http_client.close()
    return content
def log_exception(self, typ, value, tb):
    if isinstance(value, APIError):
        app_log.error(
            'API error %s: %s\n%r',
            value.error_id,
            value.log_message % value.args if value.log_message else '',
            self.request, exc_info=(typ, value, tb))
    elif isinstance(value, tornado.web.HTTPError):
        if value.log_message:
            format = "%d %s: " + value.log_message
            args = ([value.status_code,
                     self._request_summary()] + list(value.args))
            gen_log.warning(format, *args)
    else:
        value.error_id = APIError._generate_id()
        app_log.error("Uncaught exception %s %s\n%r",
                      self._request_summary(), value.error_id,
                      self.request, exc_info=(typ, value, tb))
def _on_access_token(self, redirect_uri, client_id, client_secret,
                     callback, fields, response):
    if response.error:
        gen_log.warning('Facebook auth error: %s' % str(response))
        callback(None)
        return

    args = escape.parse_qs_bytes(escape.native_str(response.body))
    session = {
        "access_token": args["access_token"][-1],
        "expires": args.get("expires")
    }

    self.facebook_request(
        path="/me",
        callback=self.async_callback(
            self._on_get_user_info, callback, session, fields),
        access_token=session["access_token"],
        fields=",".join(fields))
def _handle_read(self):
    try:
        try:
            # Pretend to have a pending callback so that an EOF in
            # _read_to_buffer doesn't trigger an immediate close
            # callback.  At the end of this method we'll either
            # establish a real pending callback via
            # _read_from_buffer or run the close callback.
            #
            # We need two try statements here so that
            # pending_callbacks is decremented before the `except`
            # clause below (which calls `close` and does need to
            # trigger the callback)
            self._pending_callbacks += 1
            while not self.closed():
                # Read from the socket until we get EWOULDBLOCK or equivalent.
                # SSL sockets do some internal buffering, and if the data is
                # sitting in the SSL object's buffer select() and friends
                # can't see it; the only way to find out if it's there is to
                # try to read it.
                if self._read_to_buffer() == 0:
                    break
        finally:
            self._pending_callbacks -= 1
    except Exception:
        gen_log.warning("error on read", exc_info=True)
        self.close(exc_info=True)
        return
    if self._read_from_buffer():
        return
    else:
        self._maybe_run_close_callback()
def prepare(self):
    # Handle JSON request bodies.
    content_type = self.request.headers.get('Content-Type', '')
    if content_type.startswith('application/json'):
        try:
            json_arguments = json_decode(utf8(self.request.body))
        except Exception as e:
            gen_log.warning('Invalid application/json body: %s', e)
            json_arguments = {}
        if isinstance(json_arguments, dict):
            for name, values in json_arguments.items():
                if isinstance(values, list):
                    _values = [str(v) for v in values]
                    self.request.arguments.setdefault(name, _values)
                    self.request.body_arguments.setdefault(name, values)
                else:
                    _values = str(values)
                    self.request.arguments.setdefault(name, []).append(_values)
                    self.request.body_arguments.setdefault(name, []).append(_values)
def _handle_write(self):
    while self._write_buffer:
        try:
            if not self._write_buffer_frozen:
                # On windows, socket.send blows up if given a
                # write buffer that's too large, instead of just
                # returning the number of bytes it was able to
                # process.  Therefore we must not call socket.send
                # with more than 128KB at a time.
                _merge_prefix(self._write_buffer, 128 * 1024)
            num_bytes = self.write_to_fd(self._write_buffer[0])
            if num_bytes == 0:
                # With OpenSSL, if we couldn't write the entire buffer,
                # the very same string object must be used on the
                # next call to send.  Therefore we suppress
                # merging the write buffer after an incomplete send.
                # A cleaner solution would be to set
                # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
                # not yet accessible from python
                # (http://bugs.python.org/issue8240)
                self._write_buffer_frozen = True
                break
            self._write_buffer_frozen = False
            _merge_prefix(self._write_buffer, num_bytes)
            self._write_buffer.popleft()
        except (socket.error, IOError, OSError) as e:
            if e.args[0] in _ERRNO_WOULDBLOCK:
                self._write_buffer_frozen = True
                break
            else:
                if e.args[0] not in _ERRNO_CONNRESET:
                    # Broken pipe errors are usually caused by connection
                    # reset, and it's better to not log EPIPE errors to
                    # minimize log spam
                    gen_log.warning("Write error on %d: %s",
                                    self.fileno(), e)
                self.close(exc_info=True)
                return
    if not self._write_buffer and self._write_callback:
        callback = self._write_callback
        self._write_callback = None
        self._run_callback(callback)
def _read_to_buffer(self):
    """Reads from the socket and appends the result to the read buffer.

    Returns the number of bytes read.  Returns 0 if there is nothing
    to read (i.e. the read returns EWOULDBLOCK or equivalent).  On
    error closes the socket and raises an exception.
    """
    try:
        chunk = self.read_from_fd()
    except (socket.error, IOError, OSError) as e:
        # ssl.SSLError is a subclass of socket.error
        if e.args[0] == errno.ECONNRESET:
            # Treat ECONNRESET as a connection close rather than
            # an error to minimize log spam (the exception will
            # be available on self.error for apps that care).
            self.close()
            return
        gen_log.warning("Read error on %d: %s",
                        self.fileno(), e)
        self.close()
        raise
def _insert(cls, entity: object) -> 'ResultCode':
    """Insert wrapper.

    :param cls:
    :param entity: :class:`BaseModel`
    """
    if not SESSION:
        gen_log.error('session is null')
        return ResultCode(0, 'Cannot obtain a session object; '
                             'the database connection may be broken')
    session = SESSION()
    try:
        session.add(entity)
        session.commit()
        session.close()
        gen_log.debug('add {entity} succeeded'.format(entity=entity))
        return ResultCode(1, 'Insert succeeded')
    except Exception as e:
        gen_log.warning('add {entity} failed, error: {e}'.format(
            entity=entity, e=e))
        session.close()
        return ResultCode(0, 'Insert failed')
def query(cls, field: str, value: object) -> 'Optional[list]':
    """Query by the given field and value.

    :param cls:
    :param field: field
    :param value: value
    :return: ``None`` or ``list``
    """
    if not SESSION:
        gen_log.error('session is null')
        return None
    session = SESSION()
    try:
        ret = session.query(cls).filter(field == value).all()
        session.close()
        return ret
    except Exception as e:
        gen_log.warning(
            'query {model}({field}={value}) failed, error: {e}'.format(
                model=cls, field=field, value=value, e=e))
        session.close()
        return None
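# A runnable sketch of the context the query helper above assumes (SQLAlchemy
# installed, SESSION being the sessionmaker it checks). The User model is
# invented purely for illustration.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
SESSION = sessionmaker(bind=engine)

# With the helper attached to the model (e.g. as a classmethod), a lookup
# would read:
# users = User.query(User.name, 'alice')  # list of User rows, or None on error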
def log_exception(self, typ, value, tb):
    """Override to customize logging of uncaught exceptions.

    By default logs instances of `HTTPError` as warnings without
    stack traces (on the ``tornado.general`` logger), and all
    other exceptions as errors with stack traces (on the
    ``tornado.application`` logger).

    .. versionadded:: 3.1
    """
    if isinstance(value, HTTPError):
        if value.log_message:
            gen_log.warning('\033[0;31m' + value.log_message + '\033[0m')
    else:
        app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                      self.request, exc_info=(typ, value, tb))
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if headers and 'Content-Encoding' in headers:
        gen_log.warning("Unsupported Content-Encoding: %s",
                        headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body),
                                           keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
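# A short usage sketch for parse_body_arguments above: a urlencoded body
# updates the caller-supplied dicts in place (values arrive as byte strings).
arguments, files = {}, {}
parse_body_arguments("application/x-www-form-urlencoded",
                     b"a=1&b=2&b=3", arguments, files)
# arguments is now {'a': [b'1'], 'b': [b'2', b'3']}; files stays empty.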
def _on_content_headers(self, data, buf=b''):
    self._content_length_left -= len(data)
    data = self._boundary_buffer + data
    gen_log.debug('file header is %r', data)
    self._boundary_buffer = buf
    header_data = data[self._boundary_len + 2:].decode('utf-8')
    headers = tornado.httputil.HTTPHeaders.parse(header_data)
    disp_header = headers.get("Content-Disposition", "")
    disposition, disp_params = tornado.httputil._parse_header(disp_header)
    if disposition != "form-data":
        gen_log.warning("Invalid multipart/form-data")
        self._read_content_body(None)
        return
    if not disp_params.get("name"):
        gen_log.warning("multipart/form-data value missing name")
        self._read_content_body(None)
        return
    name = disp_params["name"]
    if disp_params.get("filename"):
        ctype = headers.get("Content-Type", "application/unknown")
        fd, tmp_filename = tempfile.mkstemp(suffix='.tmp',
                                            prefix='tornado')
        self._request.files.setdefault(name, []).append(
            tornado.httputil.HTTPFile(
                filename=disp_params['filename'],
                tmp_filename=tmp_filename,
                content_type=ctype,
            ))
        self._read_content_body(os.fdopen(fd, 'wb'))
    else:
        gen_log.warning(
            "multipart/form-data is not file upload, skipping...")
        self._read_content_body(None)
def delete(cls, field: str, value: object) -> int:
    """Delete rows matching the given field and value.

    :param cls:
    :param field: field
    :param value: value
    :return: number of deleted rows (``int``), or a failure ``ResultCode``
    """
    if not SESSION:
        gen_log.error('session is null')
        return ResultCode(0, 'Cannot obtain a session object; '
                             'the database connection may be broken')
    session = SESSION()
    try:
        ret = session.query(cls).filter(field == value).delete()
        session.commit()
        session.close()
        return ret
    except Exception as e:
        gen_log.warning(
            'delete {model}({field}={value}) failed, error: {e}'.format(
                model=cls, field=field, value=value, e=e))
        session.close()
        return ResultCode(0, 'Delete failed')
def initialize(self, io_loop, max_clients=10, defaults=None):
    super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
    self._multi = pycurl.CurlMulti()
    self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
    self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
    self._curls = [_curl_create() for i in range(max_clients)]
    self._free_list = self._curls[:]
    self._requests = collections.deque()
    self._fds = {}
    self._timeout = None

    try:
        self._socket_action = self._multi.socket_action
    except AttributeError:
        # socket_action is found in pycurl since 7.18.2 (it's been
        # in libcurl longer than that but wasn't accessible to
        # python).
        gen_log.warning("socket_action method missing from pycurl; "
                        "falling back to socket_all. Upgrading "
                        "libcurl and pycurl will improve performance")
        self._socket_action = \
            lambda fd, action: self._multi.socket_all()

    # libcurl has bugs that sometimes cause it to not report all
    # relevant file descriptors and timeouts to TIMERFUNCTION/
    # SOCKETFUNCTION.  Mitigate the effects of such bugs by forcing
    # a periodic scan of all active requests.
    self._force_timeout_callback = ioloop.PeriodicCallback(
        self._handle_force_timeout, 1000, io_loop=io_loop)
    self._force_timeout_callback.start()

    # Work around a bug in libcurl 7.29.0: Some fields in the curl
    # multi object are initialized lazily, and its destructor will
    # segfault if it is destroyed without having been used.  Add
    # and remove a dummy handle to make sure everything is
    # initialized.
    dummy_curl_handle = pycurl.Curl()
    self._multi.add_handle(dummy_curl_handle)
    self._multi.remove_handle(dummy_curl_handle)
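# How the curl-backed client initialized above is normally selected: a sketch
# assuming classic callback-style Tornado with pycurl installed; the URL is a
# placeholder.
from tornado import httpclient, ioloop

httpclient.AsyncHTTPClient.configure(
    "tornado.curl_httpclient.CurlAsyncHTTPClient", max_clients=10)


def on_fetch(response):
    print(response.code)
    ioloop.IOLoop.current().stop()


httpclient.AsyncHTTPClient().fetch("http://example.com/", on_fetch)
ioloop.IOLoop.current().start()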
def test_unicode_newlines(self):
    # Ensure that only \r\n is recognized as a header separator, and not
    # the other newline-like unicode characters.
    # Characters that are likely to be problematic can be found in
    # http://unicode.org/standard/reports/tr13/tr13-5.html
    # and cpython's unicodeobject.c (which defines the implementation
    # of unicode_type.splitlines(), and uses a different list than TR13).
    newlines = [
        u'\u001b',  # VERTICAL TAB
        u'\u001c',  # FILE SEPARATOR
        u'\u001d',  # GROUP SEPARATOR
        u'\u001e',  # RECORD SEPARATOR
        u'\u0085',  # NEXT LINE
        u'\u2028',  # LINE SEPARATOR
        u'\u2029',  # PARAGRAPH SEPARATOR
    ]
    for newline in newlines:
        # Try the utf8 and latin1 representations of each newline
        for encoding in ['utf8', 'latin1']:
            try:
                try:
                    encoded = newline.encode(encoding)
                except UnicodeEncodeError:
                    # Some chars cannot be represented in latin1
                    continue
                data = b'Cookie: foo=' + encoded + b'bar'
                # parse() wants a native_str, so decode through latin1
                # in the same way the real parser does.
                headers = HTTPHeaders.parse(
                    native_str(data.decode('latin1')))
                expected = [('Cookie', 'foo=' +
                             native_str(encoded.decode('latin1')) + 'bar')]
                self.assertEqual(expected, list(headers.get_all()))
            except Exception:
                gen_log.warning("failed while trying %r in %s",
                                newline, encoding)
                raise
def fetch(self, url, method=None, headers=None, body=None, callback=None):
    """Execute a request.

    :param url:
    :param method:
    :param headers:
    :param body:
    :param callback: callback invoked with the HTTPResponse
    :return:
    """
    http_client = AsyncHTTPClient()
    try:
        request = HTTPRequest(url, method=method, headers=headers, body=body)
        http_client.fetch(request, callback)
    except HTTPError as e:
        gen_log.warning("HttpHelper fetch Error: " + str(e))
    except Exception as e:
        gen_log.warning("HttpHelper fetch Error: " + str(e))
    http_client.close()
def _handle_events(self, fd, events):
    if self.closed():
        gen_log.warning("Got events for closed stream %d", fd)
        return
    try:
        if events & self.io_loop.READ:
            self._handle_read()
        if self.closed():
            return
        if events & self.io_loop.WRITE:
            if self._connecting:
                self._handle_connect()
            self._handle_write()
        if self.closed():
            return
        if events & self.io_loop.ERROR:
            self.error = self.get_fd_error()
            # We may have queued up a user callback in _handle_read or
            # _handle_write, so don't close the IOStream until those
            # callbacks have had a chance to run.
            self.io_loop.add_callback(self.close)
            return
        state = self.io_loop.ERROR
        if self.reading():
            state |= self.io_loop.READ
        if self.writing():
            state |= self.io_loop.WRITE
        if state == self.io_loop.ERROR:
            state |= self.io_loop.READ
        if state != self._state:
            assert self._state is not None, \
                "shouldn't happen: _handle_events without self._state"
            self._state = state
            self.io_loop.update_handler(self.fileno(), self._state)
    except Exception:
        gen_log.error("Uncaught exception, closing connection.",
                      exc_info=True)
        self.close(exc_info=True)
        raise
def _handle_write(self):
    while self._write_buffer:
        try:
            data = self._write_buffer[0]
            num_bytes = self.write_to_fd(data)
            self._write_buffer.popleft()
            if num_bytes < len(data):
                self._write_buffer.appendleft(data[num_bytes:])
        except (IOError, OSError) as e:
            if e.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                break
            else:
                gen_log.warning("Write error on %s: %s",
                                self.fileno(), e)
                return
    if not self._write_buffer:
        if isinstance(self._write_callback, Future):
            future = self._write_callback
            self._write_callback = None
            future.set_result(None)
        elif callable(self._write_callback):
            callback = self._write_callback
            self._write_callback = None
            callback()
def _handle_write(self):
    while self._write_buffer:
        try:
            data, address = self._write_buffer[0]
            num_bytes = self.write_to_fd(data, address)
            self._write_buffer.popleft()
            if num_bytes < len(data):
                self._write_buffer.appendleft((data[num_bytes:], address))
        except (socket.error, IOError, OSError) as e:
            if e.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                break
            else:
                gen_log.warning("Write error on %s: %s",
                                self.fileno(), e)
                return
    if not self._write_buffer:
        if self._write_callback:
            callback = self._write_callback
            self._write_callback = None
            callback()
        if self._write_future:
            future = self._write_future
            self._write_future = None
            future.set_result(None)
def parse_body_arguments(content_type, body, arguments, files):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        uri_arguments = parse_qs_bytes(native_str(body),
                                       keep_blank_values=True)
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        fields = content_type.split(";")
        for field in fields:
            k, sep, v = field.strip().partition("=")
            if k == "boundary" and v:
                parse_multipart_form_data(utf8(v), body, arguments, files)
                break
        else:
            gen_log.warning("Invalid multipart/form-data")