def _auth_future_to_callback(callback, future):
    # Adapter for the deprecated callback-style auth interface: invoke
    # ``callback`` with the future's result, or with None (after logging)
    # if the future raised an AuthError.
    try:
        result = future.result()
    except AuthError as e:
        gen_log.warning(str(e))
        result = None
    callback(result)
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`.

    .. deprecated:: 5.1

       This method will be removed in Tornado 6.0.
    """
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
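# Hedged usage sketch (not part of the original module): how `log_stack`
# pairs with `set_blocking_signal_threshold`.  Both are deprecated in 5.1
# and removed in Tornado 6.0; the one-second threshold below is an
# arbitrary illustration.
#
#     io_loop = ioloop.IOLoop.current()
#     # Log a stack trace whenever a callback blocks the loop for
#     # more than one second.
#     io_loop.set_blocking_signal_threshold(1.0, io_loop.log_stack)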
def start(check_time=500):
    """Begins watching source files for changes.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.
    """
    io_loop = ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning(
            "tornado.autoreload started more than once in the same process")
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time)
    scheduler.start()
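# Hedged usage sketch (``app`` and the port are placeholders): autoreload
# is normally enabled implicitly via ``tornado.web.Application(debug=True)``,
# but it can also be started explicitly in a single-process server:
#
#     from tornado_py2 import autoreload, ioloop
#
#     app.listen(8888)
#     autoreload.start()                   # poll for changes every 500 ms
#     autoreload.watch('config/app.conf')  # also watch a non-Python file
#     ioloop.IOLoop.current().start()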
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if headers and 'Content-Encoding' in headers:
        gen_log.warning("Unsupported Content-Encoding: %s",
                        headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body),
                                           keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
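# Hedged usage sketch: `parse_body_arguments` updates the two dicts in
# place, and parsed values remain byte strings.  For a urlencoded body:
#
#     arguments, files = {}, {}
#     parse_body_arguments("application/x-www-form-urlencoded",
#                          b"a=1&a=2&b=3", arguments, files)
#     # arguments == {'a': [b'1', b'2'], 'b': [b'3']}
#     # files is untouched for urlencoded bodies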
def parse_multipart_form_data(boundary, data, arguments, files):
    """Parses a ``multipart/form-data`` body.

    The ``boundary`` and ``data`` parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.

    .. versionchanged:: 5.1

       Now recognizes non-ASCII filenames in RFC 2231/5987
       (``filename*=``) format.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp).  I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b'"') and boundary.endswith(b'"'):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b"--" + boundary + b"--")
    if final_boundary_index == -1:
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
    for part in parts:
        if not part:
            continue
        eoh = part.find(b"\r\n\r\n")
        if eoh == -1:
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b"\r\n"):
            gen_log.warning("Invalid multipart/form-data")
            continue
        value = part[eoh + 4:-2]
        if not disp_params.get("name"):
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(HTTPFile(  # type: ignore
                filename=disp_params["filename"], body=value,
                content_type=ctype))
        else:
            arguments.setdefault(name, []).append(value)
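# Hedged usage sketch: parsing a hand-built multipart body.  CRLF line
# endings and the trailing ``--boundary--`` terminator are mandatory;
# without them the parser logs a warning and leaves the dicts unchanged.
#
#     body = (b"--XX\r\n"
#             b'Content-Disposition: form-data; name="f"; filename="a.txt"\r\n'
#             b"\r\n"
#             b"hello\r\n"
#             b"--XX--\r\n")
#     arguments, files = {}, {}
#     parse_multipart_form_data(b"XX", body, arguments, files)
#     # files["f"][0].filename == "a.txt"; files["f"][0].body == b"hello"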
def pgettext(self, context, message, plural_message=None, count=None):
    # CSV translation files carry no context information, so the
    # ``context`` argument is ignored and we fall back to plain
    # ``translate`` (with a warning when translations are loaded).
    if self.translations:
        gen_log.warning('pgettext is not supported by CSVLocale')
    return self.translate(message, plural_message, count)
def fork_processes(num_processes, max_restarts=100):
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes - 1``.  Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times).  In the parent
    process, ``fork_processes`` calls ``sys.exit(0)`` after all child
    processes have exited normally; otherwise it only exits by throwing
    an exception.
    """
    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i):
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
                            id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning("child %d (pid %d) exited with status %d, restarting",
                            id, pid, os.WEXITSTATUS(status))
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
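# Hedged usage sketch of the pre-fork pattern this function enables
# (``app`` is a placeholder Application; the port is arbitrary): bind the
# listening sockets before forking, then create per-child IOLoops.
#
#     from tornado_py2.httpserver import HTTPServer
#     from tornado_py2.ioloop import IOLoop
#     from tornado_py2.netutil import bind_sockets
#
#     sockets = bind_sockets(8888)   # bind in the parent, before forking
#     fork_processes(0)              # 0 -> one child per CPU core
#     # Only children reach this point; the parent waits on its children.
#     server = HTTPServer(app)
#     server.add_sockets(sockets)
#     IOLoop.current().start()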
def main():
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    # Remember that we were launched with autoreload as main.
    # The main module can be tricky; set the variables both in our globals
    # (which may be __main__) and the real importable version.
    import tornado_py2.autoreload
    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado_py2.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado_py2.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules['__main__'], '__spec__', None)
    tornado_py2.autoreload._original_spec = _original_spec = original_spec
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)

    try:
        if mode == "module":
            import runpy
            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = script
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special: their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv

    if mode == 'module':
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules.  Figure out where it is and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())

    wait()