def _auth_future_to_callback(callback, future):
    try:
        result = future.result()
    except AuthError as e:
        gen_log.warning(str(e))
        result = None
    callback(result)
def log_stack(self, signal, frame):
    """Signal handler to log the stack trace of the current thread.

    For use with `set_blocking_signal_threshold`.
    """
    gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                    self._blocking_signal_threshold,
                    ''.join(traceback.format_stack(frame)))
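# Usage sketch (an assumption, not part of this module): wire ``log_stack``
# up as the action for ``set_blocking_signal_threshold`` so that any callback
# blocking the loop for more than a second gets its stack logged.
# ``set_blocking_log_threshold`` is the documented shorthand for this wiring.
#
#     io_loop = tornado.ioloop.IOLoop.current()
#     io_loop.set_blocking_signal_threshold(1, io_loop.log_stack)
#     # or, equivalently:
#     io_loop.set_blocking_log_threshold(1)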
def start(io_loop=None, check_time=500):
    """Begins watching source files for changes.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    io_loop = io_loop or ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning(
            "tornado.autoreload started more than once in the same process")
    if _has_execv:
        add_reload_hook(functools.partial(io_loop.close, all_fds=True))
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
    scheduler.start()
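# Usage sketch (illustrative, not part of this module): enabling autoreload
# explicitly. ``Application(..., debug=True)`` calls ``start()`` for you;
# the manual form below is only needed for custom setups. The extra watched
# file ``config.yml`` is a hypothetical example.
#
#     import tornado.autoreload
#     import tornado.ioloop
#
#     tornado.autoreload.start()               # watch all imported modules
#     tornado.autoreload.watch('config.yml')   # watch a non-module file too
#     tornado.ioloop.IOLoop.current().start()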
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if headers and 'Content-Encoding' in headers:
        gen_log.warning("Unsupported Content-Encoding: %s",
                        headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        try:
            uri_arguments = parse_qs_bytes(native_str(body),
                                           keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        try:
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
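# Usage sketch (illustrative): calling the parser directly, as a test might.
# ``RequestHandler.get_body_argument`` normally does this for you. Note that
# values are byte strings and repeated names accumulate into one list.
#
#     args, files = {}, {}
#     parse_body_arguments('application/x-www-form-urlencoded',
#                          b'a=1&b=2&b=3', args, files)
#     # args == {'a': [b'1'], 'b': [b'2', b'3']}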
def test_unicode_newlines(self):
    # Ensure that only \r\n is recognized as a header separator, and not
    # the other newline-like unicode characters.
    # Characters that are likely to be problematic can be found in
    # http://unicode.org/standard/reports/tr13/tr13-5.html
    # and cpython's unicodeobject.c (which defines the implementation
    # of unicode_type.splitlines(), and uses a different list than TR13).
    newlines = [
        u'\u000b',  # VERTICAL TAB
        u'\u001c',  # FILE SEPARATOR
        u'\u001d',  # GROUP SEPARATOR
        u'\u001e',  # RECORD SEPARATOR
        u'\u0085',  # NEXT LINE
        u'\u2028',  # LINE SEPARATOR
        u'\u2029',  # PARAGRAPH SEPARATOR
    ]
    for newline in newlines:
        # Try the utf8 and latin1 representations of each newline
        for encoding in ['utf8', 'latin1']:
            try:
                try:
                    encoded = newline.encode(encoding)
                except UnicodeEncodeError:
                    # Some chars cannot be represented in latin1
                    continue
                data = b'Cookie: foo=' + encoded + b'bar'
                # parse() wants a native_str, so decode through latin1
                # in the same way the real parser does.
                headers = HTTPHeaders.parse(
                    native_str(data.decode('latin1')))
                expected = [('Cookie', 'foo=' +
                             native_str(encoded.decode('latin1')) + 'bar')]
                self.assertEqual(expected, list(headers.get_all()))
            except Exception:
                gen_log.warning("failed while trying %r in %s",
                                newline, encoding)
                raise
def parse_multipart_form_data(boundary, data, arguments, files):
    """Parses a ``multipart/form-data`` body.

    The ``boundary`` and ``data`` parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp).  I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b'"') and boundary.endswith(b'"'):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b"--" + boundary + b"--")
    if final_boundary_index == -1:
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
    for part in parts:
        if not part:
            continue
        eoh = part.find(b"\r\n\r\n")
        if eoh == -1:
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b"\r\n"):
            gen_log.warning("Invalid multipart/form-data")
            continue
        value = part[eoh + 4:-2]
        if not disp_params.get("name"):
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(HTTPFile(  # type: ignore
                filename=disp_params["filename"], body=value,
                content_type=ctype))
        else:
            arguments.setdefault(name, []).append(value)
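# Usage sketch (illustrative): a minimal hand-built body run through the
# parser. The boundary ``xyz`` and field name ``title`` are placeholders.
#
#     body = (b'--xyz\r\n'
#             b'Content-Disposition: form-data; name="title"\r\n'
#             b'\r\n'
#             b'hello\r\n'
#             b'--xyz--\r\n')
#     args, files = {}, {}
#     parse_multipart_form_data(b'xyz', body, args, files)
#     # args == {'title': [b'hello']}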
def main():
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    original_argv = sys.argv
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)

    try:
        if mode == "module":
            import runpy
            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = script
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception",
                        exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for (filename, lineno, name, line) in \
                traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special: their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv

    if mode == 'module':
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules.  Figure out where it is and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())

    wait()
def pgettext(self, context, message, plural_message=None, count=None):
    # CSV translation files carry no context information, so fall back
    # to a plain translate() and warn if translations are loaded.
    if self.translations:
        gen_log.warning('pgettext is not supported by CSVLocale')
    return self.translate(message, plural_message, count)
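# Usage sketch (illustrative): because CSV translation files carry no
# context, both calls below resolve to the same translation; ``locale`` is
# assumed to be a CSV-backed `Locale` instance.
#
#     locale.pgettext('menu', 'Open')   # warns and ignores the context
#     locale.translate('Open')          # equivalent result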
def fork_processes(num_processes, max_restarts=100, set_affinity=False,
                   enforce_all_cores=False):
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes - 1``.  Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times).  In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.
    """
    global _task_id
    assert _task_id is None
    num_cores = cpu_count()
    if num_processes is None or num_processes <= 0:
        num_processes = num_cores
    elif set_affinity:
        if num_processes > num_cores:
            raise RuntimeError(
                "Using AFFINITY tornado mode, the number of processes to "
                "fork cannot exceed the number of CPU cores")
        elif num_processes != num_cores:
            message = ("Using AFFINITY tornado mode and number of processes "
                       "to fork ({}) != number of CPU cores ({})".format(
                           num_processes, num_cores))
            if enforce_all_cores:
                raise RuntimeError(message)
            else:
                gen_log.warning(message)
    if ioloop.IOLoop.initialized():
        raise RuntimeError(
            "Cannot run in multiple processes: IOLoop instance "
            "has already been initialized. You cannot call "
            "IOLoop.instance() before calling fork_processes()")
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i):
        pid = os.fork()
        if pid == 0:
            if set_affinity:
                gen_log.debug("Setting process affinity for CPU core [%s]", i)
                process = psutil.Process()
                process.cpu_affinity([i])
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning("child %d (pid %d) killed by signal %d, "
                            "restarting", id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning("child %d (pid %d) exited with status %d, "
                            "restarting", id, pid, os.WEXITSTATUS(status))
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
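# Usage sketch (the standard tornado multi-process serving pattern; ``app``
# and port 8888 are placeholders): bind sockets before forking, then create
# the server and IOLoop in each child.
#
#     from tornado.httpserver import HTTPServer
#     from tornado.ioloop import IOLoop
#     from tornado.netutil import bind_sockets
#
#     sockets = bind_sockets(8888)
#     fork_processes(0)           # 0 => one child per CPU core
#     server = HTTPServer(app)
#     server.add_sockets(sockets)
#     IOLoop.current().start()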