Example #1
def _check_file(modify_times: Dict[str, float], path: str) -> None:
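    # Record the file's mtime the first time it is seen; on later calls, a
    # changed mtime triggers a full process restart via _reload().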
    try:
        modified = os.stat(path).st_mtime
    except Exception:
        return
    if path not in modify_times:
        modify_times[path] = modified
        return
    if modify_times[path] != modified:
        gen_log.info("%s modified; restarting server", path)
        _reload()
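
A minimal sketch of how _check_file is meant to be driven: keep a single
modify_times dict alive across scans and call _check_file for every watched
path on each pass. The _scan_once name, the watched iterable, and the typing
import are illustrative assumptions, not part of the excerpt above.

from typing import Dict, Iterable

def _scan_once(modify_times: Dict[str, float], watched: Iterable[str]) -> None:
    # Hypothetical driver loop: the first pass only records mtimes; any later
    # pass that sees a newer mtime makes _check_file call _reload().
    for path in watched:
        _check_file(modify_times, path)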
Example #2
async def _read_message(self,
                        delegate: httputil.HTTPMessageDelegate) -> bool:
    need_delegate_close = False
    try:
        header_future = self.stream.read_until_regex(
            b"\r?\n\r?\n", max_bytes=self.params.max_header_size)
        if self.params.header_timeout is None:
            header_data = await header_future
        else:
            try:
                header_data = await gen.with_timeout(
                    self.stream.io_loop.time() +
                    self.params.header_timeout,
                    header_future,
                    quiet_exceptions=iostream.StreamClosedError,
                )
            except gen.TimeoutError:
                self.close()
                return False
        start_line_str, headers = self._parse_headers(header_data)
        if self.is_client:
            resp_start_line = httputil.parse_response_start_line(
                start_line_str)
            self._response_start_line = resp_start_line
            start_line = (
                resp_start_line
            )  # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine]
            # TODO: this will need to change to support client-side keepalive
            self._disconnect_on_finish = False
        else:
            req_start_line = httputil.parse_request_start_line(
                start_line_str)
            self._request_start_line = req_start_line
            self._request_headers = headers
            start_line = req_start_line
            self._disconnect_on_finish = not self._can_keep_alive(
                req_start_line, headers)
        need_delegate_close = True
        with _ExceptionLoggingContext(app_log):
            header_recv_future = delegate.headers_received(
                start_line, headers)
            if header_recv_future is not None:
                await header_recv_future
        if self.stream is None:
            # We've been detached.
            need_delegate_close = False
            return False
        skip_body = False
        if self.is_client:
            assert isinstance(start_line, httputil.ResponseStartLine)
            if (self._request_start_line is not None
                    and self._request_start_line.method == "HEAD"):
                skip_body = True
            code = start_line.code
            if code == 304:
                # 304 responses may include the content-length header
                # but do not actually have a body.
                # http://tools.ietf.org/html/rfc7230#section-3.3
                skip_body = True
            if code >= 100 and code < 200:
                # 1xx responses should never indicate the presence of
                # a body.
                if "Content-Length" in headers or "Transfer-Encoding" in headers:
                    raise httputil.HTTPInputError(
                        "Response code %d cannot have body" % code)
                # TODO: client delegates will get headers_received twice
                # in the case of a 100-continue.  Document or change?
                await self._read_message(delegate)
        else:
            if headers.get(
                    "Expect"
            ) == "100-continue" and not self._write_finished:
                self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
        if not skip_body:
            body_future = self._read_body(
                resp_start_line.code if self.is_client else 0, headers,
                delegate)
            if body_future is not None:
                if self._body_timeout is None:
                    await body_future
                else:
                    try:
                        await gen.with_timeout(
                            self.stream.io_loop.time() +
                            self._body_timeout,
                            body_future,
                            quiet_exceptions=iostream.StreamClosedError,
                        )
                    except gen.TimeoutError:
                        gen_log.info("Timeout reading body from %s",
                                     self.context)
                        self.stream.close()
                        return False
        self._read_finished = True
        if not self._write_finished or self.is_client:
            need_delegate_close = False
            with _ExceptionLoggingContext(app_log):
                delegate.finish()
        # If we're waiting for the application to produce an asynchronous
        # response, and we're not detached, register a close callback
        # on the stream (we didn't need one while we were reading)
        if (not self._finish_future.done() and self.stream is not None
                and not self.stream.closed()):
            self.stream.set_close_callback(self._on_connection_close)
            await self._finish_future
        if self.is_client and self._disconnect_on_finish:
            self.close()
        if self.stream is None:
            return False
    except httputil.HTTPInputError as e:
        gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
        if not self.is_client:
            await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n")
        self.close()
        return False
    finally:
        if need_delegate_close:
            with _ExceptionLoggingContext(app_log):
                delegate.on_connection_close()
        header_future = None  # type: ignore
        self._clear_callbacks()
    return True
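
Both the header read and the body read above use the same deadline pattern:
compute an absolute deadline from the stream's IOLoop clock, hand it to
gen.with_timeout with StreamClosedError silenced, and treat gen.TimeoutError as
a signal to close the connection. A condensed, hypothetical sketch of that
pattern (the helper name and the future/timeout parameters are stand-ins, not
actual members of HTTP1Connection):

async def _await_with_deadline(self, future, timeout):
    # Illustrative only: mirrors the timeout handling applied to the header
    # and body futures in _read_message above.
    if timeout is None:
        return await future
    try:
        return await gen.with_timeout(
            self.stream.io_loop.time() + timeout,
            future,
            quiet_exceptions=iostream.StreamClosedError,
        )
    except gen.TimeoutError:
        self.close()
        return None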
Example #3
def fork_processes(
    num_processes: Optional[int], max_restarts: Optional[int] = None
) -> int:
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes - 1``.  Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times).  In the parent
    process, ``fork_processes`` never returns: it calls ``sys.exit(0)``
    after all child processes have exited normally, and otherwise exits
    only by raising an exception (for example after too many restarts).

    ``max_restarts`` defaults to 100.

    Availability: Unix
    """
    if max_restarts is None:
        max_restarts = 100

    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i: int) -> Optional[int]:
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        pid, status = os.wait()
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning(
                "child %d (pid %d) killed by signal %d, restarting",
                id,
                pid,
                os.WTERMSIG(status),
            )
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning(
                "child %d (pid %d) exited with status %d, restarting",
                id,
                pid,
                os.WEXITSTATUS(status),
            )
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
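
A hedged usage sketch of the pre-fork pattern this function supports, assuming
the tornado_py3 package mirrors tornado's usual module layout (netutil,
process, httpserver, web, ioloop); the handler and port are illustrative only.

from tornado_py3 import httpserver, ioloop, netutil, process, web

class PingHandler(web.RequestHandler):
    def get(self) -> None:
        self.write("ok")

def run_forking_server(port: int = 8888) -> None:
    # Bind the listening sockets in the parent, before forking.
    sockets = netutil.bind_sockets(port)
    # Fork one worker per CPU core; this call returns only in the children
    # (the parent exits via sys.exit(0) once all of them have finished).
    process.fork_processes(0)
    # Only now, inside each child, touch the IOLoop.
    server = httpserver.HTTPServer(web.Application([(r"/ping", PingHandler)]))
    server.add_sockets(sockets)
    ioloop.IOLoop.current().start()

Binding the sockets before the fork is the point of the pre-fork model: every
worker inherits and accepts on the same listening socket, and no IOLoop exists
until after fork_processes has returned in the child.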
Example #4
def main() -> None:
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    # Remember that we were launched with autoreload as main.
    # The main module can be tricky; set the variables both in our globals
    # (which may be __main__) and the real importable version.
    import tornado_py3.autoreload

    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado_py3.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado_py3.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules["__main__"], "__spec__", None)
    tornado_py3.autoreload._original_spec = _original_spec = original_spec
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)

    try:
        if mode == "module":
            import runpy

            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = script
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for (filename, lineno, name,
             line) in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special:  their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv

    if mode == "module":
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules.  Figure out where it is and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())  # type: ignore

    wait()
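
As the docstring notes, the non-wrapper alternative is for the script itself to
call autoreload.wait() when its own work is done. A minimal sketch, assuming
the tornado_py3 module path used above and a hypothetical run_tests entry
point:

from tornado_py3 import autoreload

if __name__ == "__main__":
    run_tests()        # hypothetical: the script's own work
    autoreload.wait()  # block here, restarting the process on any source change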