Example #1
def watch_logs(log_filename, pipes):
    with file_or_stderr(log_filename, mode="a", encoding="utf-8") as log_file:
        while pipes:
            try:
                events = multiprocessing.connection.wait(pipes, timeout=1)
                for event in events:
                    try:
                        while event.poll(1):
                            # StreamHandler writes the newline terminator into the
                            # pipe separately from the actual log entry; since we
                            # already append a newline to every chunk we write out,
                            # discard newline-only data from the pipe to avoid
                            # emitting blank lines between entries.
                            try:
                                data = event.recv()
                            except EOFError:
                                event.close()
                                raise

                            if data == "\n":
                                break

                            log_file.write(data + "\n")
                            log_file.flush()
                    except BrokenPipeError:
                        event.close()
                        raise
            # If one of the worker processes is killed, its handle will be
            # closed so waiting for it is going to fail with this OSError.
            # Additionally, event.recv() raises EOFError when its pipe
            # is closed, and event.poll() raises BrokenPipeError when
            # its pipe is closed.  When any of these events happen, we
            # just take the closed pipes out of the waitlist.
            except (BrokenPipeError, EOFError, OSError):
                pipes = [p for p in pipes if not p.closed]
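For context on the newline handling above: logging.StreamHandler has, at least on some Python versions, written a record's formatted message and its newline terminator as two separate stream.write() calls, so a pipe-backed stream sees them as two chunks. The sketch below uses a hypothetical PipeStream wrapper (an assumption for illustration, not dramatiq's actual StreamablePipe) to simulate that and show why watch_logs can receive newline-only data.

import multiprocessing


# Hypothetical file-like wrapper around the write end of a pipe, standing in
# for the StreamablePipe objects the worker processes log into.
class PipeStream:
    def __init__(self, connection):
        self.connection = connection

    def write(self, data):
        # Each write() becomes one chunk on the pipe, so a handler that
        # writes the message and its "\n" terminator separately produces a
        # newline-only chunk on the reader side, which watch_logs discards.
        self.connection.send(data)

    def flush(self):
        pass


if __name__ == "__main__":
    read_pipe, write_pipe = multiprocessing.Pipe()
    stream = PipeStream(write_pipe)
    stream.write("worker_1 [INFO] example entry")  # the formatted record
    stream.write("\n")                             # the separate terminator
    print(repr(read_pipe.recv()))                  # 'worker_1 [INFO] example entry'
    print(repr(read_pipe.recv()))                  # '\n'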
Example #2
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

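    # Shared value that the worker processes can fill with additional fork
    # paths; it is read back below via canteen_get() when the fork processes
    # are spawned.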
    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen),
            daemon=True,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks,
                                              canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args,
                                  stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes]),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.",
                                   getattr(signum, "name", signum), proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...",
                    getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.
    while any(p.exitcode is None for p in worker_processes):
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break

            else:
                retcode = max(retcode, proc.exitcode)

    for pipe in [
            parent_read_pipe, parent_write_pipe, *worker_pipes, *fork_pipes
    ]:
        try:
            pipe.close()
        # If the worker process was killed, the handle may already be
        # closed.
        except (EOFError, OSError):
            pass

    # The log watcher can't be a daemon thread in case we're logging to
    # a file, so we have to wait for it to complete on exit.  Closing
    # all the pipes above is what should trigger that exit.
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable,
                             ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
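The file_or_stderr helper used above (and in watch_logs in Example #1) is not shown in these examples. A plausible sketch, based only on its call sites and therefore an assumption rather than the actual implementation, is a context manager that yields an open log file when a filename is given and sys.stderr otherwise:

import sys
from contextlib import contextmanager


@contextmanager
def file_or_stderr(filename, *, mode="a", encoding="utf-8"):
    # When no log file is configured, fall back to stderr; the caller treats
    # both cases as an ordinary writable stream.
    if filename is not None:
        with open(filename, mode=mode, encoding=encoding) as stream:
            yield stream
    else:
        yield sys.stderr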
Example #3
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_write_pipes = []
    worker_processes = []
    worker_process_events = []
    pid_to_worker_id = {}

    def create_worker_proc(worker_id, write_pipe, event):
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen, event),
            daemon=False,
        )
        return proc

    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        event = multiprocessing.Event()
        proc = create_worker_proc(worker_id, write_pipe, event)
        proc.start()
        worker_pipes.append(read_pipe)
        worker_write_pipes.append(write_pipe)
        worker_processes.append(proc)
        worker_process_events.append(event)
        pid_to_worker_id[proc.pid] = worker_id

    # Wait for all worker processes to come online before starting the
    # fork processes.  This is required to avoid race conditions like
    # in #297.
    for event, proc in zip(worker_process_events, worker_processes):
        if proc.is_alive():
            if not event.wait(timeout=30):
                break

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks,
                                              canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args,
                                  stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher_stop_event = Event()
    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes,
                              *fork_pipes], log_watcher_stop_event),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", signum,
                                   proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...",
                    getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(
            p.exitcode is None or (p.exitcode == RET_RESTART and running)
            for p in worker_processes):
        waited = True
        for proc in list(worker_processes):
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if proc.exitcode == RET_RESTART and running:
                logger.debug(
                    "Worker with PID %r asking for restart (code %r).",
                    proc.pid, proc.exitcode)
                prev_worker_pid = proc.pid
                worker_id = pid_to_worker_id[proc.pid]
                write_pipe = worker_write_pipes[worker_id]

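                # Reuse the original write pipe so that the read end already
                # being watched by the log watcher keeps receiving the
                # replacement worker's log output.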
                proc = create_worker_proc(worker_id, write_pipe,
                                          multiprocessing.Event())
                proc.start()
                worker_processes[worker_id] = proc
                pid_to_worker_id[proc.pid] = worker_id
                logger.debug(
                    "Spawned new worker with PID %r (replacing PID %r).",
                    proc.pid, prev_worker_pid)
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break

            else:
                retcode = max(retcode, proc.exitcode)

    # The log watcher can't be a daemon thread in case we're logging to
    # a file, so we have to wait for it to complete on exit.
    log_watcher_stop_event.set()
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable,
                             ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
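Examples #3 and #4 pass a threading Event (log_watcher_stop_event) as a third argument to watch_logs, whereas Example #1 shows the two-argument version. The sketch below is an assumption about how the loop might honour that event, not the library's actual code; the per-pipe drain is a condensed variant of Example #1's logic and file_or_stderr is the helper sketched earlier.

import multiprocessing.connection


def watch_logs(log_filename, pipes, stop_event):
    with file_or_stderr(log_filename, mode="a", encoding="utf-8") as log_file:
        # Keep draining until the parent sets the stop event after all the
        # worker and fork processes have exited.
        while pipes and not stop_event.is_set():
            try:
                events = multiprocessing.connection.wait(pipes, timeout=1)
                for event in events:
                    try:
                        while event.poll():
                            data = event.recv()
                            if data != "\n":  # discard bare newline chunks
                                log_file.write(data + "\n")
                                log_file.flush()
                    except (BrokenPipeError, EOFError):
                        event.close()
                        raise
            except (BrokenPipeError, EOFError, OSError):
                pipes = [p for p in pipes if not p.closed]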
Example #4
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)

    # To prevent the main process from exiting due to signals after worker
    # processes and fork processes have been defined but before the signal
    # handling has been configured for those processes, block those signals
    # that the main process is expected to handle.
    try_block_signals()

    worker_pipes = []
    worker_processes = []
    worker_process_events = []
    for worker_id in range(args.processes):
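        # duplex=False creates a one-way pipe: the worker only writes log
        # data into write_pipe and the parent only reads from read_pipe.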
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        event = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen, event),
            daemon=False,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)
        worker_process_events.append(event)

    # Wait for all worker processes to come online before starting the
    # fork processes.  This is required to avoid race conditions like
    # in #297.
    for event, proc in zip(worker_process_events, worker_processes):
        if proc.is_alive():
            if not event.wait(timeout=30):
                break

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks,
                                              canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe(duplex=False)
    logger = setup_parent_logging(args,
                                  stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # The file watcher and log watcher threads should inherit the
    # signal blocking behaviour, so do not unblock the signals when
    # starting those threads.
    if HAS_WATCHDOG and args.watch:
        if not hasattr(signal, "SIGHUP"):
            raise RuntimeError(
                "Watching for source changes is not supported on %s." %
                sys.platform)
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher_stop_event = Event()
    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes,
                              *fork_pipes], log_watcher_stop_event),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.",
                                   getattr(signum, "name", signum), proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...",
                    getattr(signum, "name", signum))
        stop_subprocesses(signum)

    retcode = RET_OK
    for sig in HANDLED_SIGNALS:
        signal.signal(sig, sighandler)

    # Now that the watcher threads have been started and the
    # sighandler for the main process has been defined, it should be
    # safe to unblock the signals that were previously blocked.
    try_unblock_signals()

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(p.exitcode is None for p in worker_processes):
        waited = True
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break

            else:
                retcode = retcode or proc.exitcode

    # The log watcher can't be a daemon thread in case we're logging to
    # a file, so we have to wait for it to complete on exit.
    log_watcher_stop_event.set()
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable,
                             ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return RET_KILLED if retcode < 0 else retcode
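Example #4 replaces the inline signal.pthread_sigmask calls from Examples #2 and #3 with try_block_signals(), try_unblock_signals(), and a HANDLED_SIGNALS set, none of which are shown here. A plausible sketch, assuming they simply wrap the earlier inline logic (an assumption, not the actual definitions):

import signal

# Signals the parent installs sighandler for; SIGHUP and SIGBREAK are
# platform-dependent, hence the hasattr guards.
HANDLED_SIGNALS = {signal.SIGINT, signal.SIGTERM}
if hasattr(signal, "SIGHUP"):
    HANDLED_SIGNALS.add(signal.SIGHUP)
if hasattr(signal, "SIGBREAK"):
    HANDLED_SIGNALS.add(signal.SIGBREAK)


def try_block_signals():
    # pthread_sigmask is unavailable on some platforms (e.g. Windows), so
    # only block the handled signals when the call exists.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(signal.SIG_BLOCK, HANDLED_SIGNALS)


def try_unblock_signals():
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(signal.SIG_UNBLOCK, HANDLED_SIGNALS)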