def test_canteen_add_adds_paths():
    # Given that I have a Canteen
    c = multiprocessing.Value(Canteen)

    # When I append a couple of paths and mark it ready
    with canteen_try_init(c):
        canteen_add(c, "hello")
        canteen_add(c, "there")

    # Then those paths should be stored in the canteen
    assert canteen_get(c) == ["hello", "there"]
def test_canteen_try_init_runs_at_most_once():
    # Given that I have a Canteen
    c = multiprocessing.Value(Canteen)

    # When I run two canteen_try_init blocks
    with canteen_try_init(c) as acquired:
        if acquired:
            canteen_add(c, "hello")

    with canteen_try_init(c) as acquired:
        if acquired:
            canteen_add(c, "goodbye")

    # Then only the first one should run
    assert canteen_get(c) == ["hello"]
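# For context, here is a minimal sketch of what the shared Canteen structure
# and its helpers might look like.  This is an illustrative reimplementation,
# not necessarily dramatiq's exact code: the `size` field, the ";" separator,
# and CANTEEN_SIZE are assumptions made for this sketch.  It relies on the
# fact that multiprocessing.Value(Canteen) wraps the struct in a synchronized
# proxy whose field accessors take a recursive lock.
import ctypes
from contextlib import contextmanager

CANTEEN_SIZE = 1024


class Canteen(ctypes.Structure):
    """A fixed-size, process-shared buffer of import paths."""
    _fields_ = [
        ("initialized", ctypes.c_bool),
        ("size", ctypes.c_int),
        ("paths", ctypes.c_char * CANTEEN_SIZE),
    ]


@contextmanager
def canteen_try_init(value):
    # Only the first caller acquires the right to initialize; later
    # callers are handed False and should skip their setup block.
    with value.get_lock():
        if value.initialized:
            yield False
        else:
            try:
                yield True
            finally:
                value.initialized = True


def canteen_add(value, path):
    # Append a path to the shared buffer, ";"-separated.
    data = (value.paths + b";" if value.size else b"") + path.encode("utf-8")
    if len(data) > CANTEEN_SIZE:
        raise RuntimeError("Canteen is full.")
    value.paths = data
    value.size = len(data)


def canteen_get(value):
    # Decode the stored paths back into a list.
    if not value.size:
        return []
    return value.paths[:value.size].decode("utf-8").split(";")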
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen),
            daemon=True,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks, canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args, stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes]),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", getattr(signum, "name", signum), proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...", getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.
    while any(p.exitcode is None for p in worker_processes):
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode,
                )
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break
            else:
                retcode = max(retcode, proc.exitcode)

    for pipe in [parent_read_pipe, parent_write_pipe, *worker_pipes, *fork_pipes]:
        try:
            # If the worker process was killed, the handle may already
            # be closed.
            pipe.close()
        except (EOFError, OSError):
            pass

    # The log watcher can't be a daemon in case we log to a file.  So
    # we have to wait for it to complete on exit.  Closing all the
    # pipes above is what should trigger said exit.
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable, ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
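# Each subprocess above is handed `StreamablePipe(write_pipe)` so that its
# logging output can travel over a multiprocessing pipe back to the parent.
# A minimal sketch of such a file-like wrapper follows; it is an
# illustration of the idea, not necessarily dramatiq's exact class.
class StreamablePipe:
    """Wraps the write end of a multiprocessing.Pipe as a writable stream."""

    def __init__(self, pipe, *, encoding="utf-8"):
        self.pipe = pipe
        self.encoding = encoding

    def fileno(self):
        return self.pipe.fileno()

    def flush(self):
        pass  # Writes are sent immediately; nothing to flush.

    def close(self):
        self.pipe.close()

    def write(self, data):
        # Each write is sent as one message so the parent's log watcher
        # can read it back with recv_bytes().
        self.pipe.send_bytes(data.encode(self.encoding, errors="replace"))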
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    # The write ends are kept around so that a restarted worker can be
    # given the same pipe as the process it replaces.
    worker_write_pipes = []
    worker_processes = []
    worker_process_events = []
    pid_to_worker_id = {}

    # Workers are created via this helper so that workers asking for a
    # restart can be respawned with the same arguments.
    def create_worker_proc(worker_id, write_pipe, event):
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen, event),
            daemon=False,
        )
        return proc

    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        event = multiprocessing.Event()
        proc = create_worker_proc(worker_id, write_pipe, event)
        proc.start()
        worker_pipes.append(read_pipe)
        worker_write_pipes.append(write_pipe)
        worker_processes.append(proc)
        worker_process_events.append(event)
        pid_to_worker_id[proc.pid] = worker_id

    # Wait for all worker processes to come online before starting the
    # fork processes.  This is required to avoid race conditions like
    # in #297.
    for event, proc in zip(worker_process_events, worker_processes):
        if proc.is_alive():
            if not event.wait(timeout=30):
                break

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks, canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args, stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher_stop_event = Event()
    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes], log_watcher_stop_event),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", signum, proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...", getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(
            p.exitcode is None or (p.exitcode == RET_RESTART and running)
            for p in worker_processes):
        waited = True
        for proc in list(worker_processes):
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if proc.exitcode == RET_RESTART and running:
                logger.debug(
                    "Worker with PID %r asking for restart (code %r).",
                    proc.pid, proc.exitcode,
                )
                prev_worker_pid = proc.pid
                worker_id = pid_to_worker_id[proc.pid]
                write_pipe = worker_write_pipes[worker_id]
                proc = create_worker_proc(worker_id, write_pipe, multiprocessing.Event())
                proc.start()
                worker_processes[worker_id] = proc
                pid_to_worker_id[proc.pid] = worker_id
                logger.debug(
                    "Spawned new worker with PID %r (replacing PID %r).",
                    proc.pid, prev_worker_pid,
                )
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode,
                )
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break
            else:
                retcode = max(retcode, proc.exitcode)

    # The log watcher can't be a daemon in case we log to a file so we
    # have to wait for it to complete on exit.
    log_watcher_stop_event.set()
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable, ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
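# This revision passes a stop event into `watch_logs`, so the watcher thread
# can be told to exit instead of waiting for every pipe to hit EOF.  A
# minimal sketch of what such a loop might look like, assuming the
# StreamablePipe writer above and the `file_or_stderr` helper seen earlier
# (the exact details are an assumption, not dramatiq's verbatim code):
import multiprocessing.connection


def watch_logs(log_filename, pipes, stop_event):
    with file_or_stderr(log_filename) as log_file:
        while not stop_event.is_set() and pipes:
            # Wait up to a second for any pipe to become readable so the
            # stop event is re-checked regularly.
            for pipe in multiprocessing.connection.wait(pipes, timeout=1):
                try:
                    while pipe.poll():
                        data = pipe.recv_bytes()
                        log_file.write(data.decode("utf-8", errors="replace"))
                        log_file.flush()
                except (EOFError, OSError):
                    # The subprocess on the other end has exited, so stop
                    # watching its pipe.
                    pipes.remove(pipe)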
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)

    # To prevent the main process from exiting due to signals after worker
    # processes and fork processes have been defined but before the signal
    # handling has been configured for those processes, block those signals
    # that the main process is expected to handle.
    try_block_signals()

    worker_pipes = []
    worker_processes = []
    worker_process_events = []
    for worker_id in range(args.processes):
        # duplex=False gives a one-way pipe: the subprocess writes, the
        # parent reads.
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        event = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen, event),
            daemon=False,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)
        worker_process_events.append(event)

    # Wait for all worker processes to come online before starting the
    # fork processes.  This is required to avoid race conditions like
    # in #297.
    for event, proc in zip(worker_process_events, worker_processes):
        if proc.is_alive():
            if not event.wait(timeout=30):
                break

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks, canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe(duplex=False)
    logger = setup_parent_logging(args, stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # The file watcher and log watcher threads should inherit the
    # signal blocking behaviour, so do not unblock the signals when
    # starting those threads.
    if HAS_WATCHDOG and args.watch:
        if not hasattr(signal, "SIGHUP"):
            raise RuntimeError(
                "Watching for source changes is not supported on %s." % sys.platform)
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher_stop_event = Event()
    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes], log_watcher_stop_event),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", getattr(signum, "name", signum), proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...", getattr(signum, "name", signum))
        stop_subprocesses(signum)

    retcode = RET_OK
    for sig in HANDLED_SIGNALS:
        signal.signal(sig, sighandler)

    # Now that the watcher threads have been started and the
    # sighandler for the main process has been defined, it should be
    # safe to unblock the signals that were previously blocked.
    try_unblock_signals()

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(p.exitcode is None for p in worker_processes):
        waited = True
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode,
                )
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break
            else:
                retcode = retcode or proc.exitcode

    # The log watcher can't be a daemon in case we log to a file so we
    # have to wait for it to complete on exit.
    log_watcher_stop_event.set()
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable, ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return RET_KILLED if retcode < 0 else retcode
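# The last revision relies on `HANDLED_SIGNALS`, `try_block_signals`, and
# `try_unblock_signals` instead of inlining the pthread_sigmask calls.
# Based on the inline versions in the earlier revisions, these helpers
# plausibly look like the sketch below (an assumption, not guaranteed to
# match the library verbatim).
import signal

HANDLED_SIGNALS = {signal.SIGINT, signal.SIGTERM}
if hasattr(signal, "SIGHUP"):
    HANDLED_SIGNALS.add(signal.SIGHUP)
if hasattr(signal, "SIGBREAK"):
    HANDLED_SIGNALS.add(signal.SIGBREAK)


def try_block_signals():
    """Block HANDLED_SIGNALS on platforms that support it (not Windows)."""
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(signal.SIG_BLOCK, HANDLED_SIGNALS)


def try_unblock_signals():
    """Unblock the signals that were blocked by try_block_signals."""
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(signal.SIG_UNBLOCK, HANDLED_SIGNALS)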