Example #1: worker_process boots the broker, publishes middleware fork paths through the canteen, and runs until signaled to stop.
def worker_process(args, worker_id, logging_pipe, canteen):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        with canteen_try_init(canteen) as acquired:
            if acquired:
                logger.debug("Sending forks to main process...")
                for middleware in broker.middleware:
                    for fork in middleware.forks:
                        fork_path = "%s:%s" % (fork.__module__, fork.__name__)
                        canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #2: canteen_add raises a RuntimeError once too many paths have been added.
def test_canteen_add_fails_when_adding_too_many_paths():
    # Given that I have a Canteen
    c = Canteen()

    # When I append too many paths
    # Then a RuntimeError should be raised
    with pytest.raises(RuntimeError):
        for _ in range(1024):
            canteen_add(c, "0" * 1024)
Example #3: paths added inside canteen_try_init can be read back with canteen_get.
def test_canteen_add_adds_paths():
    # Given that I have a Canteen
    c = multiprocessing.Value(Canteen)

    # When I append a couple of paths and mark it ready
    with canteen_try_init(c):
        canteen_add(c, "hello")
        canteen_add(c, "there")

    # Then those paths should be stored in the canteen
    assert canteen_get(c) == ["hello", "there"]
Example #4: the canteen_try_init block runs at most once per canteen.
def test_canteen_try_init_runs_at_most_once():
    # Given that I have a Canteen
    c = multiprocessing.Value(Canteen)

    # When I run two canteen_try_init blocks
    with canteen_try_init(c) as acquired:
        if acquired:
            canteen_add(c, "hello")

    with canteen_try_init(c) as acquired:
        if acquired:
            canteen_add(c, "goodbye")

    # Then only the first one should run
    assert canteen_get(c) == ["hello"]
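
The tests above exercise canteen_add, canteen_get and canteen_try_init without ever showing the Canteen itself. As a rough guide only, here is a minimal sketch of how such a shared structure and its helpers could be written, assuming a fixed-size ctypes buffer protected by the lock of a multiprocessing.Value; the CANTEEN_SIZE constant and the ";"-separated layout are illustrative assumptions, not necessarily dramatiq's actual implementation.

from contextlib import contextmanager
from ctypes import Structure, c_bool, c_char

CANTEEN_SIZE = 1024 * 1024  # assumed capacity of the shared path buffer


class Canteen(Structure):
    """Shared memory area where the first worker process to boot deposits
    the fork paths it discovers in the broker middleware."""
    _fields_ = [
        ("initialized", c_bool),
        ("paths", c_char * CANTEEN_SIZE),
    ]


@contextmanager
def canteen_try_init(canteen):
    # Yield True to the first caller only; every later caller gets False.
    with canteen.get_lock():
        if canteen.initialized:
            yield False
            return

        yield True
        canteen.initialized = True


def canteen_add(canteen, path):
    # Append a ";"-terminated path, refusing to overflow the buffer.
    existing = canteen.paths
    data = path.encode("utf-8") + b";"
    if len(existing) + len(data) > CANTEEN_SIZE:
        raise RuntimeError("canteen is full")
    canteen.paths = existing + data


def canteen_get(canteen):
    # Split the buffer back into the list of paths that were added.
    return [p for p in canteen.paths.decode("utf-8").split(";") if p]

Under these assumptions the tests above behave as expected: only canteen_try_init needs the Value wrapper's get_lock(), which is why Example #2 can call canteen_add on a bare Canteen(), and the wrapper's lock is re-entrant, so field accesses inside the with block remain safe.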
Example #5: a variant of worker_process that inlines the double-checked canteen initialization, signals a boot event to the main process, and supports worker restarts.
File: cli.py  Project: Sovetnikov/dramatiq
def worker_process(args, worker_id, logging_pipe, canteen, event):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        if not canteen.initialized:
            with canteen.get_lock():
                if not canteen.initialized:
                    logger.debug("Sending forks to main process...")
                    for middleware in broker.middleware:
                        for fork in middleware.forks:
                            fork_path = "%s:%s" % (fork.__module__,
                                                   fork.__name__)
                            canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)
    finally:
        # Signal to the master process that this process has booted,
        # regardless of whether it failed or not.  If it did fail, the
        # worker process will realize that soon enough.
        event.set()

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running and not worker.restart_requested:
        time.sleep(1)

    if worker.restart_requested:
        logger.info("Requesting worker restart.")
    worker.stop()
    logger.info("Worker stopped.")
    broker.close()

    logging_pipe.close()
    if worker.restart_requested:
        sys.exit(RET_RESTART)
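
Example #5's worker_process expects the parent process to hand in the logging pipe, the shared canteen, and a boot event. Purely as an illustration (this is not dramatiq's actual CLI code, and the spawn_worker name and 30-second timeout are invented here), a parent could wire those pieces up roughly like this:

import multiprocessing


def spawn_worker(args, worker_id):
    # One-way pipe: the child writes log records, the parent reads them.
    logging_recv, logging_send = multiprocessing.Pipe(duplex=False)
    canteen = multiprocessing.Value(Canteen)  # shared fork-path buffer
    booted = multiprocessing.Event()          # set by the child once it has booted

    proc = multiprocessing.Process(
        target=worker_process,
        args=(args, worker_id, logging_send, canteen, booted),
    )
    proc.start()

    # worker_process sets the event in its finally block, so this returns
    # even when the child fails to import its broker or modules.
    booted.wait(timeout=30)
    return proc, logging_recv, canteen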