import queue
import signal
from time import sleep, time

import pytest

# Helpers from the `distributed` test suite; the exact import paths may vary
# between versions of the library.
from distributed.compatibility import WINDOWS
from distributed.diskutils import WorkSpace
from distributed.utils import mp_context
from distributed.utils_test import check_process_leak


# `ignore_sigterm` is presumably supplied by a pytest parametrize decorator
# that is not part of this excerpt.
def test_check_process_leak_post_cleanup(ignore_sigterm):
    barrier = mp_context.Barrier(parties=2)
    with check_process_leak(check=False, term_timeout=0.2):
        p = mp_context.Process(target=garbage_process,
                               args=(barrier, ignore_sigterm))
        p.start()
        barrier.wait()
    assert not p.is_alive()


def test_check_process_leak():
    barrier = mp_context.Barrier(parties=2)
    with pytest.raises(AssertionError):
        with check_process_leak(check=True, check_timeout=0.01):
            p = mp_context.Process(target=garbage_process, args=(barrier,))
            p.start()
            barrier.wait()
    assert not p.is_alive()


def test_check_process_leak_slow_cleanup():
    """check_process_leak waits a bit for processes to terminate themselves"""
    barrier = mp_context.Barrier(parties=2)
    with check_process_leak(check=True):
        p = mp_context.Process(target=garbage_process,
                               args=(barrier, False, 0.2))
        p.start()
        barrier.wait()
    assert not p.is_alive()
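

# `garbage_process` is the child-process target used by the tests above but
# not included in this excerpt. Below is a minimal sketch of what it plausibly
# looks like, reconstructed from how the tests call it; the signature and body
# are assumptions, not the library's actual implementation.
def garbage_process(barrier, ignore_sigterm=False, t=3600):
    if ignore_sigterm:
        # Ignore SIGTERM so that only a hard kill can stop this process;
        # check_process_leak() is then expected to fall back to killing the
        # child once `term_timeout` expires.
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
    barrier.wait()  # signal the parent that the child is up and running
    sleep(t)  # with the default t, outlive the check_process_leak() block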


# Example #4
# `timeout` and `max_procs` are presumably supplied by a pytest parametrize
# decorator that is not part of this excerpt.
def test_workspace_concurrency(tmpdir, timeout, max_procs):
    """WorkSpace concurrency test. We merely check that no exception or
    deadlock happens.
    """
    base_dir = str(tmpdir)

    err_q = mp_context.Queue()
    purged_q = mp_context.Queue()
    stop_evt = mp_context.Event()
    ws = WorkSpace(base_dir)
    # Make sure purging only happens in the child processes
    ws._purge_leftovers = lambda: None

    # Run a bunch of child processes that will try to purge concurrently
    barrier = mp_context.Barrier(parties=max_procs + 1)
    processes = [
        mp_context.Process(
            target=_workspace_concurrency,
            args=(base_dir, purged_q, err_q, stop_evt, barrier),
        ) for _ in range(max_procs)
    ]
    for p in processes:
        p.start()
    barrier.wait()
    n_created = 0
    n_purged = 0
    t1 = time()
    try:
        while time() - t1 < timeout:
            # Add a bunch of locks, and simulate forgetting them.
            # The concurrent processes should try to purge them.
            for _ in range(50):
                d = ws.new_work_dir(prefix="workspace-concurrency-")
                d._finalizer.detach()
                n_created += 1
            sleep(0.01)
    finally:
        stop_evt.set()
        for p in processes:
            p.join()

    # Any errors?
    try:
        err = err_q.get_nowait()
    except queue.Empty:
        pass
    else:
        raise err

    try:
        while True:
            n_purged += purged_q.get_nowait()
    except queue.Empty:
        pass
    assert n_created >= 100
    # We attempted to purge most directories at some point
    assert n_purged >= 0.5 * n_created > 0
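

# The child-process worker `_workspace_concurrency` is not part of this
# excerpt. Here is a minimal sketch of what it plausibly does, inferred from
# how the parent process uses `purged_q`, `err_q`, `stop_evt`, and `barrier`;
# the whole body below is an assumption, not the library's actual
# implementation.
def _workspace_concurrency(base_dir, purged_q, err_q, stop_evt, barrier):
    ws = WorkSpace(base_dir)
    n_purged = 0
    barrier.wait()  # line up with the parent and the sibling processes
    while not stop_evt.is_set():
        try:
            # Purge the work dirs whose finalizers the parent detached;
            # assume _purge_leftovers() returns the purged directories.
            purged = ws._purge_leftovers()
        except Exception as e:
            err_q.put(e)  # re-raised by the parent after joining
        else:
            n_purged += len(purged)
    purged_q.put(n_purged)  # summed by the parent into its `n_purged` total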


# An alternative version of the same test: `max_procs` is hard-coded per
# platform instead of being passed in as a parameter.
def test_workspace_concurrency(tmpdir):
    """WorkSpace concurrency test. We merely check that no exception or
    deadlock happens.
    """
    base_dir = str(tmpdir)

    err_q = mp_context.Queue()
    purged_q = mp_context.Queue()
    stop_evt = mp_context.Event()
    ws = WorkSpace(base_dir)
    # Make sure purging only happens in the child processes
    ws._purge_leftovers = lambda: None

    # Windows (or at least Windows GitHub CI) has been observed to be exceptionally
    # slow. Don't stress it too much.
    max_procs = 2 if WINDOWS else 16

    # Run a bunch of child processes that will try to purge concurrently
    barrier = mp_context.Barrier(parties=max_procs + 1)
    processes = [
        mp_context.Process(
            target=_workspace_concurrency,
            args=(base_dir, purged_q, err_q, stop_evt, barrier),
        )
        for _ in range(max_procs)
    ]
    for p in processes:
        p.start()
    barrier.wait()
    n_created = 0
    n_purged = 0
    t1 = time()
    try:
        # On Linux, you will typically end up with n_created > 10,000.
        # On Windows, it can take 60 seconds to create 50 locks!
        while time() - t1 < 10:
            # Add a bunch of locks and simulate forgetting them.
            # The concurrent processes should try to purge them.
            for _ in range(100):
                d = ws.new_work_dir(prefix="workspace-concurrency-")
                d._finalizer.detach()
                n_created += 1

    finally:
        stop_evt.set()
        for p in processes:
            p.join()

    # Any errors?
    try:
        err = err_q.get_nowait()
    except queue.Empty:
        pass
    else:
        raise err

    try:
        while True:
            n_purged += purged_q.get_nowait()
    except queue.Empty:
        pass

    # We attempted to purge most directories at some point
    assert n_purged >= 0.5 * n_created > 0