Example #1
import sys
import threading
from contextlib import contextmanager
from time import sleep, time


@contextmanager
def check_thread_leak():
    """Context manager to ensure we haven't leaked any threads"""
    active_threads_start = set(threading._active)

    yield

    start = time()
    while True:
        bad = [
            t
            for t, v in threading._active.items()
            if t not in active_threads_start
            and "Threaded" not in v.name
            and "watch message" not in v.name
            and "TCP-Executor" not in v.name
        ]
        if not bad:
            break
        else:
            sleep(0.01)
        if time() > start + 5:
            from distributed import profile

            tid = bad[0]
            thread = threading._active[tid]
            call_stacks = profile.call_stack(sys._current_frames()[tid])
            assert False, (thread, call_stacks)
Example #2
import sys
import threading
from contextlib import contextmanager
from time import sleep, time


@contextmanager
def check_thread_leak():
    """Context manager to ensure we haven't leaked any threads"""
    active_threads_start = threading.enumerate()

    yield

    start = time()
    while True:
        bad_threads = [
            thread
            for thread in threading.enumerate()
            if thread not in active_threads_start
            and "Threaded" not in thread.name
            and "watch message" not in thread.name
            and "TCP-Executor" not in thread.name
            # TODO: Make sure profile thread is cleaned up
            # and remove the line below
            and "Profile" not in thread.name
        ]
        if not bad_threads:
            break
        else:
            sleep(0.01)
        if time() > start + 5:
            # Raise an error with information about leaked threads
            from distributed import profile

            bad_thread = bad_threads[0]
            call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
            assert False, (bad_thread, call_stacks)
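Both variants are used the same way: wrap a test body in the context manager, and any thread that outlives the block (and is not on the name allowlist) fails the test along with its call stack. A minimal usage sketch; the test body is illustrative:

import threading

def test_no_leak():
    with check_thread_leak():
        t = threading.Thread(target=lambda: None, name="short-lived")
        t.start()
        t.join()  # the thread is gone before the block exits, so the check passes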
Example #3
import sys
import threading

from distributed.profile import call_stack


def test_call_stack():
    frame = sys._current_frames()[threading.get_ident()]
    L = call_stack(frame)
    assert isinstance(L, list)
    assert all(isinstance(s, str) for s in L)
    assert "test_call_stack" in str(L[-1])
Example #4
        def test_func():
            del _global_workers[:]
            _global_clients.clear()
            active_threads_start = set(threading._active)

            reset_config()

            dask.config.set({'distributed.comm.timeouts.connect': '5s'})
            # Restore default logging levels
            # XXX use pytest hooks/fixtures instead?
            for name, level in logging_levels.items():
                logging.getLogger(name).setLevel(level)

            result = None
            workers = []

            with pristine_loop() as loop:
                with check_active_rpc(loop, active_rpc_timeout):
                    @gen.coroutine
                    def coro():
                        with dask.config.set(config):
                            s = False
                            for i in range(5):
                                try:
                                    s, ws = yield start_cluster(
                                        ncores, scheduler, loop, security=security,
                                        Worker=Worker, scheduler_kwargs=scheduler_kwargs,
                                        worker_kwargs=worker_kwargs)
                                except Exception as e:
                                    logger.error("Failed to start gen_cluster, retryng", exc_info=True)
                                else:
                                    workers[:] = ws
                                    args = [s] + workers
                                    break
                            if s is False:
                                raise Exception("Could not start cluster")
                            if client:
                                c = yield Client(s.address, loop=loop, security=security,
                                                 asynchronous=True, **client_kwargs)
                                args = [c] + args
                            try:
                                future = func(*args)
                                if timeout:
                                    future = gen.with_timeout(timedelta(seconds=timeout),
                                                              future)
                                result = yield future
                                if s.validate:
                                    s.validate_state()
                            finally:
                                if client:
                                    yield c._close(fast=s.status == 'closed')
                                yield end_cluster(s, workers)
                                yield gen.with_timeout(timedelta(seconds=1),
                                                       cleanup_global_workers())

                            try:
                                c = yield default_client()
                            except ValueError:
                                pass
                            else:
                                yield c._close(fast=True)

                            raise gen.Return(result)

                    result = loop.run_sync(coro, timeout=timeout * 2 if timeout else timeout)

                for w in workers:
                    if getattr(w, 'data', None):
                        try:
                            w.data.clear()
                        except EnvironmentError:
                            # zict backends can fail if their storage directory
                            # was already removed
                            pass
                        del w.data
                DequeHandler.clear_all_instances()
                for w in _global_workers:
                    w = w()
                    w._close(report=False, executor_wait=False)
                    if w.status == 'running':
                        w.close()
                del _global_workers[:]

            if PY3 and not WINDOWS and check_new_threads:
                start = time()
                while True:
                    bad = [t for t, v in threading._active.items()
                           if t not in active_threads_start and
                           "Threaded" not in v.name and
                           "watch message" not in v.name]
                    if not bad:
                        break
                    else:
                        sleep(0.01)
                    if time() > start + 5:
                        from distributed import profile
                        tid = bad[0]
                        thread = threading._active[tid]
                        call_stacks = profile.call_stack(sys._current_frames()[tid])
                        assert False, (thread, call_stacks)
            _cleanup_dangling()
            with ignoring(AttributeError):
                del thread_state.on_event_loop_thread
            return result
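The test_func above is the synchronous wrapper that distributed's gen_cluster decorator builds around a test generator (func in the code). A test written against this harness typically looks roughly like the sketch below; the test body is illustrative:

from distributed.utils_test import gen_cluster

@gen_cluster(client=True)
def test_submit(c, s, a, b):
    # The wrapper above passes in c (Client), s (Scheduler) and the started
    # workers; it also enforces the timeout and tears everything down.
    future = c.submit(lambda x: x + 1, 1)
    result = yield future
    assert result == 2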
Example #5
def test_call_stack():
    # get_thread_identity is the project's older alias for threading.get_ident
    # (compare Example #3)
    frame = sys._current_frames()[get_thread_identity()]
    L = call_stack(frame)
    assert isinstance(L, list)
    assert all(isinstance(s, str) for s in L)
    assert 'test_call_stack' in str(L[-1])
Example #6
def test_call_stack_f_lineno(f_lasti: int, f_lineno: int) -> None:
    assert call_stack(FakeFrame(f_lasti=f_lasti, f_code=FAKE_CODE)) == [
        f'  File "<stdin>", line {f_lineno}, in example\n\t'
    ]
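The FAKE_CODE and FakeFrame fixtures referenced here are defined elsewhere in the test module. Below is a minimal sketch of stand-ins that would satisfy the interface this test exercises; the names come from the test above, but these definitions are assumptions, not the project's actual fixtures:

import dataclasses
from types import CodeType

# Hypothetical stand-ins -- call_stack() only reads frame-like attributes,
# so a plain dataclass is enough for the fake frame.
FAKE_CODE: CodeType = compile(
    "def example():\n    pass", "<stdin>", "exec"
).co_consts[0]  # the code object named "example", with co_filename "<stdin>"

@dataclasses.dataclass
class FakeFrame:
    f_lasti: int                      # bytecode offset, used to derive the line number
    f_code: CodeType
    f_back: "FakeFrame | None" = None
    f_globals: dict = dataclasses.field(default_factory=dict)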