Example #1
async def test_basic(c, s, a, b):
    for component in [
            StateTable,
            ExecutingTimeSeries,
            CommunicatingTimeSeries,
            CrossFilter,
            SystemMonitor,
    ]:

        aa = component(a)
        bb = component(b)

        xs = c.map(inc, range(10), workers=a.address)
        ys = c.map(dec, range(10), workers=b.address)

        def slowall(*args):
            sleep(1)

        x = c.submit(slowall, xs, ys, 1, workers=a.address)
        y = c.submit(slowall, xs, ys, 2, workers=b.address)
        await asyncio.sleep(0.1)

        aa.update()
        bb.update()

        assert len(first(aa.source.data.values())) and len(
            first(bb.source.data.values()))
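
Most of these examples use `first` from `toolz` to pull a single element out of an iterable, typically `dict.values()`. A minimal, self-contained sketch of that helper (with made-up data, not taken from the tests above):

from toolz import first

data = {"a": [1, 2, 3], "b": [4, 5]}
assert first(data.values()) == [1, 2, 3]   # first value in dict iteration order
assert first(iter([10, 20, 30])) == 10     # works on any iterable; raises StopIteration if empty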
Example #2
async def test_service_hosts_match_worker(s):
    async with Worker(s.address, host="tcp://0.0.0.0") as w:
        sock = first(w.http_server._sockets.values())
        assert sock.getsockname()[0] in ("::", "0.0.0.0")

    async with Worker(
        s.address, host="tcp://127.0.0.1", dashboard_address="0.0.0.0:0"
    ) as w:
        sock = first(w.http_server._sockets.values())
        assert sock.getsockname()[0] in ("::", "0.0.0.0")

    async with Worker(s.address, host="tcp://127.0.0.1") as w:
        sock = first(w.http_server._sockets.values())
        assert sock.getsockname()[0] in ("::", "0.0.0.0")
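
The assertions in Example #2 read the bound address back off the listening socket via `getsockname()`. A tiny standalone sketch of that check, assuming a plain IPv4 socket:

import socket

s = socket.socket()
s.bind(("0.0.0.0", 0))                    # wildcard bind on an ephemeral port
assert s.getsockname()[0] == "0.0.0.0"    # the worker tests also accept "::" (IPv6 wildcard)
s.close()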
Example #3
async def test_logs(cleanup):
    worker = {"cls": Worker, "options": {"nthreads": 1}}
    # NOTE: `scheduler` here is a {"cls": ..., "options": ...} spec dict defined outside this snippet
    async with SpecCluster(asynchronous=True,
                           scheduler=scheduler,
                           worker=worker) as cluster:
        cluster.scale(2)
        await cluster

        logs = await cluster.get_logs()
        assert is_valid_xml("<div>" + logs._repr_html_() + "</div>")
        assert "Scheduler" in logs
        for worker in cluster.scheduler.workers:
            assert worker in logs

        assert "Registered" in str(logs)

        logs = await cluster.get_logs(scheduler=True, workers=False)
        assert list(logs) == ["Scheduler"]

        logs = await cluster.get_logs(scheduler=False, workers=False)
        assert list(logs) == []

        logs = await cluster.get_logs(scheduler=False, workers=True)
        assert set(logs) == set(cluster.scheduler.workers)

        w = toolz.first(cluster.scheduler.workers)
        logs = await cluster.get_logs(scheduler=False, workers=[w])
        assert set(logs) == {w}
Example #4
    def get(self):
        try:
            from bokeh.server.tornado import BokehTornado

            bokeh_application = first(
                app for app in self.server.http_application.applications
                if isinstance(app, BokehTornado))
            individual_bokeh = {
                uri.strip("/").replace("-", " ").title(): uri
                for uri in bokeh_application.app_paths
                if uri.lstrip("/").startswith("individual-")
                and not uri.endswith(".json")
            }
            individual_static = {
                uri.strip("/").replace(".html", "").replace("-", " ").title():
                "/statics/" + uri
                for uri in os.listdir(
                    os.path.join(os.path.dirname(__file__), "..", "static"))
                if uri.lstrip("/").startswith("individual-")
                and uri.endswith(".html")
            }
            result = {**individual_bokeh, **individual_static}
            self.write(result)
        except (ImportError, StopIteration):
            self.write({})
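
For context, here is a rough sketch (with hypothetical URIs) of the title-to-route mapping that the `individual_bokeh` comprehension in Example #4 produces:

uris = ["/individual-task-stream", "/individual-progress", "/status.json"]
titles = {
    uri.strip("/").replace("-", " ").title(): uri
    for uri in uris
    if uri.lstrip("/").startswith("individual-") and not uri.endswith(".json")
}
assert titles == {
    "Individual Task Stream": "/individual-task-stream",
    "Individual Progress": "/individual-progress",
}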
Example #5
async def test_CommunicatingStream(c, s, a, b):
    aa = CommunicatingStream(a)
    bb = CommunicatingStream(b)

    xs = c.map(inc, range(10), workers=a.address)
    ys = c.map(dec, range(10), workers=b.address)
    adds = c.map(add, xs, ys, workers=a.address)
    subs = c.map(sub, xs, ys, workers=b.address)

    await wait([adds, subs])

    aa.update()
    bb.update()

    assert len(first(aa.outgoing.data.values())) and len(
        first(bb.outgoing.data.values()))
    assert len(first(aa.incoming.data.values())) and len(
        first(bb.incoming.data.values()))
Example #6
async def test_basic(c, s, a, b):
    for component in [TaskStream, SystemMonitor, Occupancy, StealingTimeSeries]:
        ss = component(s)

        ss.update()
        data = ss.source.data
        assert len(first(data.values()))
        if component is Occupancy:
            assert all("127.0.0.1" in addr for addr in data["escaped_worker"])
Example #7
async def test_worker_uses_same_host_as_nanny(c, s):
    for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
        n = await Nanny(s.address, host=host)

        def func(dask_worker):
            return dask_worker.listener.listen_address

        result = await c.run(func)
        assert host in first(result.values())
        await n.close()
Example #8
def single_partition_join(left, right, **kwargs):
    # If the merge is performed on the index, divisions can be kept; otherwise
    # the new index will not necessarily correspond to the current divisions.

    meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)
    kwargs["empty_index_dtype"] = meta.index.dtype
    kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns

    name = "merge-" + tokenize(left, right, **kwargs)
    # `allowed_right` / `allowed_left` are constants defined outside this snippet:
    # the join types for which the single-partition shortcut is valid.
    if left.npartitions == 1 and kwargs["how"] in allowed_right:
        left_key = first(left.__dask_keys__())
        dsk = {
            (name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
            for i, right_key in enumerate(right.__dask_keys__())
        }

        if kwargs.get("right_index") or right._contains_index_name(
            kwargs.get("right_on")
        ):
            divisions = right.divisions
        else:
            divisions = [None for _ in right.divisions]

    elif right.npartitions == 1 and kwargs["how"] in allowed_left:
        right_key = first(right.__dask_keys__())
        dsk = {
            (name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
            for i, left_key in enumerate(left.__dask_keys__())
        }

        if kwargs.get("left_index") or left._contains_index_name(kwargs.get("left_on")):
            divisions = left.divisions
        else:
            divisions = [None for _ in left.divisions]
    else:
        raise NotImplementedError(
            "single_partition_join has no fallback for invalid calls"
        )

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[left, right])
    return new_dd_object(graph, name, meta, divisions)
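
Conceptually, when one side of the join fits in a single partition, every partition of the other side is merged against that one chunk and the results are concatenated. A toy pandas-only sketch of that idea (not dask's actual task graph):

import pandas as pd

left = pd.DataFrame({"k": [1, 2, 3], "x": [10, 20, 30]})        # the single "partition"
right_parts = [                                                  # several right partitions
    pd.DataFrame({"k": [1, 2], "y": [1.0, 2.0]}),
    pd.DataFrame({"k": [3], "y": [3.0]}),
]

# Merge each right partition against the one left partition independently,
# mirroring the per-partition tasks built in the dsk dict above.
merged = pd.concat(
    [left.merge(part, on="k", how="inner") for part in right_parts],
    ignore_index=True,
)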
Example #9
async def test_stealing_events(c, s, a, b):
    se = StealingEvents(s)

    futures = c.map(
        slowinc, range(10), delay=0.1, workers=a.address, allow_other_workers=True
    )

    await wait(futures)
    se.update()
    assert len(first(se.source.data.values()))
    assert b.tasks
    assert sum(se.source.data["count"]) >= len(b.tasks)
Example #10
async def test_service_hosts_match_worker(s):
    pytest.importorskip("bokeh")
    from distributed.dashboard import BokehWorker

    async with Worker(s.address,
                      services={("dashboard", ":0"): BokehWorker},
                      host="tcp://0.0.0.0") as w:
        sock = first(w.services["dashboard"].server._http._sockets.values())
        assert sock.getsockname()[0] in ("::", "0.0.0.0")

    async with Worker(s.address,
                      services={("dashboard", ":0"): BokehWorker},
                      host="tcp://127.0.0.1") as w:
        sock = first(w.services["dashboard"].server._http._sockets.values())
        assert sock.getsockname()[0] in ("::", "0.0.0.0")

    async with Worker(s.address,
                      services={("dashboard", 0): BokehWorker},
                      host="tcp://127.0.0.1") as w:
        sock = first(w.services["dashboard"].server._http._sockets.values())
        assert sock.getsockname()[0] == "127.0.0.1"
Example #11
async def test_stealing_events(c, s, a, b):
    se = StealingEvents(s)

    futures = c.map(
        slowinc, range(100), delay=0.1, workers=a.address, allow_other_workers=True
    )

    while not b.task_state:  # will steal soon
        await asyncio.sleep(0.01)

    se.update()

    assert len(first(se.source.data.values()))
Example #12
def test_start_ipython_qtconsole(loop):
    Popen = mock.Mock()
    with cluster() as (s, [a, b]):
        with mock.patch("distributed._ipython_utils.Popen", Popen), Client(
            s["address"], loop=loop
        ) as e:
            worker = first(e.nthreads())
            e.start_ipython_workers(worker, qtconsole=True)
            e.start_ipython_workers(worker, qtconsole=True, qtconsole_args=["--debug"])
    assert Popen.call_count == 2
    (cmd,), kwargs = Popen.call_args_list[0]
    assert cmd[:3] == ["jupyter", "qtconsole", "--existing"]
    (cmd,), kwargs = Popen.call_args_list[1]
    assert cmd[-1:] == ["--debug"]
Example #13
def test_start_ipython_remote(loop, zmq_ctx):
    from distributed._ipython_utils import remote_magic

    with cluster(1) as (s, [a]):
        with Client(s["address"], loop=loop) as e, mock_ipython() as ip:
            worker = first(e.nthreads())
            ip.user_ns["info"] = e.start_ipython_workers(worker)[worker]
            remote_magic("info 1")  # line magic
            remote_magic("info", "worker")  # cell magic

        expected = [
            ((remote_magic,), {"magic_kind": "line", "magic_name": "remote"}),
            ((remote_magic,), {"magic_kind": "cell", "magic_name": "remote"}),
        ]
        assert ip.register_magic_function.call_args_list == expected
        assert ip.register_magic_function.call_count == 2
Example #14
def test_start_ipython_workers(loop, zmq_ctx):
    from jupyter_client import BlockingKernelClient

    with cluster(1) as (s, [a]):
        with Client(s["address"], loop=loop) as e:
            info_dict = e.start_ipython_workers()
            info = first(info_dict.values())
            kc = BlockingKernelClient()
            kc.load_connection_info(info)
            kc.start_channels()
            kc.wait_for_ready(timeout=10)
            msg_id = kc.execute("worker")
            reply = kc.get_shell_msg(timeout=10)
            assert reply["parent_header"]["msg_id"] == msg_id
            assert reply["content"]["status"] == "ok"
            kc.stop_channels()
Example #15
async def test_gather_then_submit_after_failed_workers(c, s, w, x, y, z):
    L = c.map(inc, range(20))
    await wait(L)

    w.process.process._process.terminate()
    total = c.submit(sum, L)

    for _ in range(3):
        await wait(total)
        addr = first(s.tasks[total.key].who_has).address
        for worker in [x, y, z]:
            if worker.worker_address == addr:
                worker.process.process._process.terminate()
                break

        result = await c.gather([total])
        assert result == [sum(map(inc, range(20)))]
Example #16
def test_basic():
    def test_g():
        sleep(0.01)

    def test_h():
        sleep(0.02)

    def test_f():
        for i in range(100):
            test_g()
            test_h()

    thread = threading.Thread(target=test_f)
    thread.daemon = True
    thread.start()

    state = create()

    for i in range(100):
        sleep(0.02)
        frame = sys._current_frames()[thread.ident]
        process(frame, None, state)

    assert state["count"] == 100
    d = state
    while len(d["children"]) == 1:
        d = first(d["children"].values())

    assert d["count"] == 100
    assert "test_f" in str(d["description"])
    g = [
        c for c in d["children"].values() if "test_g" in str(c["description"])
    ][0]
    h = [
        c for c in d["children"].values() if "test_h" in str(c["description"])
    ][0]

    assert g["count"] < h["count"]
    assert 95 < g["count"] + h["count"] <= 100

    pd = plot_data(state)
    assert len(set(map(len, pd.values()))) == 1  # all same length
    assert len(set(pd["color"])) > 1  # different colors
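
The sampling loop in Example #16 relies on `sys._current_frames()`, which maps each thread id to the frame it is currently executing. A small standalone sketch of that mechanism:

import sys
import threading
import time

def busy():
    while True:
        time.sleep(0.01)

t = threading.Thread(target=busy, daemon=True)
t.start()
time.sleep(0.05)

frame = sys._current_frames()[t.ident]   # topmost Python frame of the worker thread
print(frame.f_code.co_name)              # typically "busy" (time.sleep itself is a C-level call)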
Example #17
async def test_global_workers(s, a, b):
    n = len(Worker._instances)
    w = first(Worker._instances)
    assert w is a or w is b