Code example #1
File: test_scheduler.py  Project: sonlia/distributed
def test_filtered_communication(s, a, b):
    e = yield connect(ip=s.ip, port=s.port)
    f = yield connect(ip=s.ip, port=s.port)
    yield write(e, {'op': 'register-client', 'client': 'e'})
    yield write(f, {'op': 'register-client', 'client': 'f'})
    yield read(e)
    yield read(f)
    e = BatchedStream(e, 0)
    f = BatchedStream(f, 0)

    assert set(s.streams) == {'e', 'f'}

    yield write(e, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'y': dumps_task((inc, 'x'))},
                    'dependencies': {'x': [], 'y': ['x']},
                    'client': 'e',
                    'keys': ['y']})

    yield write(f, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'z': dumps_task((add, 'x', 10))},
                    'dependencies': {'x': [], 'z': ['x']},
                    'client': 'f',
                    'keys': ['z']})

    msg = yield read(e)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg = yield read(f)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
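Most of the snippets in this listing take (s, a, b), a scheduler and two workers, and use bare yield, which only works inside a coroutine test harness; the decorator is simply not shown in the excerpts. As a rough sketch of how such a test is typically declared, assuming distributed's gen_cluster helper from distributed.utils_test (the decorator arguments and the client name are illustrative; the register-client / stream-start exchange itself appears in later examples):

from distributed.core import connect, read, write
from distributed.utils_test import gen_cluster


@gen_cluster()  # assumed harness: starts a Scheduler `s` and two Workers `a`, `b`
def test_register_round_trip(s, a, b):
    stream = yield connect(s.ip, s.port)                        # raw stream to the scheduler
    yield write(stream, {'op': 'register-client', 'client': 'demo'})
    msg = yield read(stream)                                    # scheduler acknowledges the stream
    assert msg['op'] == 'stream-start'
    stream.close()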
Code example #2
File: test_batched.py  Project: dask/distributed
def test_send_after_stream_start():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)

        b.start(stream)
        b.send('hello')
        b.send('world')
        result = yield read(stream)
        if len(result) < 2:
            result += yield read(stream)
        assert result == ['hello', 'world']
Code example #3
    def f():
        server = Server({'ping': pingpong})
        server.listen(8887)

        stream = yield connect('127.0.0.1', 8887)

        yield write(stream, {'op': 'ping'})
        response = yield read(stream)
        assert response == b'pong'

        yield write(stream, {'op': 'ping', 'close': True})
        response = yield read(stream)
        assert response == b'pong'

        server.stop()
Code example #4
File: test_core.py  Project: thrasibule/distributed
    def f():
        server = Server({"ping": pingpong})
        server.listen(8887)

        stream = yield connect("127.0.0.1", 8887)

        yield write(stream, {"op": "ping"})
        response = yield read(stream)
        assert response == b"pong"

        yield write(stream, {"op": "ping", "close": True})
        response = yield read(stream)
        assert response == b"pong"

        server.stop()
Code example #5
File: test_batched.py  Project: dask/distributed
def _run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=0.01)
        b.start(stream)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), read(stream))
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        stream.close()  # external closing
        yield b.close(ignore_closed=True)
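The bare yields above imply that _run_traffic_jam is itself a Tornado coroutine (its @gen.coroutine decorator is not shown in the excerpt). A driver test for it would plausibly look like the sketch below; the test name and the message count/size are illustrative assumptions, not taken from the source:

from distributed.utils_test import gen_test


@gen_test()
def test_traffic_jam_small():
    # assumes _run_traffic_jam is wrapped with @gen.coroutine in its module
    yield _run_traffic_jam(50, 300000)  # 50 messages of roughly 300 kB each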
Code example #6
File: test_nanny.py  Project: canavandl/distributed
def test_monitor_resources():
    pytest.importorskip('psutil')
    c = Center(ip='127.0.0.1')
    c.listen(0)
    n = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1')

    yield n._start()
    nn = rpc(ip=n.ip, port=n.port)
    assert n.process.is_alive()
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)

    assert isinstance(d['timestamp'], datetime)

    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    stream.close()
    yield n._close()
    c.stop()
Code example #7
def test_progress_stream(e, s, a, b):
    futures = e.map(div, [1] * 10, range(10))

    x = 1
    for i in range(5):
        x = do(inc)(x)
    future = e.compute(x)

    yield _wait(futures + [future])

    stream = yield progress_stream(s.address, interval=0.010)
    msg = yield read(stream)
    assert msg == {'all': {'div': 10, 'inc': 5, 'finalize': 1},
                   'erred': {'div': 1},
                   'in_memory': {'div': 9, 'finalize': 1},
                   'released': {'div': 1, 'inc': 5}}

    d = progress_quads(msg)

    assert d == {'name': ['div', 'inc', 'finalize'],
                 'all': [10, 5, 1],
                 'in_memory': [9, 0, 1],
                 'in_memory_right': [1, 1, 1],
                 'fraction': ['10 / 10', '5 / 5', '1 / 1'],
                 'erred': [1, 0, 0],
                 'erred_left': [0.9, 1, 1],
                 'released': [1, 5, 0],
                 'released_right': [0.1, 1, 0],
                 'top': [0.7, 1.7, 2.7],
                 'center': [0.5, 1.5, 2.5],
                 'bottom': [0.3, 1.3, 2.3]}

    stream.close()
Code example #8
File: test_scheduler.py  Project: sonlia/distributed
def test_feed_setup_teardown(s, a, b):
    def setup(scheduler):
        return 1

    def func(scheduler, state):
        assert state == 1
        return 'OK'

    def teardown(scheduler, state):
        scheduler.flag = 'done'

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'setup': dumps(setup),
                         'teardown': dumps(teardown),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        assert response == 'OK'

    stream.close()
    start = time()
    while not hasattr(s, 'flag'):
        yield gen.sleep(0.01)
        assert time() - start < 5
Code example #9
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'start-control'})
    yield write(stream, {'op': 'update-graph',
                         'dsk': {'x': (inc, 1), 'y': (inc, 'x')},
                         'keys': ['y']})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
Code example #10
File: server_lifecycle.py  Project: dask/distributed
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    i = 0
    try:
        stream = yield eventstream("%(host)s:%(tcp-port)d" % options, 0.100)
        while True:
            msgs = yield read(stream)
            if not msgs:
                continue

            last_seen[0] = time()
            for msg in msgs:
                if "compute_start" in msg:
                    deque.append(msg)
                    times.append(msg["compute_start"])
                    index.append(i)
                    i += 1
                    if msg.get("transfer_start") is not None:
                        index.append(i)
                        i += 1
                    if msg.get("disk_load_start") is not None:
                        index.append(i)
                        i += 1
                    task_stream_append(rectangles, msg, workers)
    except StreamClosedError:
        pass  # don't log StreamClosedErrors
    except Exception as e:
        logger.exception(e)
    finally:
        try:
            sys.exit(0)
        except:
            pass
Code example #11
File: test_batched.py  Project: HugoTian/distributed
 def recv():
     while True:
         result = yield gen.with_timeout(timedelta(seconds=1), read(stream))
         print(result)
         L.extend(result)
         if result[-1] == 9999:
             break
Code example #12
File: test_batched.py  Project: dask/distributed
 def handle_stream(self, stream, address):
     while True:
         try:
             msg = yield read(stream)
             self.count += 1
             yield write(stream, msg)
         except StreamClosedError as e:
             return
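The handle_stream above is evidently the core of the echo_server() fixture that the BatchedSend tests in this listing rely on: it reads framed messages and writes each one straight back, counting as it goes. A self-contained sketch of such a fixture, where the context-manager wrapper and the fixed port are assumptions (the real fixture presumably picks a free port):

from contextlib import contextmanager

from tornado import gen
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer

from distributed.core import read, write


class EchoServer(TCPServer):
    """Echo every framed message back to the sender, counting as it goes."""
    count = 0

    @gen.coroutine
    def handle_stream(self, stream, address):
        while True:
            try:
                msg = yield read(stream)   # one framed message
                self.count += 1
                yield write(stream, msg)   # echo it back unchanged
            except StreamClosedError:
                return


@contextmanager
def echo_server(port=8898):                # fixed port for the sketch only
    server = EchoServer()
    server.listen(port)
    server.port = port                     # the tests reach the server via e.port
    try:
        yield server
    finally:
        server.stop()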
Code example #13
File: test_core.py  Project: martindurant/distributed
    def f():
        server = Server({'ping': pingpong})
        with pytest.raises(OSError):
            server.port
        server.listen(8887)
        assert server.port == 8887

        stream = yield connect('127.0.0.1', 8887)

        yield write(stream, {'op': 'ping'})
        response = yield read(stream)
        assert response == b'pong'

        yield write(stream, {'op': 'ping', 'close': True})
        response = yield read(stream)
        assert response == b'pong'

        server.stop()
Code example #14
def progress():
    with log_errors():
        stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                break
            else:
                messages['progress'] = msg
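Like the processing() and task_events() coroutines elsewhere in this listing, the progress() coroutine above refers to module-level state that the excerpts do not show: an options mapping used to build the scheduler address and a shared messages dict that the bokeh document models poll. A minimal sketch of that state, with placeholder values that are assumptions rather than values from the source:

# Assumed module-level state for the server_lifecycle-style coroutines:
options = {'host': '127.0.0.1', 'tcp-port': 8786}  # formatted as '%(host)s:%(tcp-port)d' % options
messages = {'progress': {}, 'processing': {}}      # each coroutine stores its latest message here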
Code example #15
def progress():
    with log_errors():
        stream = yield progress_stream('localhost:8786', 0.050)
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                break
            else:
                messages['progress'] = msg
Code example #16
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'x': (inc, 1), 'y': (inc, 'x')},
                         'dependencies': {'x': set(), 'y': {'x'}},
                         'keys': ['y'],
                         'client': 'ident'})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
    stream.close()
Code example #17
File: test_batched.py  Project: HugoTian/distributed
def test_send_after_stream_finish():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        b.send('hello')
        result = yield read(stream); assert result == ['hello']
Code example #18
File: test_batched.py  Project: HugoTian/distributed
def test_BatchedSend():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream); assert result == ['hello', 'hello', 'world']
        result = yield read(stream); assert result == ['HELLO', 'HELLO']
Code example #19
File: test_scheduler.py  Project: indera/distributed
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    stream = BatchedStream(stream, 0)
    stream.send({'op': 'update-graph',
                 'tasks': {'x': dumps_task((inc, 1)),
                           'y': dumps_task((inc, 'x'))},
                 'dependencies': {'x': [], 'y': ['x']},
                 'keys': ['y'],
                 'client': 'ident'})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    stream.send({'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
    stream.close()
Code example #20
    def f():
        server = Server({'ping': pingpong})
        with pytest.raises(OSError):
            server.port
        server.listen(8887)
        assert server.port == 8887

        stream = yield connect('127.0.0.1', 8887)

        n = yield write(stream, {'op': 'ping'})
        assert isinstance(n, int)
        assert 4 <= n <= 1000

        response = yield read(stream)
        assert response == b'pong'

        yield write(stream, {'op': 'ping', 'close': True})
        response = yield read(stream)
        assert response == b'pong'

        server.stop()
Code example #21
File: test_batched.py  Project: zegami/distributed
def test_BatchedSend():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        assert str(len(b.buffer)) in str(b)
        assert str(len(b.buffer)) in repr(b)
        b.start(stream)

        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream); assert result == ['hello', 'hello', 'world']
        result = yield read(stream); assert result == ['HELLO', 'HELLO']
Code example #22
File: test_batched.py  Project: zegami/distributed
def test_send_before_start():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)

        b.send('hello')
        b.send('world')

        b.start(stream)
        result = yield read(stream); assert result == ['hello', 'world']
Code example #23
def test_feed(s, a, b):
    def func(scheduler):
        return scheduler.processing, scheduler.stacks

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed', 'function': func, 'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks

    stream.close()
Code example #24
def test_send_after_stream_finish():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        b.send('hello')
        result = yield read(stream)
        assert result == ['hello']
Code example #25
File: server_lifecycle.py  Project: dask/distributed
def processing():
    with log_errors():
        from distributed.diagnostics.scheduler import processing

        stream = yield connect(ip=options["host"], port=options["tcp-port"])
        yield write(stream, {"op": "feed", "function": dumps(processing), "interval": 0.200})
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                break
            else:
                messages["processing"] = msg
Code example #26
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'start-control'})
    yield write(
        stream, {
            'op': 'update-graph',
            'dsk': {
                'x': (inc, 1),
                'y': (inc, 'x')
            },
            'keys': ['y']
        })

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
Code example #27
def test_feed(s, a, b):
    def func(scheduler):
        return scheduler.processing, scheduler.stacks

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': func,
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks

    stream.close()
Code example #28
def processing():
    with log_errors():
        from distributed.diagnostics.scheduler import processing
        stream = yield connect(ip=options['host'], port=options['tcp-port'])
        yield write(stream, {'op': 'feed',
                             'function': dumps(processing),
                             'interval': 0.200})
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                break
            else:
                messages['processing'] = msg
Code example #29
def test_progress_stream(e, s, a, b):
    futures = e.map(div, [1] * 10, range(10))

    x = 1
    for i in range(5):
        x = do(inc)(x)
    future = e.compute(x)

    yield _wait(futures + [future])

    stream = yield progress_stream(s.address, interval=0.010)
    msg = yield read(stream)
    assert msg == {
        'all': {
            'div': 10,
            'inc': 5,
            'finalize': 1
        },
        'erred': {
            'div': 1
        },
        'in_memory': {
            'div': 9,
            'finalize': 1
        },
        'released': {
            'div': 1,
            'inc': 5
        }
    }

    d = progress_quads(msg)

    assert d == {
        'name': ['div', 'inc', 'finalize'],
        'all': [10, 5, 1],
        'in_memory': [9, 0, 1],
        'in_memory_right': [1, 1, 1],
        'fraction': ['10 / 10', '5 / 5', '1 / 1'],
        'erred': [1, 0, 0],
        'erred_left': [0.9, 1, 1],
        'released': [1, 5, 0],
        'released_right': [0.1, 1, 0],
        'top': [0.7, 1.7, 2.7],
        'center': [0.5, 1.5, 2.5],
        'bottom': [0.3, 1.3, 2.3]
    }

    stream.close()
Code example #30
def test_compute_stream(s, a, b):
    stream = yield connect(a.ip, a.port)
    yield write(stream, {'op': 'compute-stream'})
    msgs = [{'op': 'compute-task', 'function': dumps(inc), 'args': dumps((i,)), 'key': 'x-%d' % i}
            for i in range(10)]

    bstream = BatchedStream(stream, 0)
    for msg in msgs[:5]:
        yield write(stream, msg)

    for i in range(5):
        msg = yield read(bstream)
        assert msg['status'] == 'OK'
        assert msg['key'][0] == 'x'

    for msg in msgs[5:]:
        yield write(stream, msg)

    for i in range(5):
        msg = yield read(bstream)
        assert msg['status'] == 'OK'
        assert msg['key'][0] == 'x'

    yield write(stream, {'op': 'close'})
Code example #31
def test_feed(s, a, b):
    def func(scheduler):
        return dumps((scheduler.processing, scheduler.stacks))

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks
        assert cloudpickle.loads(response) == expected

    close(stream)
Code example #32
File: test_scheduler.py  Project: sonlia/distributed
def test_feed(s, a, b):
    def func(scheduler):
        return dumps((scheduler.processing, scheduler.stacks))

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks
        assert cloudpickle.loads(response) == expected

    stream.close()
Code example #33
def test_feed_large_bytestring(s, a, b):
    np = pytest.importorskip('numpy')

    x = np.ones(10000000)

    def func(scheduler):
        y = x
        return True

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        assert response == True

    close(stream)
Code example #34
File: test_scheduler.py  Project: dask/distributed
def test_feed_large_bytestring(s, a, b):
    np = pytest.importorskip('numpy')

    x = np.ones(10000000)

    def func(scheduler):
        y = x
        return True

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        assert response == True

    close(stream)
Code example #35
File: test_nanny.py  Project: aterrel/distributed
    def f():
        nn = rpc(ip=n.ip, port=n.port)
        yield n._start()
        assert n.process.is_alive()
        d = n.resource_collect()
        assert {'cpu_percent', 'memory_percent'}.issubset(d)

        assert isinstance(d['timestamp'], datetime)

        stream = yield connect(ip=n.ip, port=n.port)
        yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

        for i in range(3):
            msg = yield read(stream)
            assert isinstance(msg, dict)
            assert {'cpu_percent', 'memory_percent'}.issubset(msg)

        stream.close()
        yield n._close()
        c.stop()
Code example #36
def test_monitor_resources(s):
    pytest.importorskip('psutil')
    n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)

    yield n._start()
    assert isalive(n.process)
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)

    assert 'timestamp' in d

    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    close(stream)
    yield n._close()
    s.stop()
Code example #37
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    i = 0
    with log_errors():
        stream = yield eventstream('%(host)s:%(tcp-port)d' % options, 0.100)
        while True:
            try:
                msgs = yield read(stream)
            except StreamClosedError:
                break
            else:
                if not msgs:
                    continue

                last_seen[0] = time()
                for msg in msgs:
                    if 'compute_start' in msg:
                        deque.append(msg)
                        times.append(msg['compute_start'])
                        index.append(i); i += 1
                        if msg.get('transfer_start') is not None:
                            index.append(i); i += 1
                        task_stream_append(rectangles, msg, workers)
Code example #38
File: test_nanny.py  Project: dask/distributed
def test_monitor_resources(s):
    pytest.importorskip('psutil')
    n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)

    yield n._start()
    assert isalive(n.process)
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)

    assert 'timestamp' in d

    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    close(stream)
    yield n._close()
    s.stop()
Code example #39
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    i = 0
    with log_errors():
        stream = yield eventstream('localhost:8786', 0.100)
        while True:
            try:
                msgs = yield read(stream)
            except StreamClosedError:
                break
            else:
                if not msgs:
                    continue

                last_seen[0] = time()
                for msg in msgs:
                    if 'compute-start' in msg:
                        deque.append(msg)
                        times.append(msg['compute-start'])
                        index.append(i); i += 1
                        if 'transfer-start' in msg:
                            index.append(i); i += 1
                        task_stream_append(rectangles, msg, workers)
Code example #40
def test_progress_stream(c, s, a, b):
    futures = c.map(div, [1] * 10, range(10))

    x = 1
    for i in range(5):
        x = do(inc)(x)
    future = c.compute(x)

    yield _wait(futures + [future])

    stream = yield progress_stream(s.address, interval=0.010)
    msg = yield read(stream)
    nbytes = msg.pop('nbytes')
    assert msg == {'all': {'div': 10, 'inc': 5, 'finalize': 1},
                   'erred': {'div': 1},
                   'memory': {'div': 9, 'finalize': 1},
                   'released': {'inc': 5}}
    assert set(nbytes) == set(msg['all'])
    assert all(v > 0 for v in nbytes.values())

    assert progress_quads(msg)

    stream.close()
Code example #41
def test_eventstream_remote(e, s, a, b):
    stream = yield eventstream(s.address, interval=0.010)

    start = time()
    while not s.plugins:
        yield gen.sleep(0.01)
        assert time() < start + 5

    futures = e.map(div, [1] * 10, range(10))

    start = time()
    total = []
    while len(total) < 10:
        msgs = yield read(stream)
        assert isinstance(msgs, list)
        total.extend(msgs)
        assert time() < start + 5

    stream.close()
    start = time()
    while s.plugins:
        yield gen.sleep(0.01)
        assert time() < start + 5
Code example #42
def test_progress_stream(c, s, a, b):
    futures = c.map(div, [1] * 10, range(10))

    x = 1
    for i in range(5):
        x = do(inc)(x)
    future = c.compute(x)

    yield _wait(futures + [future])

    stream = yield progress_stream(s.address, interval=0.010)
    msg = yield read(stream)
    nbytes = msg.pop('nbytes')
    assert msg == {
        'all': {
            'div': 10,
            'inc': 5,
            'finalize': 1
        },
        'erred': {
            'div': 1
        },
        'memory': {
            'div': 9,
            'finalize': 1
        },
        'released': {
            'inc': 5
        }
    }
    assert set(nbytes) == set(msg['all'])
    assert all(v > 0 for v in nbytes.values())

    assert progress_quads(msg)

    stream.close()
Code example #43
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    i = 0
    with log_errors():
        stream = yield eventstream('localhost:8786', 0.100)
        while True:
            try:
                msgs = yield read(stream)
            except StreamClosedError:
                break
            else:
                if not msgs:
                    continue

                last_seen[0] = time()
                for msg in msgs:
                    if 'compute-start' in msg:
                        deque.append(msg)
                        times.append(msg['compute-start'])
                        index.append(i)
                        i += 1
                        if 'transfer-start' in msg:
                            index.append(i)
                            i += 1
                        task_stream_append(rectangles, msg, workers)
Code example #44
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    i = 0
    with log_errors():
        stream = yield eventstream('%(host)s:%(tcp-port)d' % options, 0.100)
        while True:
            try:
                msgs = yield read(stream)
            except StreamClosedError:
                break
            else:
                if not msgs:
                    continue

                last_seen[0] = time()
                for msg in msgs:
                    if 'compute_start' in msg:
                        deque.append(msg)
                        times.append(msg['compute_start'])
                        index.append(i)
                        i += 1
                        if msg.get('transfer_start') is not None:
                            index.append(i)
                            i += 1
                        task_stream_append(rectangles, msg, workers)
Code example #45
 def handle_stream(self, stream, address):
     while True:
         msg = yield read(stream)
         self.count += 1
         yield write(stream, msg)
Code example #46
File: test_scheduler.py  Project: ogrisel/distributed
def test_scheduler(s, a, b):
    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    msg = yield read(stream)
    assert msg['op'] == 'stream-start'

    # Test update graph
    yield write(
        stream, {
            'op': 'update-graph',
            'tasks': {
                'x': (inc, 1),
                'y': (inc, 'x'),
                'z': (inc, 'y')
            },
            'dependencies': {
                'x': set(),
                'y': {'x'},
                'z': {'y'}
            },
            'keys': ['x', 'z'],
            'client': 'ident'
        })
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert a.data.get('x') == 2 or b.data.get('x') == 2

    # Test erring tasks
    yield write(
        stream, {
            'op': 'update-graph',
            'tasks': {
                'a': (div, 1, 0),
                'b': (inc, 'a')
            },
            'dependencies': {
                'a': set(),
                'b': {'a'}
            },
            'keys': ['a', 'b'],
            'client': 'ident'
        })

    while True:
        msg = yield read(stream)
        if msg['op'] == 'task-erred' and msg['key'] == 'b':
            break

    # Test missing data
    yield write(stream, {'op': 'missing-data', 'missing': ['z']})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    # Test missing data without being informed
    for w in [a, b]:
        if 'z' in w.data:
            del w.data['z']
    yield write(
        stream, {
            'op': 'update-graph',
            'tasks': {
                'zz': (inc, 'z')
            },
            'dependencies': {
                'zz': {'z'}
            },
            'keys': ['zz'],
            'client': 'ident'
        })
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'zz':
            break

    write(stream, {'op': 'close'})
    stream.close()
Code example #47
File: test_batched.py  Project: zegami/distributed
 def recv():
     while True:
         result = yield gen.with_timeout(timedelta(seconds=1), read(stream))
         L.extend(result)
         if result[-1] == 9999:
             break