def test_filtered_communication(s, a, b):
    """Two registered clients each receive 'key-in-memory' only for their own keys."""
    e = yield connect(ip=s.ip, port=s.port)
    f = yield connect(ip=s.ip, port=s.port)
    yield write(e, {'op': 'register-client', 'client': 'e'})
    yield write(f, {'op': 'register-client', 'client': 'f'})
    # Consume the registration acknowledgements before wrapping the streams.
    yield read(e)
    yield read(f)
    e = BatchedStream(e, 0)
    f = BatchedStream(f, 0)
    assert set(s.streams) == {'e', 'f'}
    # Both graphs share task 'x'; each client asks only for its own key.
    yield write(e, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'y': dumps_task((inc, 'x'))},
                    'dependencies': {'x': [], 'y': ['x']},
                    'client': 'e',
                    'keys': ['y']})
    yield write(f, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'z': dumps_task((add, 'x', 10))},
                    'dependencies': {'x': [], 'z': ['x']},
                    'client': 'f',
                    'keys': ['z']})
    msg = yield read(e)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg = yield read(f)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
def test_filtered_communication(s, a, b):
    """Each registered client comm receives 'key-in-memory' only for its own keys."""
    c = yield connect(s.address)
    f = yield connect(s.address)
    yield c.write({'op': 'register-client', 'client': 'c'})
    yield f.write({'op': 'register-client', 'client': 'f'})
    # Consume the registration acknowledgements.
    yield c.read()
    yield f.read()
    assert set(s.client_comms) == {'c', 'f'}
    # Both graphs share task 'x'; each client asks only for its own key.
    yield c.write({'op': 'update-graph',
                   'tasks': {'x': dumps_task((inc, 1)),
                             'y': dumps_task((inc, 'x'))},
                   'dependencies': {'x': [], 'y': ['x']},
                   'client': 'c',
                   'keys': ['y']})
    yield f.write({'op': 'update-graph',
                   'tasks': {'x': dumps_task((inc, 1)),
                             'z': dumps_task((add, 'x', 10))},
                   'dependencies': {'x': [], 'z': ['x']},
                   'client': 'f',
                   'keys': ['z']})
    # Reads yield a batch; unpack the single expected message.
    msg, = yield c.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg, = yield f.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
def test_stress():
    """Concurrently batch-send 10000 integers through an echo server and
    verify every value arrives, in order, with no loss."""
    with echo_server() as e:
        comm = yield connect(e.address)
        L = []

        @gen.coroutine
        def send():
            # Send pairs with tiny random pauses to vary batch boundaries.
            b = BatchedSend(interval=3)
            b.start(comm)
            for i in range(0, 10000, 2):
                b.send(i)
                b.send(i + 1)
                yield gen.sleep(0.00001 * random.randint(1, 10))

        @gen.coroutine
        def recv():
            # Drain batches until the final value (9999) is seen.
            while True:
                result = yield gen.with_timeout(timedelta(seconds=1), comm.read())
                L.extend(result)
                if result[-1] == 9999:
                    break

        yield All([send(), recv()])
        assert L == list(range(0, 10000, 1))
        comm.close()
def run_traffic_jam(nsends, nbytes):
    """Flood a BatchedSend with `nsends` payloads of `nbytes` each and
    verify nothing is dropped or reordered under backpressure."""
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes, )).astype('u1').data)
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=0.01)
        b.start(comm)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            # Randomly yield so batches of varying sizes are produced.
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), comm.read())
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends
        assert results == list(range(nsends))

        comm.close()  # external closing
        yield b.close()
def test_serializers():
    """With only 'msgpack' allowed, serializable payloads pass through and an
    unserializable one (a lambda) is logged and dropped rather than sent."""
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval='10ms', serializers=['msgpack'])
        b.start(comm)

        b.send({'x': to_serialize(123)})
        b.send({'x': to_serialize('hello')})
        yield gen.sleep(0.100)

        # A lambda cannot be serialized by msgpack; expect a logged error.
        b.send({'x': to_serialize(lambda x: x + 1)})
        with captured_logger('distributed.protocol') as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert 'serialize' in value
        assert 'type' in value
        assert 'function' in value

        msg = yield comm.read()
        assert msg == [{'x': 123}, {'x': 'hello'}]

        # The failed message must never arrive.
        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100), comm.read())
def test_monitor_resources():
    """A Nanny reports cpu/memory stats once directly and then periodically
    over a 'monitor_resources' stream."""
    pytest.importorskip('psutil')
    c = Center(ip='127.0.0.1')
    c.listen(0)
    n = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1')
    yield n._start()
    nn = rpc(ip=n.ip, port=n.port)
    assert n.process.is_alive()

    # One-shot collection.
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)
    assert isinstance(d['timestamp'], datetime)

    # Streaming collection at a 10ms interval.
    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})
    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    stream.close()
    yield n._close()
    c.stop()
def test_server(s, a, b):
    """Register a client, submit a tiny graph, wait for completion, then
    close the stream and verify further reads raise CommClosedError."""
    comm = yield connect(s.address)
    yield comm.write({'op': 'register-client', 'client': 'ident'})
    yield comm.write({
        'op': 'update-graph',
        'tasks': {
            'x': dumps_task((inc, 1)),
            'y': dumps_task((inc, 'x'))
        },
        'dependencies': {
            'x': [],
            'y': ['x']
        },
        'keys': ['y'],
        'client': 'ident'
    })

    # Drain messages until the requested key is reported in memory.
    while True:
        msg = yield readone(comm)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield comm.write({'op': 'close-stream'})
    msg = yield readone(comm)
    assert msg == {'op': 'stream-closed'}
    with pytest.raises(CommClosedError):
        yield readone(comm)
    yield comm.close()
def f():
    """A Server on a fixed port answers 'ping' over several address spellings;
    reading `.port` before listen() raises ValueError."""
    server = Server({'ping': pingpong})
    with pytest.raises(ValueError):
        server.port
    server.listen(8881)
    assert server.port == 8881
    assert server.address == ('tcp://%s:8881' % get_ip())

    # Bare, schemed, and canonical addresses must all connect.
    for addr in ('127.0.0.1:8881', 'tcp://127.0.0.1:8881', server.address):
        comm = yield connect(addr)

        n = yield comm.write({'op': 'ping'})
        # write() returns the number of bytes put on the wire.
        assert isinstance(n, int)
        assert 4 <= n <= 1000

        response = yield comm.read()
        assert response == b'pong'

        yield comm.write({'op': 'ping', 'close': True})
        response = yield comm.read()
        assert response == b'pong'

        yield comm.close()

    server.stop()
def test_feed_setup_teardown(s, a, b):
    """The scheduler 'feed' op threads setup-state into func and runs
    teardown once the client disconnects."""
    def setup(scheduler):
        return 1

    def func(scheduler, state):
        # Receives the value returned by setup().
        assert state == 1
        return "OK"

    def teardown(scheduler, state):
        scheduler.flag = "done"

    comm = yield connect(s.address)
    yield comm.write({
        "op": "feed",
        "function": dumps(func),
        "setup": dumps(setup),
        "teardown": dumps(teardown),
        "interval": 0.01,
    })
    for i in range(5):
        response = yield comm.read()
        assert response == "OK"

    yield comm.close()
    # Teardown runs asynchronously after disconnect; poll for its effect.
    start = time()
    while not hasattr(s, "flag"):
        yield gen.sleep(0.01)
        assert time() - start < 5
def f():
    """A Server on a fixed port answers 'ping' over several address spellings;
    reading `.port` before listen() raises ValueError."""
    server = Server({"ping": pingpong})
    with pytest.raises(ValueError):
        server.port
    server.listen(8881)
    assert server.port == 8881
    assert server.address == ("tcp://%s:8881" % get_ip())

    # Bare, schemed, and canonical addresses must all connect.
    for addr in ("127.0.0.1:8881", "tcp://127.0.0.1:8881", server.address):
        comm = yield connect(addr)

        n = yield comm.write({"op": "ping"})
        # write() returns the number of bytes put on the wire.
        assert isinstance(n, int)
        assert 4 <= n <= 1000

        response = yield comm.read()
        assert response == b"pong"

        yield comm.write({"op": "ping", "close": True})
        response = yield comm.read()
        assert response == b"pong"

        yield comm.close()

    server.stop()
def test_server(s, a, b):
    """Submit a tiny graph over a BatchedStream, wait for completion, then
    close the stream cleanly."""
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    stream = BatchedStream(stream, 0)
    stream.send({
        'op': 'update-graph',
        'tasks': {
            'x': dumps_task((inc, 1)),
            'y': dumps_task((inc, 'x'))
        },
        'dependencies': {
            'x': [],
            'y': ['x']
        },
        'keys': ['y'],
        'client': 'ident'
    })

    # Drain messages until the requested key is reported in memory.
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    stream.send({'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
    stream.close()
def test_serializers():
    """With only 'msgpack' allowed, serializable payloads pass through and an
    unserializable one (a lambda) is logged and dropped rather than sent."""
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval="10ms", serializers=["msgpack"])
        b.start(comm)

        b.send({"x": to_serialize(123)})
        b.send({"x": to_serialize("hello")})
        yield gen.sleep(0.100)

        # A lambda cannot be serialized by msgpack; expect a logged error.
        b.send({"x": to_serialize(lambda x: x + 1)})
        with captured_logger("distributed.protocol") as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert "serialize" in value
        assert "type" in value
        assert "function" in value

        msg = yield comm.read()
        assert list(msg) == [{"x": 123}, {"x": "hello"}]

        # The failed message must never arrive.
        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100), comm.read())
def test_compute_stream(s, a, b):
    """Send compute-task messages to a worker in two waves and read back an
    OK status for each over the batched stream."""
    stream = yield connect(a.ip, a.port)
    yield write(stream, {'op': 'compute-stream'})
    msgs = [{
        'op': 'compute-task',
        'function': dumps(inc),
        'args': dumps((i, )),
        'key': 'x-%d' % i
    } for i in range(10)]

    bstream = BatchedStream(stream, 0)
    # First wave of five tasks.
    for msg in msgs[:5]:
        yield write(stream, msg)

    for i in range(5):
        msg = yield read(bstream)
        assert msg['status'] == 'OK'
        assert msg['key'][0] == 'x'

    # Second wave of five tasks.
    for msg in msgs[5:]:
        yield write(stream, msg)

    for i in range(5):
        msg = yield read(bstream)
        assert msg['status'] == 'OK'
        assert msg['key'][0] == 'x'

    yield write(stream, {'op': 'close'})
def f():
    """Exercise the center's rpc surface: register workers, add/remove keys,
    and query who_has/has_what/ncores, then unregister."""
    reader, writer = yield from connect('127.0.0.1', 8006, loop=loop)
    cc = rpc(reader, writer)

    response = yield from cc.register(address='alice', ncores=4)
    assert 'alice' in c.has_what
    assert c.ncores['alice'] == 4

    response = yield from cc.add_keys(address='alice', keys=['x', 'y'])
    assert response == b'OK'

    response = yield from cc.register(address='bob', ncores=4)
    response = yield from cc.add_keys(address='bob', keys=['y', 'z'])
    assert response == b'OK'

    # 'y' is now held by both workers.
    response = yield from cc.who_has(keys=['x', 'y'])
    assert response == {'x': set(['alice']), 'y': set(['alice', 'bob'])}

    response = yield from cc.remove_keys(address='bob', keys=['y'])
    assert response == b'OK'

    response = yield from cc.has_what(keys=['alice', 'bob'])
    assert response == {'alice': set(['x', 'y']), 'bob': set(['z'])}

    response = yield from cc.ncores()
    assert response == {'alice': 4, 'bob': 4}

    # Unregistering removes the worker's bookkeeping entirely.
    response = yield from cc.unregister(address='alice', close=True)
    assert response == b'OK'
    assert 'alice' not in c.has_what
    assert 'alice' not in c.ncores

    yield from c._close()
def test_feed_setup_teardown(s, a, b):
    """The scheduler 'feed' op threads setup-state into func and runs
    teardown once the client disconnects."""
    def setup(scheduler):
        return 1

    def func(scheduler, state):
        # Receives the value returned by setup().
        assert state == 1
        return 'OK'

    def teardown(scheduler, state):
        scheduler.flag = 'done'

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'setup': dumps(setup),
                         'teardown': dumps(teardown),
                         'interval': 0.01})
    for i in range(5):
        response = yield read(stream)
        assert response == 'OK'

    stream.close()
    # Teardown runs asynchronously after disconnect; poll for its effect.
    start = time()
    while not hasattr(s, 'flag'):
        yield gen.sleep(0.01)
        assert time() - start < 5
def test_serializers():
    """With only 'msgpack' allowed, serializable payloads pass through and an
    unserializable one (a lambda) is logged and dropped rather than sent."""
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval='10ms', serializers=['msgpack'])
        b.start(comm)

        b.send({'x': to_serialize(123)})
        b.send({'x': to_serialize('hello')})
        yield gen.sleep(0.100)

        # A lambda cannot be serialized by msgpack; expect a logged error.
        b.send({'x': to_serialize(lambda x: x + 1)})
        with captured_logger('distributed.protocol') as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert 'serialize' in value
        assert 'type' in value
        assert 'function' in value

        msg = yield comm.read()
        assert list(msg) == [{'x': 123}, {'x': 'hello'}]

        # The failed message must never arrive.
        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100), comm.read())
def test_feed_setup_teardown(s, a, b):
    """The scheduler 'feed' op threads setup-state into func and runs
    teardown once the client disconnects."""
    def setup(scheduler):
        return 1

    def func(scheduler, state):
        # Receives the value returned by setup().
        assert state == 1
        return 'OK'

    def teardown(scheduler, state):
        scheduler.flag = 'done'

    comm = yield connect(s.address)
    yield comm.write({
        'op': 'feed',
        'function': dumps(func),
        'setup': dumps(setup),
        'teardown': dumps(teardown),
        'interval': 0.01
    })
    for i in range(5):
        response = yield comm.read()
        assert response == 'OK'

    yield comm.close()
    # Teardown runs asynchronously after disconnect; poll for its effect.
    start = time()
    while not hasattr(s, 'flag'):
        yield gen.sleep(0.01)
        assert time() - start < 5
def run_traffic_jam(nsends, nbytes):
    """Flood a BatchedSend with `nsends` payloads of `nbytes` each and
    verify nothing is dropped or reordered under backpressure."""
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=0.01)
        b.start(comm)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            # Randomly yield so batches of varying sizes are produced.
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), comm.read())
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends
        assert results == list(range(nsends))

        comm.close()  # external closing
        yield b.close()
def test_close_twice():
    """Closing a BatchedSend twice is idempotent and raises nothing."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)
        yield b.close()
        # Second close must be a no-op.
        yield b.close()
def test_filtered_communication(s, a, b):
    """Two registered clients each receive 'key-in-memory' only for their own
    keys; tasks here are raw (inc, ...) tuples rather than serialized."""
    e = yield connect(ip=s.ip, port=s.port)
    f = yield connect(ip=s.ip, port=s.port)
    yield write(e, {'op': 'register-client', 'client': 'e'})
    yield write(f, {'op': 'register-client', 'client': 'f'})
    # Consume the registration acknowledgements.
    yield read(e)
    yield read(f)
    assert set(s.streams) == {'e', 'f'}
    yield write(
        e, {
            'op': 'update-graph',
            'tasks': {
                'x': (inc, 1),
                'y': (inc, 'x')
            },
            'dependencies': {
                'x': set(),
                'y': {'x'}
            },
            'client': 'e',
            'keys': ['y']
        })
    yield write(
        f, {
            'op': 'update-graph',
            'tasks': {
                'x': (inc, 1),
                'z': (add, 'x', 10)
            },
            'dependencies': {
                'x': set(),
                'z': {'x'}
            },
            'client': 'f',
            'keys': ['z']
        })
    msg = yield read(e)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg = yield read(f)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
def test_filtered_communication(s, a, b):
    """Each registered client comm receives 'key-in-memory' only for its own keys."""
    c = yield connect(s.address)
    f = yield connect(s.address)
    yield c.write({"op": "register-client", "client": "c"})
    yield f.write({"op": "register-client", "client": "f"})
    # Consume the registration acknowledgements.
    yield c.read()
    yield f.read()
    assert set(s.client_comms) == {"c", "f"}
    # Both graphs share task 'x'; each client asks only for its own key.
    yield c.write({
        "op": "update-graph",
        "tasks": {
            "x": dumps_task((inc, 1)),
            "y": dumps_task((inc, "x"))
        },
        "dependencies": {
            "x": [],
            "y": ["x"]
        },
        "client": "c",
        "keys": ["y"],
    })
    yield f.write({
        "op": "update-graph",
        "tasks": {
            "x": dumps_task((inc, 1)),
            "z": dumps_task((add, "x", 10))
        },
        "dependencies": {
            "x": [],
            "z": ["x"]
        },
        "client": "f",
        "keys": ["z"],
    })
    # Reads yield a batch; unpack the single expected message.
    msg, = yield c.read()
    assert msg["op"] == "key-in-memory"
    assert msg["key"] == "y"
    msg, = yield f.read()
    assert msg["op"] == "key-in-memory"
    assert msg["key"] == "z"
def test_filtered_communication(s, a, b):
    """Each registered client comm receives 'key-in-memory' only for its own keys."""
    c = yield connect(s.address)
    f = yield connect(s.address)
    yield c.write({'op': 'register-client', 'client': 'c'})
    yield f.write({'op': 'register-client', 'client': 'f'})
    # Consume the registration acknowledgements.
    yield c.read()
    yield f.read()
    assert set(s.comms) == {'c', 'f'}
    # Both graphs share task 'x'; each client asks only for its own key.
    yield c.write({
        'op': 'update-graph',
        'tasks': {
            'x': dumps_task((inc, 1)),
            'y': dumps_task((inc, 'x'))
        },
        'dependencies': {
            'x': [],
            'y': ['x']
        },
        'client': 'c',
        'keys': ['y']
    })
    yield f.write({
        'op': 'update-graph',
        'tasks': {
            'x': dumps_task((inc, 1)),
            'z': dumps_task((add, 'x', 10))
        },
        'dependencies': {
            'x': [],
            'z': ['x']
        },
        'client': 'f',
        'keys': ['z']
    })
    # Reads yield a batch; unpack the single expected message.
    msg, = yield c.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg, = yield f.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
def test_deserialize_error():
    """A server-side exception travels back readably even when the client
    connects with deserialize=False (no raw bytestrings in the message)."""
    server = Server({'throws': throws})
    server.listen(0)

    comm = yield connect(server.address, deserialize=False)
    with pytest.raises(Exception) as info:
        yield send_recv(comm, op='throws')

    assert type(info.value) == Exception
    for c in str(info.value):
        assert c.isalpha() or c in "(',!)"  # no crazy bytestrings
def test_close_closed():
    """Closing a BatchedSend whose underlying comm was already closed
    externally must not raise."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)
        b.send(123)
        comm.close()  # external closing
        yield b.close()
def test_send_before_start():
    """Messages queued on a BatchedSend before start() are delivered as one
    batch once the comm is attached."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)

        b.send('hello')
        b.send('world')

        b.start(comm)
        result = yield comm.read()
        # NOTE(review): expects a tuple here, while the sibling variant of
        # this test expects the list ['hello', 'world'] — confirm which
        # container the batched protocol actually delivers.
        assert result == ('hello', 'world')
def test_feed(s, a, b):
    """Drive the scheduler 'feed' op and read five periodic responses."""
    def func(scheduler):
        return scheduler.processing, scheduler.stacks

    # NOTE(review): `func` is sent un-serialized here, unlike the sibling
    # tests that send dumps(func) — confirm the server accepts a raw callable.
    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': func,
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        # NOTE(review): `expected` is computed but never compared against
        # `response`; the assertion appears to be missing.
        expected = s.processing, s.stacks

    stream.close()
def test_send_before_start():
    """Messages queued on a BatchedSend before start() are delivered as one
    batch once the comm is attached."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)

        b.send('hello')
        b.send('world')

        b.start(comm)
        result = yield comm.read()
        assert result == ['hello', 'world']
def test_send_after_stream_start():
    """Messages sent after start() arrive; they may be split across two
    batches, so read again if the first batch is short."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)

        b.send('hello')
        b.send('world')

        result = yield comm.read()
        if len(result) < 2:
            result += yield comm.read()
        # NOTE(review): expects a tuple here, while the sibling variant of
        # this test expects the list ['hello', 'world'] — confirm which
        # container the batched protocol actually delivers.
        assert result == ('hello', 'world')
def test_feed(s, a, b):
    """The scheduler 'feed' op periodically runs the given function and
    streams its (pickled) result back."""
    def func(scheduler):
        return dumps(dict(scheduler.worker_info))

    comm = yield connect(s.address)
    yield comm.write({"op": "feed", "function": dumps(func), "interval": 0.01})

    for i in range(5):
        response = yield comm.read()
        expected = dict(s.worker_info)
        assert cloudpickle.loads(response) == expected

    yield comm.close()
def test_feed(s, a, b):
    """The scheduler 'feed' op periodically runs the given function and
    streams its (pickled) result back."""
    def func(scheduler):
        return dumps(scheduler.processing)

    comm = yield connect(s.address)
    yield comm.write({'op': 'feed', 'function': dumps(func), 'interval': 0.01})

    for i in range(5):
        response = yield comm.read()
        expected = s.processing
        assert cloudpickle.loads(response) == expected

    yield comm.close()
def test_send_after_stream_start():
    """Messages sent after start() arrive; they may be split across two
    batches, so read again if the first batch is short."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)

        b.send('hello')
        b.send('world')

        result = yield comm.read()
        if len(result) < 2:
            result += yield comm.read()
        assert result == ['hello', 'world']
def test_close_closed():
    """Closing a BatchedSend after its comm was closed externally must not
    raise, and the closed state shows up in repr()/str()."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)
        b.send(123)
        comm.close()  # external closing

        yield b.close()

        assert 'closed' in repr(b)
        assert 'closed' in str(b)
def processing():
    """Subscribe to the scheduler's 'processing' feed and keep the latest
    message in the shared `messages` dict until the stream closes."""
    with log_errors():
        from distributed.diagnostics.scheduler import processing
        stream = yield connect(ip=options["host"], port=options["tcp-port"])
        yield write(stream, {"op": "feed",
                             "function": dumps(processing),
                             "interval": 0.200})
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                # Remote side hung up; stop polling.
                break
            else:
                messages["processing"] = msg
def processing():
    """Subscribe to the scheduler's 'processing' feed and keep the latest
    message in the shared `messages` dict until the stream closes."""
    with log_errors():
        from distributed.diagnostics.scheduler import processing
        stream = yield connect(ip=options['host'], port=options['tcp-port'])
        yield write(stream, {'op': 'feed',
                             'function': dumps(processing),
                             'interval': 0.200})
        while True:
            try:
                msg = yield read(stream)
            except StreamClosedError:
                # Remote side hung up; stop polling.
                break
            else:
                messages['processing'] = msg
def f():
    """A handler listed in blocked_handlers answers with a ValueError
    explaining that it is disallowed."""
    server = Server({"ping": pingpong}, blocked_handlers=["ping"])
    server.listen(8881)

    comm = yield connect(server.address)
    yield comm.write({"op": "ping"})
    msg = yield comm.read()

    # The error comes back in-band rather than closing the connection.
    assert "exception" in msg
    assert isinstance(msg["exception"], ValueError)
    assert "'ping' handler has been explicitly disallowed" in repr(msg["exception"])

    comm.close()
    server.stop()
def test_feed(s, a, b):
    """The scheduler 'feed' op periodically runs the given function and
    streams its (pickled) result back."""
    def func(scheduler):
        return dumps((scheduler.processing, scheduler.stacks))

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks
        assert cloudpickle.loads(response) == expected

    close(stream)
def processing():
    """Subscribe to the scheduler's 'processing' feed and keep the latest
    message in the shared `messages` dict until the comm closes."""
    with log_errors():
        from distributed.diagnostics.scheduler import processing
        addr = options['scheduler-address']
        comm = yield connect(addr)
        yield comm.write({'op': 'feed',
                          'function': dumps(processing),
                          'interval': 0.200})
        while True:
            try:
                msg = yield comm.read()
            except CommClosedError:
                # Remote side hung up; stop polling.
                break
            else:
                messages['processing'] = msg
def f():
    """A Server answers ping twice over a raw stream, including a final
    ping that asks the server to close."""
    server = Server({"ping": pingpong})
    server.listen(8887)

    stream = yield connect("127.0.0.1", 8887)

    yield write(stream, {"op": "ping"})
    response = yield read(stream)
    assert response == b"pong"

    yield write(stream, {"op": "ping", "close": True})
    response = yield read(stream)
    assert response == b"pong"

    server.stop()
def test_feed(s, a, b):
    """The scheduler 'feed' op periodically runs the given function and
    streams its (pickled) result back."""
    def func(scheduler):
        return dumps(dict(scheduler.worker_info))

    comm = yield connect(s.address)
    yield comm.write({'op': 'feed', 'function': dumps(func), 'interval': 0.01})

    for i in range(5):
        response = yield comm.read()
        expected = dict(s.worker_info)
        assert cloudpickle.loads(response) == expected

    yield comm.close()
def f():
    """A Server answers ping twice over a raw stream, including a final
    ping that asks the server to close."""
    server = Server({'ping': pingpong})
    server.listen(8887)

    stream = yield connect('127.0.0.1', 8887)

    yield write(stream, {'op': 'ping'})
    response = yield read(stream)
    assert response == b'pong'

    yield write(stream, {'op': 'ping', 'close': True})
    response = yield read(stream)
    assert response == b'pong'

    server.stop()
def test_feed(s, a, b):
    """The scheduler 'feed' op periodically runs the given function and
    streams its (pickled) result back."""
    def func(scheduler):
        return dumps((scheduler.processing, scheduler.stacks))

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        expected = s.processing, s.stacks
        assert cloudpickle.loads(response) == expected

    stream.close()
def test_blocked_handlers_are_respected(s, a, b):
    """A scheduler with 'feed' in its blocked handlers answers a feed request
    with a ValueError instead of streaming results."""
    def func(scheduler):
        return dumps(dict(scheduler.worker_info))

    comm = yield connect(s.address)
    yield comm.write({"op": "feed", "function": dumps(func), "interval": 0.01})

    response = yield comm.read()

    # The error comes back in-band rather than closing the connection.
    assert "exception" in response
    assert isinstance(response["exception"], ValueError)
    assert "'feed' handler has been explicitly disallowed" in repr(
        response["exception"])

    yield comm.close()
def test_server(s, a, b):
    """Submit a two-task dsk over a control stream, wait for completion,
    then close the stream cleanly."""
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'start-control'})
    yield write(stream, {'op': 'update-graph',
                         'dsk': {'x': (inc, 1), 'y': (inc, 'x')},
                         'keys': ['y']})

    # Drain messages until the requested key is reported in memory.
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
def test_send_recv_args():
    """send_recv honours reply=False (no result) and close=True (comm is
    closed after the final exchange)."""
    server = Server({'echo': echo})
    server.listen(0)

    comm = yield connect(server.address)
    result = yield send_recv(comm, op='echo', x=b'1')
    assert result == b'1'
    assert not comm.closed()

    # reply=False: server sends nothing back.
    result = yield send_recv(comm, op='echo', x=b'2', reply=False)
    assert result is None
    assert not comm.closed()

    # close=True: the reply still arrives, then the comm is torn down.
    result = yield send_recv(comm, op='echo', x=b'3', close=True)
    assert result == b'3'
    assert comm.closed()

    server.stop()
def f():
    """A Server answers ping on a fixed port; reading `.port` before
    listen() raises."""
    server = Server({'ping': pingpong})
    # NOTE(review): sibling variants of this test expect ValueError here,
    # not OSError — confirm which exception `.port` raises pre-listen.
    with pytest.raises(OSError):
        server.port
    server.listen(8887)
    assert server.port == 8887

    stream = yield connect('127.0.0.1', 8887)

    yield write(stream, {'op': 'ping'})
    response = yield read(stream)
    assert response == b'pong'

    yield write(stream, {'op': 'ping', 'close': True})
    response = yield read(stream)
    assert response == b'pong'

    server.stop()
def test_server(s, a, b):
    """Register a client, submit a raw-tuple graph, wait for completion,
    then close the stream cleanly."""
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'x': (inc, 1), 'y': (inc, 'x')},
                         'dependencies': {'x': set(), 'y': {'x'}},
                         'keys': ['y'],
                         'client': 'ident'})

    # Drain messages until the requested key is reported in memory.
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
    stream.close()
def test_feed_large_bytestring(s, a, b):
    """The 'feed' op keeps working when the fed function closes over a large
    (~80 MB) numpy array, i.e. large payloads do not wedge the connection."""
    np = pytest.importorskip('numpy')

    x = np.ones(10000000)

    def func(scheduler):
        y = x  # keep the large array captured in the function's closure
        return True

    comm = yield connect(s.address)
    yield comm.write({'op': 'feed', 'function': dumps(func), 'interval': 0.05})

    for i in range(5):
        response = yield comm.read()
        assert response is True

    yield comm.close()
def test_send_before_close():
    """close() flushes a pending message before closing; sending on a closed
    BatchedSend raises CommClosedError."""
    with echo_server() as e:
        comm = yield connect(e.address)
        b = BatchedSend(interval=10)
        b.start(comm)

        cnt = int(e.count)
        b.send('hello')
        yield b.close()  # close immediately after sending
        assert not b.buffer

        # The queued message must still reach the server exactly once.
        start = time()
        while e.count != cnt + 1:
            yield gen.sleep(0.01)
            assert time() < start + 5

        with pytest.raises(CommClosedError):
            b.send('123')
def test_feed_large_bytestring(s, a, b):
    """The 'feed' op keeps working when the fed function closes over a large
    (~80 MB) numpy array, i.e. large payloads do not wedge the connection."""
    np = pytest.importorskip('numpy')

    x = np.ones(10000000)

    def func(scheduler):
        y = x  # keep the large array captured in the function's closure
        return True

    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'feed',
                         'function': dumps(func),
                         'interval': 0.01})

    for i in range(5):
        response = yield read(stream)
        # `is True` instead of `== True` (PEP 8 / E712): assert the exact
        # singleton, matching the sibling variant of this test.
        assert response is True

    close(stream)
def f():
    """Start a Nanny, check one-shot resource collection, then stream
    periodic resource reports over a 'monitor_resources' connection."""
    nn = rpc(ip=n.ip, port=n.port)
    yield n._start()
    assert n.process.is_alive()

    # One-shot collection.
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)
    assert isinstance(d['timestamp'], datetime)

    # Streaming collection at a 10ms interval.
    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    stream.close()
    yield n._close()
    c.stop()
def test_server(s, a, b):
    """Register a client, submit a tiny graph, wait for completion, then
    close the stream and verify further reads raise StreamClosedError."""
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'x': dumps_task((inc, 1)),
                                   'y': dumps_task((inc, 'x'))},
                         'dependencies': {'x': [], 'y': ['x']},
                         'keys': ['y'],
                         'client': 'ident'})

    # Drain messages until the requested key is reported in memory.
    while True:
        msg = yield readone(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield readone(stream)
    assert msg == {'op': 'stream-closed'}
    with pytest.raises(StreamClosedError):
        yield readone(stream)
    close(stream)
def test_monitor_resources(s):
    """A Nanny attached to scheduler `s` reports cpu/memory stats once
    directly and then periodically over a 'monitor_resources' stream."""
    pytest.importorskip('psutil')
    n = Nanny(s.ip, s.port, ncores=2, ip='127.0.0.1', loop=s.loop)
    yield n._start()
    assert isalive(n.process)

    # One-shot collection.
    d = n.resource_collect()
    assert {'cpu_percent', 'memory_percent'}.issubset(d)
    assert 'timestamp' in d

    # Streaming collection at a 10ms interval.
    stream = yield connect(ip=n.ip, port=n.port)
    yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

    for i in range(3):
        msg = yield read(stream)
        assert isinstance(msg, dict)
        assert {'cpu_percent', 'memory_percent'}.issubset(msg)

    close(stream)
    yield n._close()
    s.stop()