def test_filtered_communication(s, a, b):
    e = yield connect(ip=s.ip, port=s.port)
    f = yield connect(ip=s.ip, port=s.port)

    yield write(e, {'op': 'register-client', 'client': 'e'})
    yield write(f, {'op': 'register-client', 'client': 'f'})
    yield read(e)
    yield read(f)
    e = BatchedStream(e, 0)
    f = BatchedStream(f, 0)

    assert set(s.streams) == {'e', 'f'}

    yield write(e, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'y': dumps_task((inc, 'x'))},
                    'dependencies': {'x': [], 'y': ['x']},
                    'client': 'e',
                    'keys': ['y']})
    yield write(f, {'op': 'update-graph',
                    'tasks': {'x': dumps_task((inc, 1)),
                              'z': dumps_task((add, 'x', 10))},
                    'dependencies': {'x': [], 'z': ['x']},
                    'client': 'f',
                    'keys': ['z']})

    msg = yield read(e)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg = yield read(f)
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
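# NB: the coroutine tests in this module expect a running scheduler ``s`` and
# two workers ``a`` and ``b``.  In the distributed test suite these are
# normally injected by the ``gen_cluster`` decorator from
# ``distributed.utils_test``; a minimal sketch of the wiring (the decorator
# arguments are an assumption, not taken from this file):
#
#     from distributed.utils_test import gen_cluster
#
#     @gen_cluster()
#     def test_example(s, a, b):
#         ...  # test body written as a Tornado coroutine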
def test_filtered_communication(s, a, b):
    c = yield connect(s.address)
    f = yield connect(s.address)

    yield c.write({'op': 'register-client', 'client': 'c'})
    yield f.write({'op': 'register-client', 'client': 'f'})
    yield c.read()
    yield f.read()

    assert set(s.client_comms) == {'c', 'f'}

    yield c.write({'op': 'update-graph',
                   'tasks': {'x': dumps_task((inc, 1)),
                             'y': dumps_task((inc, 'x'))},
                   'dependencies': {'x': [], 'y': ['x']},
                   'client': 'c',
                   'keys': ['y']})
    yield f.write({'op': 'update-graph',
                   'tasks': {'x': dumps_task((inc, 1)),
                             'z': dumps_task((add, 'x', 10))},
                   'dependencies': {'x': [], 'z': ['x']},
                   'client': 'f',
                   'keys': ['z']})

    msg, = yield c.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'y'
    msg, = yield f.read()
    assert msg['op'] == 'key-in-memory'
    assert msg['key'] == 'z'
def test_update_graph_culls(s, a, b):
    s.update_graph(tasks={'x': dumps_task((inc, 1)),
                          'y': dumps_task((inc, 'x')),
                          'z': dumps_task((inc, 2))},
                   keys=['y'],
                   dependencies={'y': 'x', 'x': [], 'z': []},
                   client='client')
    assert 'z' not in s.tasks
    assert 'z' not in s.dependencies
def test_dumps_task():
    d = dumps_task((inc, 1))
    assert set(d) == {"function", "args"}

    f = lambda x, y=2: x + y
    d = dumps_task((apply, f, (1,), {"y": 10}))
    assert cloudpickle.loads(d["function"])(1, 2) == 3
    assert cloudpickle.loads(d["args"]) == (1,)
    assert cloudpickle.loads(d["kwargs"]) == {"y": 10}

    d = dumps_task((apply, f, (1,)))
    assert cloudpickle.loads(d["function"])(1, 2) == 3
    assert cloudpickle.loads(d["args"]) == (1,)
    assert set(d) == {"function", "args"}
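# The payload returned by ``dumps_task`` is a plain dict of serialised pieces,
# so it can be rebuilt and executed by hand.  A minimal sketch, assuming the
# 'function'/'args'/'kwargs' layout asserted above and that ``cloudpickle`` is
# imported as in the test (``run_dumped_task`` is an illustrative helper, not
# part of the distributed API):
def run_dumped_task(d):
    # Deserialise each field and apply the function to its arguments.
    function = cloudpickle.loads(d['function'])
    args = cloudpickle.loads(d['args']) if 'args' in d else ()
    kwargs = cloudpickle.loads(d['kwargs']) if 'kwargs' in d else {}
    return function(*args, **kwargs)

# e.g. run_dumped_task(dumps_task((inc, 1))) == 2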
def test_remove_client(s, a, b):
    s.update_graph(tasks={'x': dumps_task((inc, 1)),
                          'y': dumps_task((inc, 'x'))},
                   dependencies={'x': [], 'y': ['x']},
                   keys=['y'],
                   client='ident')

    assert s.tasks
    assert s.dependencies

    s.remove_client(client='ident')

    assert not s.tasks
    assert not s.dependencies
def test_scheduler_as_center():
    s = Scheduler()
    done = s.start(0)
    a = Worker('127.0.0.1', s.port, ip='127.0.0.1', ncores=1)
    a.data.update({'x': 1, 'y': 2})
    b = Worker('127.0.0.1', s.port, ip='127.0.0.1', ncores=2)
    b.data.update({'y': 2, 'z': 3})
    c = Worker('127.0.0.1', s.port, ip='127.0.0.1', ncores=3)
    yield [w._start(0) for w in [a, b, c]]

    assert s.ncores == {w.address: w.ncores for w in [a, b, c]}
    assert s.who_has == {'x': {a.address},
                         'y': {a.address, b.address},
                         'z': {b.address}}

    s.update_graph(tasks={'a': dumps_task((inc, 1))},
                   keys=['a'],
                   dependencies={'a': []})
    start = time()
    while not s.who_has['a']:
        assert time() - start < 5
        yield gen.sleep(0.01)
    assert 'a' in a.data or 'a' in b.data or 'a' in c.data

    with ignoring(StreamClosedError):
        yield [w._close() for w in [a, b, c]]

    assert s.ncores == {}
    assert s.who_has == {}

    yield s.close()
def test_scheduler_as_center():
    s = Scheduler(validate=True)
    done = s.start(0)
    a = Worker(s.address, ncores=1)
    a.data.update({'x': 1, 'y': 2})
    b = Worker(s.address, ncores=2)
    b.data.update({'y': 2, 'z': 3})
    c = Worker(s.address, ncores=3)
    yield [w._start(0) for w in [a, b, c]]

    assert s.ncores == {w.address: w.ncores for w in [a, b, c]}
    assert not s.who_has

    s.update_graph(tasks={'a': dumps_task((inc, 1))},
                   keys=['a'],
                   dependencies={'a': []})
    start = time()
    while 'a' not in s.who_has:
        assert time() - start < 5
        yield gen.sleep(0.01)
    assert 'a' in a.data or 'a' in b.data or 'a' in c.data

    yield [w._close() for w in [a, b, c]]

    assert s.ncores == {}
    assert s.who_has == {}

    yield s.close()
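# NB: ``alice`` below is a placeholder worker address registered with
# ``coerce_address=False``.  In the original test module it is a module-level
# constant (something like ``alice = 'alice:1234'``; the exact value is an
# assumption), and it is never actually connected to.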
def test_ready_add_worker(s, a, b):
    s.add_client(client='client')
    s.add_worker(address=alice, coerce_address=False)

    s.update_graph(tasks={'x-%d' % i: dumps_task((inc, i)) for i in range(20)},
                   keys=['x-%d' % i for i in range(20)],
                   client='client',
                   dependencies={'x-%d' % i: set() for i in range(20)})
def test_diagnostic(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    class Counter(SchedulerPlugin):
        def start(self, scheduler):
            scheduler.add_plugin(self)
            self.count = 0

        def task_finished(self, scheduler, key, worker, nbytes, **kwargs):
            self.count += 1

    counter = Counter()
    counter.start(s)
    assert counter in s.plugins
    assert counter.count == 0

    sched.put_nowait({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x')),
                                'z': dumps_task((inc, 'y'))},
                      'dependencies': {'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert counter.count == 3

    s.remove_plugin(counter)
    assert counter not in s.plugins
def test_server(s, a, b):
    comm = yield connect(s.address)
    yield comm.write({'op': 'register-client', 'client': 'ident'})
    yield comm.write({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x'))},
                      'dependencies': {'x': [], 'y': ['x']},
                      'keys': ['y'],
                      'client': 'ident'})

    while True:
        msg = yield readone(comm)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield comm.write({'op': 'close-stream'})
    msg = yield readone(comm)
    assert msg == {'op': 'stream-closed'}
    with pytest.raises(CommClosedError):
        yield readone(comm)
    yield comm.close()
def test_ready_remove_worker(s, a, b):
    s.update_graph(tasks={'x-%d' % i: dumps_task((inc, i)) for i in range(20)},
                   keys=['x-%d' % i for i in range(20)],
                   client='client',
                   dependencies={'x-%d' % i: [] for i in range(20)})

    assert all(len(w.processing) > w.ncores for w in s.workers.values())

    s.remove_worker(address=a.address)

    assert set(s.workers) == {b.address}
    assert all(len(w.processing) > w.ncores for w in s.workers.values())
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    stream = BatchedStream(stream, 0)
    stream.send({'op': 'update-graph',
                 'tasks': {'x': dumps_task((inc, 1)),
                           'y': dumps_task((inc, 'x'))},
                 'dependencies': {'x': [], 'y': ['x']},
                 'keys': ['y'],
                 'client': 'ident'})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    stream.send({'op': 'close-stream'})
    msg = yield read(stream)
    assert msg == {'op': 'stream-closed'}
    assert stream.closed()
    stream.close()
def test_server(s, a, b):
    stream = yield connect('127.0.0.1', s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'x': dumps_task((inc, 1)),
                                   'y': dumps_task((inc, 'x'))},
                         'dependencies': {'x': [], 'y': ['x']},
                         'keys': ['y'],
                         'client': 'ident'})

    while True:
        msg = yield readone(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'y':
            break

    yield write(stream, {'op': 'close-stream'})
    msg = yield readone(stream)
    assert msg == {'op': 'stream-closed'}
    with pytest.raises(StreamClosedError):
        yield readone(stream)
    close(stream)
def test_multi_queues(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)

    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    # Test update graph
    sched.put_nowait({'op': 'update-graph',
                      'tasks': valmap(dumps_task, {'x': (inc, 1),
                                                   'y': (inc, 'x'),
                                                   'z': (inc, 'y')}),
                      'dependencies': {'x': [], 'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    slen, rlen = len(s.scheduler_queues), len(s.report_queues)
    sched2, report2 = Queue(), Queue()
    s.handle_queues(sched2, report2)
    assert slen + 1 == len(s.scheduler_queues)
    assert rlen + 1 == len(s.report_queues)

    sched2.put_nowait({'op': 'update-graph',
                       'tasks': {'a': dumps_task((inc, 10))},
                       'dependencies': {'a': []},
                       'keys': ['a']})

    for q in [report, report2]:
        while True:
            msg = yield q.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'a':
                break
def test_update_state_supports_recomputing_released_results(s):
    s.add_worker(address=alice, ncores=1, coerce_address=False)
    s.update_graph(tasks={'x': 1,
                          'y': dumps_task((inc, 'x')),
                          'z': dumps_task((inc, 'x'))},
                   keys=['z'],
                   dependencies={'y': {'x'}, 'x': set(), 'z': {'y'}},
                   client='client')
    s.ensure_occupied()

    r = s.transition('x', 'memory', nbytes=10, type=dumps(int),
                     compute_start=10, compute_stop=11, worker=alice)
    s.transitions(r)
    s.ensure_occupied()

    r = s.transition('y', 'memory', nbytes=10, type=dumps(int),
                     compute_start=10, compute_stop=11, worker=alice)
    s.transitions(r)
    s.ensure_occupied()

    r = s.transition('z', 'memory', nbytes=10, type=dumps(int),
                     compute_start=10, compute_stop=11, worker=alice)
    s.transitions(r)
    s.ensure_occupied()

    assert not s.waiting
    assert not s.ready
    assert s.waiting_data == {'z': set()}

    assert s.who_has == {'z': {alice}}

    s.update_graph(tasks={'x': 1,
                          'y': dumps_task((inc, 'x'))},
                   keys=['y'],
                   dependencies={'y': {'x'}, 'x': set()},
                   client='client')

    assert s.waiting == {'y': {'x'}}
    assert s.waiting_data == {'x': {'y'}, 'y': set(), 'z': set()}
    assert s.who_wants == {'z': {'client'}, 'y': {'client'}}
    assert s.wants_what == {'client': {'y', 'z'}}
    assert set(s.processing[alice]) == {'x'}
def test_update_state_with_processing(s):
    s.add_worker(address=alice, ncores=1, coerce_address=False)
    s.update_graph(tasks={'x': 1,
                          'y': dumps_task((inc, 'x')),
                          'z': dumps_task((inc, 'y'))},
                   keys=['z'],
                   dependencies={'y': {'x'}, 'x': set(), 'z': {'y'}},
                   client='client')
    s.ensure_occupied()

    r = s.transition('x', 'memory', nbytes=10, type=dumps(int),
                     compute_start=10, compute_stop=11, worker=alice)
    s.transitions(r)
    s.ensure_occupied()

    assert s.waiting == {'z': {'y'}}
    assert s.waiting_data == {'x': {'y'}, 'y': {'z'}, 'z': set()}
    assert list(s.ready) == []

    assert s.who_wants == {'z': {'client'}}
    assert s.wants_what == {'client': {'z'}}

    assert s.who_has == {'x': {alice}}

    s.update_graph(tasks={'a': dumps_task((inc, 'x')),
                          'b': dumps_task((add, 'a', 'y')),
                          'c': dumps_task((inc, 'z'))},
                   keys=['b', 'c'],
                   dependencies={'a': {'x'}, 'b': {'a', 'y'}, 'c': {'z'}},
                   client='client')

    assert s.waiting == {'z': {'y'}, 'b': {'a', 'y'}, 'c': {'z'}}
    assert 'a' in s.stacks[alice] or 'a' in s.processing[alice]
    assert not s.ready
    assert s.waiting_data == {'x': {'y', 'a'}, 'y': {'z', 'b'}, 'z': {'c'},
                              'a': {'b'}, 'b': set(), 'c': set()}

    assert s.who_wants == {'b': {'client'}, 'c': {'client'}, 'z': {'client'}}
    assert s.wants_what == {'client': {'b', 'c', 'z'}}
def test_TextProgressBar_error(s, a, b):
    s.update_graph(tasks={'x': dumps_task((div, 1, 0))},
                   keys=['x'],
                   dependencies={})

    progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
                               start=False, interval=0.01)
    yield progress.listen()

    assert progress.status == 'error'
    assert progress.stream.closed()

    progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
                               start=False, interval=0.01)
    yield progress.listen()
    assert progress.status == 'error'
    assert progress.stream.closed()
def test_ready_add_worker(s, a, b):
    s.add_client(client='client')
    s.update_graph(tasks={'x-%d' % i: dumps_task((inc, i)) for i in range(20)},
                   keys=['x-%d' % i for i in range(20)],
                   client='client',
                   dependencies={'x-%d' % i: [] for i in range(20)})

    assert all(len(s.processing[w]) == s.ncores[w] for w in s.ncores)
    assert len(s.ready) + sum(map(len, s.processing.values())) == 20

    w = Worker(s.ip, s.port, ncores=3, ip='127.0.0.1')
    w.listen(0)
    s.add_worker(address=w.address, ncores=w.ncores, coerce_address=False)

    assert w.address in s.ncores
    assert all(len(s.processing[w]) == s.ncores[w] for w in s.ncores)
    assert len(s.ready) + sum(map(len, s.processing.values())) == 20
def test_ready_remove_worker(s, a, b):
    s.add_client(client='client')
    s.update_graph(tasks={'x-%d' % i: dumps_task((inc, i)) for i in range(20)},
                   keys=['x-%d' % i for i in range(20)],
                   client='client',
                   dependencies={'x-%d' % i: [] for i in range(20)})

    assert all(len(s.processing[w]) >= s.ncores[w] for w in s.ncores)
    assert not any(stack for stack in s.stacks.values())
    assert len(s.ready) + sum(map(len, s.processing.values())) == 20

    s.remove_worker(address=a.address)

    for collection in [s.ncores, s.stacks, s.processing]:
        assert set(collection) == {b.address}
    assert all(len(s.processing[w]) >= s.ncores[w] for w in s.ncores)
    assert set(s.processing) == {b.address}
    assert not any(stack for stack in s.stacks.values())
    assert len(s.ready) + sum(map(len, s.processing.values())) == 20
def test_restart(s, a, b):
    s.add_client(client='client')
    s.update_graph(tasks={'x-%d' % i: dumps_task((inc, i)) for i in range(20)},
                   keys=['x-%d' % i for i in range(20)],
                   client='client',
                   dependencies={'x-%d' % i: [] for i in range(20)})

    assert len(s.ready) + sum(map(len, s.processing.values())) == 20
    assert s.ready

    yield s.restart()

    for c in [s.stacks, s.processing, s.ncores]:
        assert len(c) == 2

    for c in [s.stacks, s.processing]:
        assert not any(v for v in c.values())

    assert not s.ready
    assert not s.tasks
    assert not s.dependencies
def test_scheduler(s, a, b):
    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    msg = yield readone(stream)
    assert msg['op'] == 'stream-start'

    # Test update graph
    yield write(stream, {'op': 'update-graph',
                         'tasks': valmap(dumps_task, {'x': (inc, 1),
                                                      'y': (inc, 'x'),
                                                      'z': (inc, 'y')}),
                         'dependencies': {'x': [], 'y': ['x'], 'z': ['y']},
                         'keys': ['x', 'z'],
                         'client': 'ident'})

    while True:
        msg = yield readone(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert a.data.get('x') == 2 or b.data.get('x') == 2

    # Test erring tasks
    yield write(stream, {'op': 'update-graph',
                         'tasks': valmap(dumps_task, {'a': (div, 1, 0),
                                                      'b': (inc, 'a')}),
                         'dependencies': {'a': [], 'b': ['a']},
                         'keys': ['a', 'b'],
                         'client': 'ident'})

    while True:
        msg = yield readone(stream)
        if msg['op'] == 'task-erred' and msg['key'] == 'b':
            break

    # Test missing data
    yield write(stream, {'op': 'missing-data', 'keys': ['z']})
    s.ensure_occupied()

    while True:
        msg = yield readone(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    # Test missing data without being informed
    for w in [a, b]:
        if 'z' in w.data:
            del w.data['z']
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'zz': dumps_task((inc, 'z'))},
                         'dependencies': {'zz': ['z']},
                         'keys': ['zz'],
                         'client': 'ident'})
    while True:
        msg = yield readone(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'zz':
            break

    write(stream, {'op': 'close'})
    close(stream)
def test_scheduler(s, a, b):
    stream = yield connect(s.ip, s.port)
    yield write(stream, {'op': 'register-client', 'client': 'ident'})
    stream = BatchedStream(stream, 10)
    msg = yield read(stream)
    assert msg['op'] == 'stream-start'

    # Test update graph
    yield write(stream, {'op': 'update-graph',
                         'tasks': valmap(dumps_task, {'x': (inc, 1),
                                                      'y': (inc, 'x'),
                                                      'z': (inc, 'y')}),
                         'dependencies': {'x': [], 'y': ['x'], 'z': ['y']},
                         'keys': ['x', 'z'],
                         'client': 'ident'})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert a.data.get('x') == 2 or b.data.get('x') == 2

    # Test erring tasks
    yield write(stream, {'op': 'update-graph',
                         'tasks': valmap(dumps_task, {'a': (div, 1, 0),
                                                      'b': (inc, 'a')}),
                         'dependencies': {'a': [], 'b': ['a']},
                         'keys': ['a', 'b'],
                         'client': 'ident'})

    while True:
        msg = yield read(stream)
        if msg['op'] == 'task-erred' and msg['key'] == 'b':
            break

    # Test missing data
    yield write(stream, {'op': 'missing-data', 'keys': ['z']})
    s.ensure_idle_ready()

    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    # Test missing data without being informed
    for w in [a, b]:
        if 'z' in w.data:
            del w.data['z']
    yield write(stream, {'op': 'update-graph',
                         'tasks': {'zz': dumps_task((inc, 'z'))},
                         'dependencies': {'zz': ['z']},
                         'keys': ['zz'],
                         'client': 'ident'})
    while True:
        msg = yield read(stream)
        if msg['op'] == 'key-in-memory' and msg['key'] == 'zz':
            break

    write(stream, {'op': 'close'})
    stream.close()