def test_server(loop):
    """Start a Server on a fixed port and round-trip ping/pong over a raw stream."""
    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        # Port is undefined before listen(); accessing it must raise.
        with pytest.raises(OSError):
            server.port
        server.listen(8887)
        assert server.port == 8887

        stream = yield connect('127.0.0.1', 8887)

        # write() returns the number of bytes sent for this message.
        n = yield write(stream, {'op': 'ping'})
        assert isinstance(n, int)
        assert 4 <= n <= 1000

        response = yield read(stream)
        assert response == b'pong'

        # 'close': True asks the server to close the connection after replying.
        yield write(stream, {'op': 'ping', 'close': True})
        response = yield read(stream)
        assert response == b'pong'

        server.stop()

    loop.run_sync(f)
def test_monitor_resources(loop):
    """Nanny reports cpu/memory measurements, both one-shot and streamed."""
    c = Center('127.0.0.1', 8026)
    n = Nanny('127.0.0.1', 8027, 8028, '127.0.0.1', 8026, ncores=2)
    c.listen(c.port)

    @gen.coroutine
    def f():
        nn = rpc(ip=n.ip, port=n.port)  # NOTE(review): nn is unused here
        yield n._start()
        assert n.process.is_alive()

        # One-shot collection.
        d = n.resource_collect()
        assert {'cpu_percent', 'memory_percent'}.issubset(d)
        assert isinstance(d['timestamp'], datetime)

        # Streaming collection: subscribe, then read a few periodic messages.
        stream = yield connect(ip=n.ip, port=n.port)
        yield write(stream, {'op': 'monitor_resources', 'interval': 0.01})

        for i in range(3):
            msg = yield read(stream)
            assert isinstance(msg, dict)
            assert {'cpu_percent', 'memory_percent'}.issubset(msg)

        stream.close()
        yield n._close()
        c.stop()

    loop.run_sync(f)
def test_fast_kill(loop):
    """Executor._restart() on nanny-managed workers completes quickly and cancels tasks."""
    from distributed import Nanny, rpc
    c = Center('127.0.0.1', 8006)
    a = Nanny('127.0.0.1', 8007, 8008, '127.0.0.1', 8006, ncores=2)
    b = Nanny('127.0.0.1', 8009, 8010, '127.0.0.1', 8006, ncores=2)
    e = Executor((c.ip, c.port), start=False, loop=loop)
    c.listen(c.port)

    @gen.coroutine
    def f():
        yield a._start()
        yield b._start()
        # Wait until both workers have registered with the center.
        while len(c.ncores) < 2:
            yield gen.sleep(0.01)
        yield e._start()

        L = e.map(sleep, range(10))
        try:
            start = time()
            yield e._restart()
            # Restart must be fast even with tasks in flight.
            assert time() - start < 5
            assert all(x.status == 'cancelled' for x in L)

            # The executor is usable again after restart.
            x = e.submit(inc, 1)
            result = yield x._result()
            assert result == 2
        finally:
            yield a._close()
            yield b._close()
            yield e._shutdown(fast=True)
            c.stop()

    loop.run_sync(f)
def test_All(loop):
    """All gathers coroutine results in order and propagates the first exception."""
    @gen.coroutine
    def throws():
        1 / 0

    @gen.coroutine
    def slow():
        yield gen.sleep(10)

    @gen.coroutine
    def inc(x):
        raise gen.Return(x + 1)

    @gen.coroutine
    def f():
        results = yield All(*[inc(i) for i in range(10)])
        assert results == list(range(1, 11))

        # The error must surface without waiting for the slow coroutine,
        # regardless of argument order.
        start = time()
        for tasks in [[throws(), slow()], [slow(), throws()]]:
            try:
                yield All(tasks)
                assert False
            except ZeroDivisionError:
                pass
        end = time()
        assert end - start < 10

    loop.run_sync(f)
def test_All(loop):
    """All (list-argument form) gathers results in order and propagates exceptions."""
    @gen.coroutine
    def throws():
        1 / 0

    @gen.coroutine
    def slow():
        yield gen.sleep(10)

    @gen.coroutine
    def inc(x):
        raise gen.Return(x + 1)

    @gen.coroutine
    def f():
        results = yield All([inc(i) for i in range(10)])
        assert results == list(range(1, 11))

        # The error must surface without waiting for the slow coroutine.
        start = time()
        for tasks in [[throws(), slow()], [slow(), throws()]]:
            try:
                yield All(tasks)
                assert False
            except ZeroDivisionError:
                pass
        end = time()
        assert end - start < 10

    loop.run_sync(f)
def test_All(loop):
    """asyncio variant: All gathers results in order and propagates exceptions."""
    async def throws():
        1 / 0

    async def slow():
        await asyncio.sleep(10)

    async def inc(x):
        return x + 1

    async def f():
        results = await All([inc(i) for i in range(10)])
        assert results == list(range(1, 11))

        # The error must surface without waiting for the slow coroutine.
        start = time()
        for tasks in [[throws(), slow()], [slow(), throws()]]:
            try:
                await All(tasks)
                assert False
            except ZeroDivisionError:
                pass
        end = time()
        assert end - start < 10

    loop.run_sync(f)
def test_compute_who_has(loop):
    """compute with who_has fetches the dependency from the named peer worker."""
    @gen.coroutine
    def f():
        c = Center(ip='127.0.0.1')
        c.listen(0)
        x = Worker(c.ip, c.port, ip='127.0.0.1')
        y = Worker(c.ip, c.port, ip='127.0.0.1')
        z = Worker(c.ip, c.port, ip='127.0.0.1')
        # x and y hold different values under the same key 'a'.
        x.data['a'] = 1
        y.data['a'] = 2
        yield [x._start(), y._start(), z._start()]

        zz = rpc(ip=z.ip, port=z.port)
        # Pointing who_has at x means z must compute inc(1) == 2.
        yield zz.compute(function=dumps(inc),
                         args=dumps(('a', )),
                         who_has={'a': [x.address]},
                         key='b')
        assert z.data['b'] == 2

        # Pointing who_has at y means z must compute inc(2) == 3.
        yield zz.compute(function=dumps(inc),
                         args=dumps(('a', )),
                         who_has={'a': [y.address]},
                         key='c')
        assert z.data['c'] == 3

        yield [x._close(), y._close(), z._close()]
        zz.close_streams()

    loop.run_sync(f, timeout=5)
def test_server(loop):
    """ Simple Server test. """

    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        # Port is undefined before listen(); accessing it must raise.
        with pytest.raises(ValueError):
            server.port
        server.listen(8881)
        assert server.port == 8881
        assert server.address == ('tcp://%s:8881' % get_ip())

        # The server must be reachable under every spelling of its address.
        for addr in ('127.0.0.1:8881', 'tcp://127.0.0.1:8881', server.address):
            comm = yield connect(addr)

            n = yield comm.write({'op': 'ping'})
            assert isinstance(n, int)
            assert 4 <= n <= 1000

            response = yield comm.read()
            assert response == b'pong'

            yield comm.write({'op': 'ping', 'close': True})
            response = yield comm.read()
            assert response == b'pong'

            yield comm.close()

        server.stop()

    loop.run_sync(f)
def test_retry_does_retry_and_sleep(loop):
    """retry() re-invokes a failing coroutine `count` times with backoff sleeps."""
    # test the retry and sleep pattern of `retry`
    n_calls = 0

    class MyEx(Exception):
        pass

    async def coro():
        nonlocal n_calls
        n_calls += 1
        raise MyEx(f"RT_ERROR {n_calls}")

    sleep_calls = []

    async def my_sleep(amount):
        # Record requested sleep durations instead of actually sleeping.
        sleep_calls.append(amount)
        return

    with mock.patch("asyncio.sleep", my_sleep):
        with pytest.raises(MyEx, match="RT_ERROR 6"):
            loop.run_sync(lambda: retry(
                coro,
                retry_on_exceptions=(MyEx, ),
                count=5,
                delay_min=1.0,
                delay_max=6.0,
                jitter_fraction=0.0,
            ))

    # 1 initial call + 5 retries; delays grow and are capped at delay_max.
    assert n_calls == 6
    assert sleep_calls == [0.0, 1.0, 3.0, 6.0, 6.0]
def test_errors_dont_block(loop):
    """Tasks that raise must not prevent other tasks from completing."""
    c = Center('127.0.0.1', 8017)
    w = Worker('127.0.0.2', 8018, c.ip, c.port, ncores=1)
    e = Executor((c.ip, c.port), start=False, loop=loop)

    @gen.coroutine
    def f():
        c.listen(c.port)
        yield w._start()
        yield e._start()

        L = [e.submit(inc, 1),
             e.submit(throws, 1),
             e.submit(inc, 2),
             e.submit(throws, 2)]

        # Bounded wait for the two good tasks to finish despite the bad ones.
        i = 0
        while not (L[0].status == L[2].status == 'finished'):
            i += 1
            if i == 1000:
                assert False
            yield gen.sleep(0.01)

        result = yield e._gather([L[0], L[2]])
        assert result == [2, 3]

        yield w._close()
        c.stop()

    loop.run_sync(f)
def test_compute_who_has(loop):
    """compute with who_has (set form, raw function) fetches from the named peer."""
    @gen.coroutine
    def f():
        c = Center(ip='127.0.0.1')
        c.listen(0)
        x = Worker(c.ip, c.port, ip='127.0.0.1')
        y = Worker(c.ip, c.port, ip='127.0.0.1')
        z = Worker(c.ip, c.port, ip='127.0.0.1')
        # x and y hold different values under the same key 'a'.
        x.data['a'] = 1
        y.data['a'] = 2
        yield [x._start(), y._start(), z._start()]

        zz = rpc(ip=z.ip, port=z.port)
        # Pointing who_has at x means z must compute inc(1) == 2.
        yield zz.compute(function=inc,
                         args=('a',),
                         who_has={'a': {x.address}},
                         key='b')
        assert z.data['b'] == 2

        # Pointing who_has at y means z must compute inc(2) == 3.
        yield zz.compute(function=inc,
                         args=('a',),
                         who_has={'a': {y.address}},
                         key='c')
        assert z.data['c'] == 3

        yield [x._close(), y._close(), z._close()]
        zz.close_streams()

    loop.run_sync(f, timeout=5)
def test_server(loop):
    """ Simple Server test. """

    async def f():
        server = Server({"ping": pingpong})
        # Port is undefined before listen(); accessing it must raise.
        with pytest.raises(ValueError):
            server.port
        await server.listen(8881)
        assert server.port == 8881
        assert server.address == ("tcp://%s:8881" % get_ip())

        # The server must be reachable under every spelling of its address.
        for addr in ("127.0.0.1:8881", "tcp://127.0.0.1:8881", server.address):
            comm = await connect(addr)

            n = await comm.write({"op": "ping"})
            assert isinstance(n, int)
            assert 4 <= n <= 1000

            response = await comm.read()
            assert response == b"pong"

            await comm.write({"op": "ping", "close": True})
            response = await comm.read()
            assert response == b"pong"

            await comm.close()

        server.stop()

    loop.run_sync(f)
def test_nanny_process_failure(loop):
    """A Nanny restarts its worker process after the worker dies mid-task."""
    c = Center('127.0.0.1', 8036)
    n = Nanny('127.0.0.1', 8037, 8038, '127.0.0.1', 8036, ncores=2)
    c.listen(c.port)

    @gen.coroutine
    def f():
        nn = rpc(ip=n.ip, port=n.port)  # NOTE(review): nn is unused here
        yield n._start()
        ww = rpc(ip=n.ip, port=n.worker_port)
        yield ww.update_data(data={'x': 1, 'y': 2})
        # Kill the worker process from the inside; the connection drop is expected.
        with ignoring(StreamClosedError):
            yield ww.compute(function=sys.exit, args=(0,), key='z')

        start = time()
        while n.process.is_alive():  # wait while process dies
            yield gen.sleep(0.01)
        assert time() - start < 2

        start = time()
        while not n.process.is_alive():  # wait while process comes back
            yield gen.sleep(0.01)
        assert time() - start < 2

        start = time()
        # The restarted worker must re-register with the center.
        while n.worker_address not in c.ncores:
            yield gen.sleep(0.01)
        assert time() - start < 2

        yield n._close()
        c.stop()

    loop.run_sync(f)
def test_fast_kill(loop):
    """Port-0 variant: Executor._restart() is fast and cancels in-flight tasks."""
    from distributed import Nanny, rpc
    c = Center('127.0.0.1')
    c.listen(0)
    a = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1')
    b = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1')
    e = Executor((c.ip, c.port), start=False, loop=loop)

    @gen.coroutine
    def f():
        yield a._start()
        yield b._start()
        # Wait until both workers have registered with the center.
        while len(c.ncores) < 2:
            yield gen.sleep(0.01)
        yield e._start()

        L = e.map(sleep, range(10))
        try:
            start = time()
            yield e._restart()
            assert time() - start < 5
            assert all(x.status == 'cancelled' for x in L)

            # The executor is usable again after restart.
            x = e.submit(inc, 1)
            result = yield x._result()
            assert result == 2
        finally:
            yield a._close()
            yield b._close()
            yield e._shutdown(fast=True)
            c.stop()

    loop.run_sync(f)
def test_worker_waits_for_center_to_come_up(loop):
    """A Worker pointed at a missing center blocks rather than erroring out."""
    @gen.coroutine
    def f():
        # No center listens on 8007, so this startup never completes.
        w = yield Worker("127.0.0.1", 8007)

    try:
        loop.run_sync(f, timeout=4)
    except TimeoutError:
        # Timing out here is the expected outcome.
        pass
def test_worker_waits_for_center_to_come_up(loop):
    """Explicit-start variant: Worker._start() blocks when the center is absent."""
    @gen.coroutine
    def f():
        w = Worker('127.0.0.1', 8007, ip='127.0.0.1')
        # No center listens on 8007, so this startup never completes.
        yield w._start()

    try:
        loop.run_sync(f, timeout=4)
    except TimeoutError:
        # Timing out here is the expected outcome.
        pass
def test_async_with(loop):
    """LocalCluster works as an async context manager; workers vanish on exit."""
    async def f():
        async with LocalCluster(processes=False, asynchronous=True) as cluster:
            w = cluster.workers
            assert w

        # After the context exits, the cluster is closed and the workers
        # collection is empty.
        assert not w

    loop.run_sync(f)
def test_locks(loop):
    """A held Lock makes a second short-timeout acquire raise TimeoutError."""
    async def f():
        async with Client(processes=False, asynchronous=True) as c:
            assert c.asynchronous == True
            async with Lock('x'):
                lock2 = Lock('x')
                # The lock is already held, so this acquire must time out.
                with pytest.raises(gen.TimeoutError):
                    await lock2.acquire(timeout=0.1)

    loop.run_sync(f)
def test_locks(loop):
    """Variant where a timed-out acquire returns False instead of raising."""
    async def f():
        async with Client(processes=False, asynchronous=True) as c:
            assert c.asynchronous
            async with Lock('x'):
                lock2 = Lock('x')
                # The lock is already held, so this acquire fails with False.
                result = await lock2.acquire(timeout=0.1)
                assert result is False

    loop.run_sync(f)
def test_worker_with_port_zero(loop):
    """A Worker started without an explicit port picks a real ephemeral port."""
    @gen.coroutine
    def f():
        c = Center('127.0.0.1')
        c.listen(8007)
        w = Worker(c.ip, c.port, ip='127.0.0.1')
        yield w._start()
        # An OS-assigned ephemeral port is an int above the privileged range.
        assert isinstance(w.port, int)
        assert w.port > 1024

    loop.run_sync(f)
def test_cluster(loop):
    """Cluster starts with two workers and can add a third at runtime."""
    with Cluster(scheduler_addr="127.0.0.1", scheduler_port=8786,
                 worker_addrs=["127.0.0.1", "127.0.0.1"]) as c:
        r = rpc(ip="127.0.0.1", port=8786)
        result = []
        # Poll until both initial workers have registered.
        while len(result) != 2:
            result = loop.run_sync(r.ncores)

        c.add_worker("127.0.0.1")

        # Poll until the third worker shows up too.
        while len(result) != 3:
            result = loop.run_sync(r.ncores)
def test_cluster(loop):
    """Port-8787 variant: Cluster starts with two workers and can add a third."""
    with Cluster(scheduler_addr='127.0.0.1', scheduler_port=8787,
                 worker_addrs=['127.0.0.1', '127.0.0.1']) as c:
        r = rpc(ip='127.0.0.1', port=8787)
        result = []
        # Poll until both initial workers have registered.
        while len(result) != 2:
            result = loop.run_sync(r.ncores)

        c.add_worker('127.0.0.1')

        # Poll until the third worker shows up too.
        while len(result) != 3:
            result = loop.run_sync(r.ncores)
def test_dask_submit_cli_writes_result_to_stdout(loop, tmpdir, valid_python_script):
    """dask-submit runs a valid script remotely and returns its stdout."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip='127.0.0.1', local_dir=str(tmpdir))
        yield remote_client._start()
        out, err = yield _submit('127.0.0.1:{0}'.format(remote_client.port),
                                 str(valid_python_script))
        assert b'hello world!' in out
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_dask_submit_cli_writes_result_to_stdout(loop, tmpdir, valid_python_script):
    """Double-quoted variant: dask-submit returns a valid script's stdout."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip="127.0.0.1", local_dir=str(tmpdir))
        yield remote_client._start()
        out, err = yield _submit("127.0.0.1:{0}".format(remote_client.port),
                                 str(valid_python_script))
        assert b"hello world!" in out
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_dask_submit_cli_writes_traceback_to_stdout(loop, tmpdir, invalid_python_script):
    """dask-submit surfaces a failing script's traceback on stderr."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip='127.0.0.1', local_dir=str(tmpdir))
        yield remote_client._start()
        out, err = yield _submit('127.0.0.1:{0}'.format(remote_client.port),
                                 str(invalid_python_script))
        assert b'Traceback' in err
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_cluster(loop):
    """Whitespace-variant duplicate: Cluster starts two workers, then adds one."""
    with Cluster(scheduler_addr='127.0.0.1', scheduler_port=8787,
                 worker_addrs=['127.0.0.1', '127.0.0.1']) as c:
        r = rpc(ip='127.0.0.1', port=8787)
        result = []
        # Poll until both initial workers have registered.
        while len(result) != 2:
            result = loop.run_sync(r.ncores)

        c.add_worker('127.0.0.1')

        # Poll until the third worker shows up too.
        while len(result) != 3:
            result = loop.run_sync(r.ncores)
def test_hostport(loop):
    """dask-scheduler --host host:port binds only to the given local interface."""
    with popen(['dask-scheduler', '--no-bokeh', '--host', '127.0.0.1:8978']):
        @gen.coroutine
        def f():
            yield [
                # The scheduler's main port can't be contacted from the outside
                assert_can_connect_locally_4(8978, 5.0),
            ]

        loop.run_sync(f)

        # A local client can still connect; no workers are attached.
        with Client('127.0.0.1:8978', loop=loop) as c:
            assert len(c.ncores()) == 0
def test_identity(loop):
    """identity() reports type 'Server' and a stable id across calls."""
    @gen.coroutine
    def f():
        server = Server({})
        server.listen(8887)
        remote = rpc(ip='127.0.0.1', port=8887)

        a = yield remote.identity()
        b = yield remote.identity()
        assert a['type'] == 'Server'
        # The id must be stable across repeated identity() calls.
        assert a['id'] == b['id']

        # Fix: release the listening port when done (the sibling version of
        # this test already stops the server; this one leaked it).
        server.stop()

    loop.run_sync(f)
def test_worker_waits_for_scheduler(loop):
    """A Worker started without a scheduler keeps waiting instead of failing."""
    @gen.coroutine
    def f():
        w = Worker("127.0.0.1", 8007)
        try:
            # No scheduler listens on 8007, so startup should outlive the timeout.
            # NOTE(review): mixes asyncio.wait_for into a gen.coroutine —
            # presumably fine on a tornado-on-asyncio loop; verify.
            yield asyncio.wait_for(w, 3)
        except asyncio.TimeoutError:
            pass
        else:
            assert False
        assert w.status not in ("closed", "running")
        yield w.close(timeout=0.1)

    loop.run_sync(f)
def test_remote_client_uploads_a_file(loop, tmpdir):
    """upload_file writes the payload into the RemoteClient's local directory."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip='127.0.0.1', local_dir=str(tmpdir))
        yield remote_client._start(0)
        remote_process = rpc(remote_client.address)
        upload = yield remote_process.upload_file(filename='script.py',
                                                  file_payload='x=1')

        # 'x=1' is 3 bytes; the file must land in tmpdir verbatim.
        assert upload == {'status': 'OK', 'nbytes': 3}
        assert tmpdir.join('script.py').read() == "x=1"

        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_worker_waits_for_scheduler(loop):
    """tornado-timeout variant: a scheduler-less Worker waits instead of failing."""
    @gen.coroutine
    def f():
        w = Worker("127.0.0.1", 8007)
        try:
            # No scheduler listens on 8007, so startup should outlive the timeout.
            yield gen.with_timeout(timedelta(seconds=3), w)
        except TimeoutError:
            pass
        else:
            assert False
        assert w.status not in ("closed", "running")
        yield w.close(timeout=0.1)

    loop.run_sync(f)
def test_retry0_raises_immediately(loop):
    """retry with count=0 makes exactly one attempt and re-raises immediately."""
    # test that using count=0 raises after 1 call
    n_calls = 0

    async def coro():
        nonlocal n_calls
        n_calls += 1
        raise RuntimeError(f"RT_ERROR {n_calls}")

    with pytest.raises(RuntimeError, match="RT_ERROR 1"):
        loop.run_sync(lambda: retry(coro, count=0, delay_min=-1, delay_max=-1))

    assert n_calls == 1
def test_remote_client_execution_outputs_stderr(loop, tmpdir, invalid_python_script):
    """Executing a broken script returns its stderr and nonzero returncode."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip='127.0.0.1', local_dir=str(tmpdir))
        yield remote_client._start(0)
        rr = rpc(remote_client.address)
        # 'a+1' references an undefined name, so execution must fail.
        yield rr.upload_file(filename='script.py', file_payload='a+1')
        message = yield rr.execute(filename='script.py')
        assert b'\'a\' is not defined' in message['stderr']
        assert message['returncode'] == 1
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_remote_client_execution_outputs_to_stdout(loop, tmpdir):
    """Executing a printing script returns its stdout and a zero returncode."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip='127.0.0.1', local_dir=str(tmpdir))
        yield remote_client._start(0)
        rr = rpc(remote_client.address)
        yield rr.upload_file(filename='script.py',
                             file_payload='print("hello world!")')
        message = yield rr.execute(filename='script.py')
        # print() appends the platform line separator.
        assert message['stdout'] == b'hello world!' + os.linesep.encode()
        assert message['returncode'] == 0
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_identity(loop):
    """Context-manager variant: identity() gives type 'Server' and a stable id."""
    @gen.coroutine
    def f():
        server = Server({})
        server.listen(8887)

        with rpc(('127.0.0.1', 8887)) as remote:
            a = yield remote.identity()
            b = yield remote.identity()
            assert a['type'] == 'Server'
            # The id must be stable across repeated identity() calls.
            assert a['id'] == b['id']

        server.stop()

    loop.run_sync(f)
def test_remote_client_execution_outputs_stderr(loop, tmpdir, invalid_python_script):
    """Double-quoted variant: a broken script yields stderr and returncode 1."""
    @gen.coroutine
    def test():
        remote_client = RemoteClient(ip="127.0.0.1", local_dir=str(tmpdir))
        yield remote_client._start(0)
        rr = rpc(remote_client.address)
        # "a+1" references an undefined name, so execution must fail.
        yield rr.upload_file(filename="script.py", file_payload="a+1")
        message = yield rr.execute(filename="script.py")
        assert b"'a' is not defined" in message["stderr"]
        assert message["returncode"] == 1
        yield remote_client._close()

    loop.run_sync(test, timeout=5)
def test_compression(compression, serialize, loop):
    """A large payload round-trips intact under the configured compression."""
    with dask.config.set(compression=compression):

        async def f():
            server = Server({"echo": serialize})
            await server.listen("tcp://")

            with rpc(server.address) as r:
                # 1 MB of compressible data.
                data = b"1" * 1000000
                result = await r.echo(x=to_serialize(data))
                assert result == {"result": data}

            server.stop()

        loop.run_sync(f)
def test_rpc_serialization(loop):
    """rpc honors its serializers list: msgpack-only cannot send a function."""
    @gen.coroutine
    def f():
        server = Server({'echo': echo_serialize})
        server.listen('tcp://')

        # msgpack alone cannot serialize a Python function.
        with rpc(server.address, serializers=['msgpack']) as r:
            with pytest.raises(TypeError):
                yield r.echo(x=to_serialize(inc))

        # Adding pickle makes the function round-trip.
        with rpc(server.address, serializers=['msgpack', 'pickle']) as r:
            result = yield r.echo(x=to_serialize(inc))
            assert result == {'result': inc}

        # Fix: stop the server so the listener is released (the sibling
        # version of this test already does this; this one leaked it).
        server.stop()

    loop.run_sync(f)
def test_rpc_serialization(loop):
    """async variant: rpc honors its serializers list."""
    async def f():
        server = Server({"echo": echo_serialize})
        await server.listen("tcp://")

        # msgpack alone cannot serialize a Python function.
        async with rpc(server.address, serializers=["msgpack"]) as r:
            with pytest.raises(TypeError):
                await r.echo(x=to_serialize(inc))

        # Adding pickle makes the function round-trip.
        async with rpc(server.address, serializers=["msgpack", "pickle"]) as r:
            result = await r.echo(x=to_serialize(inc))
            assert result == {"result": inc}

        server.stop()

    loop.run_sync(f)
def test_compression(compression, serialize, loop):
    """gen.coroutine variant: a large payload round-trips under compression."""
    with dask.config.set(compression=compression):

        @gen.coroutine
        def f():
            server = Server({'echo': serialize})
            server.listen('tcp://')

            with rpc(server.address) as r:
                # 1 MB of compressible data.
                data = b'1' * 1000000
                result = yield r.echo(x=to_serialize(data))
                assert result == {'result': data}

            server.stop()

        loop.run_sync(f)
def test_occupancy(loop):
    """The Occupancy widget populates its data source and exposes a bokeh Figure."""
    with cluster(nanny=True) as (s, [a, b]):
        rm = Occupancy(('127.0.0.1', s['port']), interval=0.01)
        for k in ['host', 'processing', 'waiting']:
            assert k in rm.cds.data

        # Wait (bounded) for the first update to arrive.
        start = time()
        while not rm.cds.data['host']:
            loop.run_sync(lambda: gen.sleep(0.05))
            assert time() < start + 2

        # One row per worker; all columns stay in lockstep.
        assert (len(rm.cds.data['host']) ==
                len(rm.cds.data['processing']) ==
                len(rm.cds.data['waiting']) == 2)

        assert isinstance(rm.figure, Figure)
        rm.stream.close()
def test_rpc_serialization(loop):
    """rpc honors its serializers list: msgpack-only cannot send a function."""
    @gen.coroutine
    def f():
        server = Server({'echo': echo_serialize})
        server.listen('tcp://')

        # msgpack alone cannot serialize a Python function.
        with rpc(server.address, serializers=['msgpack']) as r:
            with pytest.raises(TypeError):
                yield r.echo(x=to_serialize(inc))

        # Adding pickle makes the function round-trip.
        with rpc(server.address, serializers=['msgpack', 'pickle']) as r:
            result = yield r.echo(x=to_serialize(inc))
            assert result == {'result': inc}

        server.stop()

    loop.run_sync(f)
def test_rpc(loop):
    """rpc proxies named handlers: remote.ping() dispatches to the 'ping' op."""
    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        server.listen(8887)

        remote = rpc(ip='127.0.0.1', port=8887)
        response = yield remote.ping()
        assert response == b'pong'

        # Extra keyword args (close=True) are forwarded to the handler.
        response = yield remote.ping(close=True)
        assert response == b'pong'

        server.stop()

    loop.run_sync(f)
def test_restart(loop):
    """Executor._restart() clears scheduler/center state and cancels futures."""
    from distributed import Nanny, rpc
    c = Center('127.0.0.1', 8006)
    a = Nanny('127.0.0.1', 8007, 8008, '127.0.0.1', 8006, ncores=2)
    b = Nanny('127.0.0.1', 8009, 8010, '127.0.0.1', 8006, ncores=2)
    c.listen(c.port)

    @gen.coroutine
    def f():
        yield a._start()
        yield b._start()

        e = Executor((c.ip, c.port), start=False, loop=loop)
        yield e._start()
        assert e.scheduler.ncores == {a.worker_address: 2, b.worker_address: 2}

        x = e.submit(inc, 1)
        y = e.submit(inc, x)
        yield y._result()

        cc = rpc(ip=c.ip, port=c.port)
        who_has = yield cc.who_has()
        try:
            # Before restart: scheduler and center agree on data placement.
            assert e.scheduler.who_has == who_has
            assert set(e.scheduler.who_has) == {x.key, y.key}

            # _restart() returns the same executor instance.
            f = yield e._restart()
            assert f is e

            assert len(e.scheduler.stacks) == 2
            assert len(e.scheduler.processing) == 2

            # After restart: all data and bookkeeping are cleared.
            who_has = yield cc.who_has()
            assert not who_has
            assert not e.scheduler.who_has

            assert x.cancelled()
            assert y.cancelled()
        finally:
            yield a._close()
            yield b._close()
            yield e._shutdown(fast=True)
            c.stop()

    loop.run_sync(f)
def test_rpc(loop):
    """Context-manager variant: rpc closes its streams on exit."""
    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        server.listen(8887)

        with rpc(ip='127.0.0.1', port=8887) as remote:
            response = yield remote.ping()
            assert response == b'pong'
            response = yield remote.ping(close=True)
            assert response == b'pong'

        # Leaving the context must close all streams and mark the rpc closed.
        assert not remote.streams
        assert remote.status == 'closed'

        server.stop()

    loop.run_sync(f)
def test_server(loop):
    """Minimal variant: ping/pong round-trips over a raw stream, then close."""
    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        server.listen(8887)

        stream = yield connect('127.0.0.1', 8887)

        yield write(stream, {'op': 'ping'})
        response = yield read(stream)
        assert response == b'pong'

        # 'close': True asks the server to close the connection after replying.
        yield write(stream, {'op': 'ping', 'close': True})
        response = yield read(stream)
        assert response == b'pong'

        server.stop()

    loop.run_sync(f)
def test_large_packets(loop):
    """ tornado has a 100MB cap by default """
    @gen.coroutine
    def f():
        server = Server({'echo': echo})
        server.listen(8887)

        data = b'0' * int(200e6)  # slightly more than 100MB
        conn = rpc(ip='127.0.0.1', port=8887)
        # Both a bare bytes payload and a dict containing it must round-trip.
        result = yield conn.echo(x=data)
        assert result == data

        d = {'x': data}
        result = yield conn.echo(x=d)
        assert result == d

        server.stop()

    loop.run_sync(f)
def test_TextProgressBar_empty(loop, capsys):
    """A TextProgressBar over zero futures finishes immediately with a full bar."""
    @gen.coroutine
    def f():
        s = Scheduler(loop=loop)
        done = s.start(0)
        a = Worker(s.ip, s.port, loop=loop, ncores=1)
        b = Worker(s.ip, s.port, loop=loop, ncores=1)
        yield [a._start(0), b._start(0)]

        # No futures to track: the bar should complete as soon as it listens.
        progress = TextProgressBar([], scheduler=(s.ip, s.port),
                                   start=False, interval=0.01)
        yield progress.listen()

        assert progress.status == 'finished'
        check_bar_completed(capsys)

        yield [a._close(), b._close()]
        s.close()
        yield done

    loop.run_sync(f)
def test_rpc_with_many_connections(loop):
    """A single rpc object serves many concurrent callers and closes cleanly."""
    remote = rpc(ip='127.0.0.1', port=8887)

    @gen.coroutine
    def g():
        for i in range(10):
            yield remote.ping()

    @gen.coroutine
    def f():
        server = Server({'ping': pingpong})
        server.listen(8887)

        # 10 concurrent coroutines x 10 pings each through one rpc object.
        yield [g() for i in range(10)]

        server.stop()

        remote.close_streams()
        assert all(stream.closed() for stream in remote.streams)

    loop.run_sync(f)
def test_file_descriptors_dont_leak(loop):
    """Starting and closing a scheduler/worker returns the fd count to baseline."""
    psutil = pytest.importorskip('psutil')
    proc = psutil.Process()
    before = proc.num_fds()

    s = Scheduler()
    s.start(0)
    w = Worker(s.ip, s.port)

    @gen.coroutine
    def f():
        yield w._start(0)
        yield w._close()

    loop.run_sync(f)
    during = proc.num_fds()  # NOTE(review): 'during' is captured but unused
    s.stop()
    s.close()

    # Bounded wait for the OS to reclaim descriptors after shutdown.
    start = time()
    while proc.num_fds() > before:
        loop.run_sync(lambda: gen.sleep(0.01))
        assert time() < start + 5
def test_nanny(loop):
    """Nanny can kill, re-instantiate, and terminate its worker process via rpc."""
    c = Center('127.0.0.1', 8026)
    n = Nanny('127.0.0.1', 8027, 8028, '127.0.0.1', 8026, ncores=2)
    c.listen(c.port)

    @gen.coroutine
    def f():
        nn = rpc(ip=n.ip, port=n.port)
        yield n._start()
        assert n.process.is_alive()
        assert c.ncores[n.worker_address] == 2
        assert c.nannies[n.worker_address] > 8000

        # kill() tears down the worker and unregisters it from the center.
        yield nn.kill()
        assert n.worker_address not in c.ncores
        assert n.worker_address not in c.nannies
        assert not n.process

        # Killing again is a no-op.
        yield nn.kill()
        assert n.worker_address not in c.ncores
        assert n.worker_address not in c.nannies
        assert not n.process

        # instantiate() brings the worker back and re-registers it.
        yield nn.instantiate()
        assert n.process.is_alive()
        assert c.ncores[n.worker_address] == 2
        assert c.nannies[n.worker_address] > 8000

        yield nn.terminate()
        assert not n.process

        if n.process:
            n.process.terminate()

        yield n._close()
        c.stop()

    loop.run_sync(f)