def test_cleanup():
    """Closing a cluster must release its scheduler port so a new
    cluster can immediately rebind it."""
    c = LocalCluster(2, scheduler_port=0, silence_logs=False, diagnostic_port=None)
    port = c.scheduler.port
    c.close()
    c2 = LocalCluster(2, scheduler_port=port, silence_logs=False, diagnostic_port=None)
    # BUG FIX: the original ended with a second `c.close()`, leaving `c2`
    # (and its bound scheduler port) leaked for subsequent tests.
    c2.close()
def test_cleanup():
    """A scheduler port freed by close() can be rebound by a fresh LocalCluster."""
    with clean(threads=False):
        first = LocalCluster(n_workers=2, silence_logs=False, dashboard_address=":0")
        freed_port = first.scheduler.port
        first.close()
        # Rebinding the exact port the first cluster held proves close()
        # actually released it.
        second = LocalCluster(
            n_workers=2,
            scheduler_port=freed_port,
            silence_logs=False,
            dashboard_address=":0",
        )
        second.close()
def test_cleanup():
    """Closing a cluster must release its scheduler port so a new
    cluster can immediately rebind it."""
    c = LocalCluster(2, scheduler_port=0, silence_logs=False, diagnostics_port=None)
    port = c.scheduler.port
    c.close()
    c2 = LocalCluster(2, scheduler_port=port, silence_logs=False, diagnostics_port=None)
    # BUG FIX: the original ended with a second `c.close()`, leaving `c2`
    # (and its bound scheduler port) leaked for subsequent tests.
    c2.close()
def test_close_twice(loop):
    """Calling close() a second time must be a silent no-op: no errors
    may reach the 'tornado.application' logger."""
    cluster = LocalCluster()
    with Client(cluster.scheduler_address) as client:
        futures = client.map(inc, range(100))
        client.gather(futures)
        with captured_logger('tornado.application') as log:
            cluster.close()
            cluster.close()  # second close must not raise or log
            sleep(0.5)
            captured = log.getvalue()
            print(captured)
            assert not captured
def test_starts_up_sync(loop):
    """Synchronous construction brings up all requested workers before returning."""
    kwargs = dict(
        n_workers=2,
        loop=loop,
        processes=False,
        scheduler_port=0,
        dashboard_address=None,
    )
    cluster = LocalCluster(**kwargs)
    try:
        # Both workers must already be registered with the scheduler.
        worker_count = len(cluster.scheduler.workers)
        assert worker_count == 2
    finally:
        cluster.close()
def test_dont_select_closed_worker():
    """get_client() must not hand back a client from an already-closed cluster.

    Regression test for https://github.com/dask/distributed/issues/2840.
    """
    with clean(threads=False):
        first_cluster = LocalCluster(n_workers=0)
        first_client = Client(first_cluster)
        first_cluster.scale(2)
        assert get_client() == first_client
        first_client.close()
        first_cluster.close()

        # After tearing everything down, a brand-new cluster/client pair
        # must become the one get_client() resolves to.
        second_cluster = LocalCluster(n_workers=0)
        second_client = Client(second_cluster)
        second_cluster.scale(2)
        assert get_client() == second_client
        second_cluster.close()
        second_client.close()