def test_get_scale_up_kwargs(loop):
    """``get_scale_up_kwargs`` requests one worker when idle and multiplies
    the request by ``scale_factor`` once there is work on the cluster."""
    with LocalCluster(
        0,
        scheduler_port=0,
        silence_logs=False,
        diagnostics_port=None,
        loop=loop,
    ) as cluster:
        adapt = Adaptive(cluster.scheduler, cluster, interval=100, scale_factor=3)
        # No tasks yet: ask for a single worker.
        assert adapt.get_scale_up_kwargs() == {'n': 1}

        with Client(cluster, loop=loop) as client:
            future = client.submit(lambda x: x + 1, 1)
            assert future.result() == 2
            assert client.ncores()
            # With occupancy recorded, the request scales by scale_factor.
            assert adapt.get_scale_up_kwargs() == {'n': 3}
def test_no_more_workers_than_tasks():
    """With only one (slow) task on the scheduler, adaptive scaling must not
    allocate more than one worker, regardless of the expected task duration.

    NOTE(review): the original referenced ``client`` in ``finally`` even
    though it is assigned inside ``try``; if ``Adaptive`` or ``Client``
    construction raised, cleanup failed with NameError and masked the real
    error.  ``client`` is now pre-bound and the close is guarded.
    """
    loop = IOLoop.current()
    cluster = yield LocalCluster(
        0,
        scheduler_port=0,
        silence_logs=False,
        processes=False,
        diagnostics_port=None,
        loop=loop,
        asynchronous=True,
    )
    yield cluster._start()
    client = None
    try:
        adapt = Adaptive(
            cluster.scheduler, cluster, minimum=0, maximum=4, interval='10 ms'
        )
        client = yield Client(cluster, asynchronous=True, loop=loop)
        # Pretend slowinc tasks are very long-running so the heuristic would
        # otherwise be tempted to scale up toward ``maximum``.
        cluster.scheduler.task_duration['slowinc'] = 1000
        yield client.submit(slowinc, 1, delay=0.100)
        assert len(cluster.scheduler.workers) <= 1
    finally:
        if client is not None:
            yield client._close()
        yield cluster._close()
def test_avoid_churn():
    """We want to avoid creating and deleting workers frequently.

    Instead we want to wait a few beats before removing a worker in case the
    user is taking a brief pause between work.
    """
    cluster = yield LocalCluster(
        0,
        asynchronous=True,
        processes=False,
        scheduler_port=0,
        silence_logs=False,
        dashboard_address=None,
    )
    client = yield Client(cluster, asynchronous=True)
    try:
        adapt = Adaptive(cluster.scheduler, cluster, interval="20 ms", wait_count=5)
        # Submit work with small pauses between tasks; the pauses are shorter
        # than wait_count * interval, so no worker should be retired.
        for step in range(10):
            yield client.submit(slowinc, step, delay=0.040)
            yield gen.sleep(0.040)
        # Exactly one scale-up event and never a scale-down.
        assert frequencies(pluck(1, adapt.log)) == {"up": 1}
    finally:
        yield client.close()
        yield cluster.close()
def test_adaptive_config():
    """Adaptive picks up defaults from the dask configuration, while
    explicitly-passed arguments (``interval``) still take effect."""
    overrides = {
        "distributed.adaptive.minimum": 10,
        "distributed.adaptive.wait-count": 8,
    }
    with dask.config.set(overrides):
        adapt = Adaptive(interval="5s")
        assert adapt.minimum == 10
        assert adapt.wait_count == 8
        # interval="5s" is parsed into seconds.
        assert adapt.interval == 5
        # maximum was not configured, so it stays unbounded.
        assert adapt.maximum == math.inf
def test_without_cluster(c, s):
    """An Adaptive bound only to a scheduler (no cluster object) still
    produces scaling recommendations via the scheduler RPC."""
    adapt = Adaptive(scheduler=s)
    future = c.submit(inc, 1)
    # Wait until the scheduler has at least one task registered.
    while not s.tasks:
        yield gen.sleep(0.01)
    recommendation = yield c.scheduler.adaptive_recommendations()
    assert recommendation["status"] == "up"
def test_min_max():
    """Adaptive respects the ``minimum`` and ``maximum`` worker bounds:
    it scales up to at least one worker when idle, caps growth at two even
    under heavy load, and scales back down to the minimum when work is gone.

    NOTE(review): the original referenced ``c`` in ``finally`` even though
    it is assigned inside ``try``; if ``Adaptive`` construction raised,
    cleanup failed with NameError.  ``c`` is now pre-bound and guarded.
    """
    loop = IOLoop.current()
    cluster = yield LocalCluster(
        0,
        scheduler_port=0,
        silence_logs=False,
        processes=False,
        dashboard_address=None,
        loop=loop,
        asynchronous=True,
    )
    yield cluster._start()
    c = None
    try:
        adapt = Adaptive(
            cluster.scheduler,
            cluster,
            minimum=1,
            maximum=2,
            interval="20 ms",
            wait_count=10,
        )
        c = yield Client(cluster, asynchronous=True, loop=loop)

        # Even with no work, the minimum of one worker should come up.
        start = time()
        while not cluster.scheduler.workers:
            yield gen.sleep(0.01)
            assert time() < start + 1
        yield gen.sleep(0.2)
        assert len(cluster.scheduler.workers) == 1
        assert frequencies(pluck(1, adapt.log)) == {"up": 1}

        # Plenty of tasks: scale up, but never beyond the maximum of two.
        futures = c.map(slowinc, range(100), delay=0.1)
        start = time()
        while len(cluster.scheduler.workers) < 2:
            yield gen.sleep(0.01)
            assert time() < start + 1
        assert len(cluster.scheduler.workers) == 2
        yield gen.sleep(0.5)
        assert len(cluster.scheduler.workers) == 2
        assert len(cluster.workers) == 2
        assert frequencies(pluck(1, adapt.log)) == {"up": 2}

        # Work released: scale back down, but not below the minimum of one.
        del futures
        start = time()
        while len(cluster.scheduler.workers) != 1:
            yield gen.sleep(0.01)
            assert time() < start + 2
        assert frequencies(pluck(1, adapt.log)) == {"up": 2, "down": 1}
    finally:
        if c is not None:
            yield c.close()
        yield cluster.close()
def test_adapt_quickly():
    """Adaptive scales up quickly when lots of work appears, but avoids
    churning workers for small or sequential workloads.

    We want to avoid creating and deleting workers frequently; instead we
    wait a few beats (``wait_count``) before removing a worker in case the
    user is taking a brief pause between work.
    """
    cluster = yield LocalCluster(
        0,
        asynchronous=True,
        processes=False,
        scheduler_port=0,
        silence_logs=False,
        dashboard_address=None,
    )
    client = yield Client(cluster, asynchronous=True)
    adapt = Adaptive(cluster.scheduler, cluster, interval=20, wait_count=5, maximum=10)
    try:
        future = client.submit(slowinc, 1, delay=0.100)
        yield wait(future)
        # A single task yields exactly one adaptive event (the initial "up").
        assert len(adapt.log) == 1

        # Scale up when there is plenty of available work
        futures = client.map(slowinc, range(1000), delay=0.100)
        # Wait until a second event (beyond the initial "up") is logged.
        while frequencies(pluck(1, adapt.log)) == {"up": 1}:
            yield gen.sleep(0.01)
        assert len(adapt.log) == 2
        assert "up" in adapt.log[-1]
        # The log entry carries a dict payload with the requested worker
        # count "n"; it should be a multi-worker request capped at maximum.
        d = [x for x in adapt.log[-1] if isinstance(x, dict)][0]
        assert 2 < d["n"] <= adapt.maximum

        # Let the cluster actually reach the maximum worker count.
        while len(cluster.scheduler.workers) < adapt.maximum:
            yield gen.sleep(0.01)

        del futures

        # With the work released, scale back down to a single worker.
        while len(cluster.scheduler.workers) > 1:
            yield gen.sleep(0.01)

        # Don't scale up for large sequential computations
        x = yield client.scatter(1)
        for i in range(100):
            x = client.submit(slowinc, x)

        yield gen.sleep(0.1)
        assert len(cluster.scheduler.workers) == 1
    finally:
        yield client.close()
        yield cluster.close()
def test_adaptive_local_cluster(loop):
    """An adaptive LocalCluster grows when work arrives and shrinks back to
    zero workers after the work (and its results) are released."""
    with LocalCluster(
        0,
        scheduler_port=0,
        silence_logs=False,
        diagnostics_port=None,
        loop=loop,
    ) as cluster:
        adapt = Adaptive(cluster.scheduler, cluster, interval=100)
        with Client(cluster, loop=loop) as client:
            # Starts with no workers at all.
            assert not client.ncores()

            future = client.submit(lambda x: x + 1, 1)
            assert future.result() == 2
            assert client.ncores()

            sleep(0.1)
            # Worker is kept while the result is still referenced.
            assert client.ncores()

            del future

            start = time()
            while cluster.scheduler.ncores:
                sleep(0.01)
                assert time() < start + 5

            assert not client.ncores()
def test_adaptive_local_cluster_multi_workers():
    """Adaptive repeatedly scales a LocalCluster up for batches of work,
    down to zero when idle, and back up again for a fresh batch.

    NOTE(review): the original referenced ``c`` in ``finally`` even though
    it is assigned inside ``try``; if ``Adaptive`` construction raised,
    cleanup failed with NameError.  ``c`` is now pre-bound and guarded.
    Dead commented-out ``cluster.workers`` assertions were removed.
    """
    cluster = yield LocalCluster(
        0,
        scheduler_port=0,
        silence_logs=False,
        processes=False,
        diagnostics_port=None,
        asynchronous=True,
    )
    c = None
    try:
        # Retiring workers mid-computation causes transient task failures;
        # raise the tolerance so the test does not flake.
        cluster.scheduler.allowed_failures = 1000
        alc = Adaptive(cluster.scheduler, cluster, interval=100)
        c = yield Client(cluster, asynchronous=True)

        futures = c.map(slowinc, range(100), delay=0.01)

        # Scale up from zero once work arrives.
        start = time()
        while not cluster.scheduler.workers:
            yield gen.sleep(0.01)
            assert time() < start + 15, alc.log

        yield c.gather(futures)
        del futures

        # With results released, scale all the way back down to zero.
        start = time()
        while cluster.scheduler.workers:
            yield gen.sleep(0.01)
            assert time() < start + 15, alc.log

        assert not cluster.scheduler.workers
        yield gen.sleep(0.2)
        assert not cluster.scheduler.workers

        # A fresh batch of work should scale the cluster up again.
        futures = c.map(slowinc, range(100), delay=0.01)
        yield c.gather(futures)
    finally:
        if c is not None:
            yield c._close()
        yield cluster._close()
def __init__(self, *args, **kwargs):
    # Consume the subclass-specific ``min_size`` keyword (default 0) before
    # forwarding the remaining arguments to the base class unchanged.
    self.min_size = kwargs.pop("min_size", 0)
    # NOTE(review): calls Adaptive.__init__ directly instead of super();
    # presumably fine for a single-inheritance test helper — confirm if this
    # class is ever used with multiple inheritance.
    Adaptive.__init__(self, *args, **kwargs)