def test_task_names_are_passed_to_submit(self, monkeypatch):
    """The current task's full name is used as the prefix of the dask key."""
    mock_client = MagicMock()
    monkeypatch.setattr(prefect.engine.executors.dask, "Client", mock_client)
    executor = DaskExecutor()
    with executor.start():
        with prefect.context(task_full_name="FISH!"):
            executor.submit(lambda: None)
    # The executor enters the Client as a context manager; inspect the
    # kwargs of the underlying ``submit`` call.
    submit_mock = mock_client.return_value.__enter__.return_value.submit
    _, submit_kwargs = submit_mock.call_args
    assert submit_kwargs["key"].startswith("FISH!")
def test_context_tags_are_passed_to_submit(self, monkeypatch):
    """``dask-resource:*`` task tags are translated into dask ``resources``."""
    mock_client = MagicMock()
    monkeypatch.setattr(prefect.engine.executors.dask, "Client", mock_client)
    executor = DaskExecutor()
    with executor.start():
        with prefect.context(task_tags=["dask-resource:GPU=1"]):
            executor.submit(lambda: None)
    # The executor enters the Client as a context manager; inspect the
    # kwargs of the underlying ``submit`` call.
    submit_mock = mock_client.return_value.__enter__.return_value.submit
    _, submit_kwargs = submit_mock.call_args
    assert submit_kwargs["resources"] == {"GPU": 1.0}
def test_local_cluster_adapt(self):
    """``adapt_kwargs`` given to the executor are forwarded to the
    cluster's ``adapt`` method."""
    adapt_kwargs = {"minimum": 1, "maximum": 1}
    called_with = None

    class MyCluster(distributed.LocalCluster):
        # Spy subclass: record the kwargs ``adapt`` is invoked with,
        # then defer to the real implementation.
        def adapt(self, **kwargs):
            nonlocal called_with
            called_with = kwargs
            super().adapt(**kwargs)

    executor = DaskExecutor(
        cluster_class=MyCluster,
        cluster_kwargs={"processes": False, "n_workers": 0},
        adapt_kwargs=adapt_kwargs,
    )
    assert executor.adapt_kwargs == adapt_kwargs
    with executor.start():
        # Smoke-test that the adaptive cluster still executes work.
        res = executor.wait(executor.submit(lambda x: x + 1, 1))
        assert res == 2
    # ``adapt`` was called exactly once with the configured kwargs.
    assert called_with == adapt_kwargs
def test_start_local_cluster(self):
    """With no address given, the executor defaults to a quiet in-process
    ``LocalCluster`` and can run work on it."""
    executor = DaskExecutor(cluster_kwargs={"processes": False})
    assert executor.cluster_class == distributed.LocalCluster
    # User kwargs are merged with the log-silencing default.
    expected_kwargs = {
        "processes": False,
        "silence_logs": logging.CRITICAL,
    }
    assert executor.cluster_kwargs == expected_kwargs
    with executor.start():
        future = executor.submit(lambda x: x + 1, 1)
        assert executor.wait(future) == 2
def test_connect_to_running_cluster(self):
    """Given an address, the executor connects to the existing scheduler
    instead of creating a cluster of its own."""
    with distributed.Client(processes=False, set_as_default=False) as client:
        executor = DaskExecutor(address=client.scheduler.address)
        assert executor.address == client.scheduler.address
        # No cluster is configured when connecting to a running one.
        assert executor.cluster_class is None
        assert executor.cluster_kwargs is None
        assert executor.client_kwargs == {"set_as_default": False}
        with executor.start():
            future = executor.submit(lambda x: x + 1, 1)
            assert executor.wait(future) == 2
def test_exit_early_with_external_or_inproc_cluster_waits_for_pending_futures(
    self, kind, monkeypatch
):
    """On early exit the executor waits for *running* futures but drops
    *pending* ones — for both external and in-process clusters.

    ``kind`` is a parametrized fixture selecting ``"external"`` or
    ``"inproc"``.
    """
    # Use a unique environment variable as an out-of-band signal from the
    # worker tasks back to the test.
    key = "TESTING_%s" % uuid.uuid4().hex
    monkeypatch.setenv(key, "initial")

    def slow():
        # Running when shutdown starts; should be allowed to finish.
        time.sleep(0.5)
        os.environ[key] = "changed"

    def pending(x):
        # This function shouldn't ever start, since it's pending when the
        # shutdown signal is received
        os.environ[key] = "changed more"

    if kind == "external":
        with distributed.Client(processes=False, set_as_default=False) as client:
            executor = DaskExecutor(address=client.scheduler.address)
            with executor.start():
                fut = executor.submit(slow)
                fut2 = executor.submit(pending, fut)  # noqa
                time.sleep(0.2)
            # ``slow`` ran to completion; ``pending`` never started.
            assert os.environ[key] == "changed"
    elif kind == "inproc":
        executor = DaskExecutor(cluster_kwargs={"processes": False})
        with executor.start():
            fut = executor.submit(slow)
            fut2 = executor.submit(pending, fut)  # noqa
            time.sleep(0.2)
        # ``slow`` ran to completion; ``pending`` never started.
        assert os.environ[key] == "changed"

    # Executor state is fully torn down once ``start()`` exits.
    assert executor.client is None
    assert executor._futures is None
    assert executor._should_run_event is None
def test_temporary_cluster_forcefully_cancels_pending_tasks(self, tmpdir):
    """Shutting down a temporary cluster cancels in-flight tasks rather
    than waiting for them to finish."""
    sentinel_path = tmpdir.join("signal")

    def slow():
        # Would take 10 seconds and drop a sentinel file if it were ever
        # allowed to run to completion.
        time.sleep(10)
        with open(sentinel_path, "w") as f:
            f.write("Got here")

    executor = DaskExecutor()
    with executor.start():
        start = time.time()
        fut = executor.submit(slow)  # noqa
        time.sleep(0.1)
    stop = time.time()
    # Cluster shutdown happened well before the task could complete...
    assert stop - start < 5
    # ...so the sentinel file was never written.
    assert not os.path.exists(sentinel_path)