Code example #1
File: test_executors.py  Project: Marlin-Na/prefect
    def test_executor_logs_worker_events(self, caplog):
        caplog.set_level(logging.DEBUG, logger="prefect")
        with distributed.Client(n_workers=1,
                                processes=False,
                                set_as_default=False) as client:
            executor = DaskExecutor(address=client.scheduler.address)
            with executor.start():
                client.cluster.scale(4)
                while len(client.scheduler_info()["workers"]) < 4:
                    time.sleep(0.1)
                client.cluster.scale(1)
                while len(client.scheduler_info()["workers"]) > 1:
                    time.sleep(0.1)

        assert any("Worker %s added" == rec.msg for rec in caplog.records)
        assert any("Worker %s removed" == rec.msg for rec in caplog.records)
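
Outside of pytest, the same worker events can be surfaced by enabling DEBUG logging on the "prefect" logger (the level the test sets via caplog) before running anything. A minimal sketch using only the standard logging module:

    import logging

    # The "Worker ... added/removed" messages are emitted at DEBUG level on the
    # "prefect" logger, so they stay hidden at the usual INFO default.
    logging.basicConfig()
    logging.getLogger("prefect").setLevel(logging.DEBUG)
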
Code example #2
File: test_executors.py  Project: Marlin-Na/prefect
    def test_connect_to_running_cluster(self, caplog):
        with distributed.Client(processes=False,
                                set_as_default=False) as client:
            address = client.scheduler.address
            executor = DaskExecutor(address=address)
            assert executor.address == address
            assert executor.cluster_class is None
            assert executor.cluster_kwargs is None
            assert executor.client_kwargs == {"set_as_default": False}

            with executor.start():
                res = executor.wait(executor.submit(lambda x: x + 1, 1))
                assert res == 2

        exp = f"Connecting to an existing Dask cluster at {address}"
        assert any(exp in rec.message for rec in caplog.records)
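
The tests drive the executor directly through start()/submit()/wait(). In normal use the executor is attached to a flow instead; a rough sketch, assuming Prefect 1.x and a placeholder scheduler address:

    from prefect import Flow, task
    from prefect.executors import DaskExecutor

    @task
    def add_one(x):
        return x + 1

    # Placeholder address of an already-running Dask scheduler.
    address = "tcp://127.0.0.1:8786"

    with Flow("existing-cluster-example") as flow:
        add_one(1)

    # Reuse the running cluster instead of creating a temporary one per run.
    flow.executor = DaskExecutor(address=address)
    flow.run()
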
Code example #3
    def test_temporary_cluster_forcefully_cancels_pending_tasks(self, tmpdir):
        filename = tmpdir.join("signal")

        def slow():
            time.sleep(10)
            with open(filename, "w") as f:
                f.write("Got here")

        executor = DaskExecutor()
        with executor.start():
            start = time.time()
            fut = executor.submit(slow)  # noqa
            time.sleep(0.1)
        stop = time.time()
        # Cluster shutdown before task could complete
        assert stop - start < 5
        assert not os.path.exists(filename)
Code example #4
File: test_executors.py  Project: zschumacher/prefect
    def test_start_local_cluster(self, caplog):
        executor = DaskExecutor(cluster_kwargs={"processes": False})
        assert executor.cluster_class == distributed.LocalCluster
        assert executor.cluster_kwargs == {
            "processes": False,
            "silence_logs": logging.CRITICAL,
        }

        with executor.start():
            res = executor.wait(executor.submit(lambda x: x + 1, 1))
            assert res == 2

        assert any("Creating a new Dask cluster" in rec.message
                   for rec in caplog.records)
        try:
            import bokeh  # noqa
        except Exception:
            # If bokeh isn't installed, no dashboard will be started
            pass
        else:
            assert any("The Dask dashboard is available at" in rec.message
                       for rec in caplog.records)
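
When no address is given, the executor spins up a temporary distributed.LocalCluster for the duration of the run and forwards cluster_kwargs to it. A hedged sketch of configuring a small in-process cluster (the worker counts are illustrative):

    from prefect.executors import DaskExecutor

    # Without an address, a temporary LocalCluster is created; these keyword
    # arguments are passed through to distributed.LocalCluster.
    executor = DaskExecutor(
        cluster_kwargs={"processes": False, "n_workers": 2, "threads_per_worker": 1},
    )

    # Same pattern the tests use: start the cluster, submit work, wait on results.
    with executor.start():
        fut = executor.submit(lambda x: x * 2, 21)
        assert executor.wait(fut) == 42
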
Code example #5
File: test_executors.py  Project: omarbelkady/prefect
    def test_executor_logs_worker_events(self, caplog):
        caplog.set_level(logging.DEBUG, logger="prefect")
        with distributed.Client(n_workers=1,
                                processes=False,
                                set_as_default=False) as client:
            executor = DaskExecutor(address=client.scheduler.address)
            with executor.start():
                assert executor.watch_worker_status is None
                assert executor._watch_dask_events_task is not None

                time.sleep(0.1)
                client.cluster.scale(4)
                while len(client.scheduler_info()["workers"]) < 4:
                    time.sleep(0.1)
                client.cluster.scale(1)
                while len(client.scheduler_info()["workers"]) > 1:
                    time.sleep(0.1)

        assert any(
            re.match("Worker .+ added", rec.msg) for rec in caplog.records)
        assert any(
            re.match("Worker .+ removed", rec.msg) for rec in caplog.records)
Code example #6
    def test_local_cluster_adapt(self):
        adapt_kwargs = {"minimum": 1, "maximum": 1}
        called_with = None

        class MyCluster(distributed.LocalCluster):
            def adapt(self, **kwargs):
                nonlocal called_with
                called_with = kwargs
                super().adapt(**kwargs)

        executor = DaskExecutor(
            cluster_class=MyCluster,
            cluster_kwargs={"processes": False, "n_workers": 0},
            adapt_kwargs=adapt_kwargs,
        )

        assert executor.adapt_kwargs == adapt_kwargs

        with executor.start():
            res = executor.wait(executor.submit(lambda x: x + 1, 1))
            assert res == 2

        assert called_with == adapt_kwargs
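
The same adapt_kwargs mechanism also works with the default LocalCluster rather than a custom subclass; a sketch with illustrative scaling bounds:

    from prefect.executors import DaskExecutor

    # adapt_kwargs are forwarded to cluster.adapt(), letting the temporary
    # cluster scale between the given worker bounds as load changes.
    executor = DaskExecutor(
        cluster_kwargs={"processes": False, "n_workers": 0},
        adapt_kwargs={"minimum": 1, "maximum": 4},
    )

    with executor.start():
        result = executor.wait(executor.submit(sum, [1, 2, 3]))
        assert result == 6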