Example 1
def test_get_flow(coefficients, unused_tcp_port):
    with tmp(Path(SOURCE_DIR) / "test-data/local/prefect_test_case"):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                ids.REALIZATIONS: 2,
                ids.EXECUTOR: "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )
        server_config = EvaluatorServerConfig(unused_tcp_port)
        for permuted_steps in permutations(config["steps"]):
            permuted_config = copy.deepcopy(config)
            permuted_config["steps"] = permuted_steps
            permuted_config["dispatch_uri"] = server_config.dispatch_uri
            ensemble = PrefectEnsemble(permuted_config)

            for iens in range(2):
                with prefect.context(
                    url=server_config.url,
                    token=server_config.token,
                    cert=server_config.cert,
                ):
                    flow = ensemble.get_flow(ensemble._ee_id, [iens])

                # Get the ordered tasks and retrieve their step ids.
                flow_steps = [
                    task.get_step()
                    for task in flow.sorted_tasks()
                    if isinstance(task, UnixTask)
                ]
                assert len(flow_steps) == 4

                realization_steps = list(
                    ensemble.get_reals()[iens].get_steps_sorted_topologically()
                )

                # The ordering constraints must hold both for the realization
                # steps and for the flow tasks.
                for step_ordering in [realization_steps, flow_steps]:
                    mapping = {
                        step._name: idx for idx, step in enumerate(step_ordering)
                    }
                    assert mapping["second_degree"] < mapping["zero_degree"]
                    assert mapping["zero_degree"] < mapping["add_coeffs"]
                    assert mapping["first_degree"] < mapping["add_coeffs"]
                    assert mapping["second_degree"] < mapping["add_coeffs"]
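The `tmp` helper used throughout these examples is defined elsewhere in the test suite. A minimal sketch of what it plausibly does, assuming it copies the given test-data directory into a scratch directory and changes into it for the duration of the block (names and behavior are inferred, not taken from the snippets):

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def tmp(path):
    """Copy `path` into a fresh temporary directory and chdir into it."""
    cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as scratch:
        shutil.copytree(path, scratch, dirs_exist_ok=True)  # Python 3.8+
        os.chdir(scratch)
        try:
            yield
        finally:
            os.chdir(cwd)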
Example 2
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir, function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens] for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        event_list = []
        with evaluator.run() as mon:
            for event in mon.track():
                event_list.append(event)
                if event.data is not None and event.data.get("status") in [
                    state.ENSEMBLE_STATE_FAILED,
                    state.ENSEMBLE_STATE_STOPPED,
                ]:
                    mon.signal_done()

        step_failed = False
        job_failed = False
        for real in ensemble.snapshot.get_reals().values():
            for step in real.steps.values():
                for job in step.jobs.values():
                    if job.status == state.JOB_STATE_FAILURE:
                        job_failed = True
                        assert job.error == "This is an expected ERROR"
                        if step.status == state.STEP_STATE_FAILURE:
                            step_failed = True

        assert ensemble.get_status() == state.ENSEMBLE_STATE_FAILED
        assert job_failed, f"Events: {event_list}"
        assert step_failed, f"Events: {event_list}"
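The `coefficients` fixture is also defined outside these snippets. Judging from how the tests use it (one entry per realization, consumed as a dict by functions such as `sum(coeffs.values())` in Example 10), a hypothetical stand-in could be:

import pytest

@pytest.fixture
def coefficients():
    # One dict of polynomial coefficients per realization; values are arbitrary.
    return [{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 2, "c": 1}]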
Example 3
def evaluate(
    workspace_root: Path,
    evaluation_name: str,
    input_records: MultiEnsembleRecord,
    ensemble_config: EnsembleConfig,
    stages_config: StagesConfig,
) -> MultiEnsembleRecord:
    evaluation_tmp_dir = _create_evaluator_tmp_dir(workspace_root,
                                                   evaluation_name)

    config = EvaluatorServerConfig()
    ee_config = _build_ee_config(
        evaluation_tmp_dir,
        ensemble_config,
        stages_config,
        input_records,
        config.dispatch_uri,
    )
    ensemble = PrefectEnsemble(ee_config)  # type: ignore

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)
    result = _run(ee)
    output_records = _prepare_output_records(result)

    return output_records
Example 4
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir,
                            function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        step_failed = False
        job_failed = False
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    for real in event.data["reals"].values():
                        for step in real["steps"].values():
                            for job in step["jobs"].values():
                                if job["status"] == "Failed":
                                    job_failed = True
                                    if step["status"] == "Failed":
                                        step_failed = True

                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._ensemble.get_status() == "Failed"
        assert job_failed
        assert step_failed
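`function_config` is another fixture supplied by the surrounding suite. Based purely on the keys these tests read and overwrite, a hypothetical skeleton might look like the following; the step and job names are illustrative, while "function_output" matches the record loaded in Example 10:

import pytest

@pytest.fixture
def function_config(tmpdir):
    return {
        "steps": [
            {
                "name": "function_step",         # illustrative
                "type": "function",
                "inputs": [],
                "outputs": ["function_output"],  # record loaded in Example 10
                "jobs": [{"name": "function_job", "executable": None}],
            }
        ],
        "storage": {"storage_path": str(tmpdir / "storage")},
    }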
Example 5
def test_run_prefect_ensemble_exception(unused_tcp_port, coefficients):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(config["storage"]["storage_path"])
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        ensemble.get_flow = dummy_get_flow

        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")
        with evaluator.run() as mon:
            for event in mon.track():
                if event.data is not None and event.data.get("status") in [
                    "Failed",
                    "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._ensemble.get_status() == "Failed"
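`dummy_get_flow` is referenced but not defined here; the test only needs `get_flow` to raise so that the evaluator ends up "Failed". Any raising callable would do, for example:

def dummy_get_flow(*args, **kwargs):
    # Simulate an exception while the flow is being built.
    raise RuntimeError("Failed to build the flow")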
Example 6
def test_prefect_retries(unused_tcp_port, coefficients, tmpdir,
                         function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 2
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")
        error_event_reals = []
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    error_event_reals.append(event.data["reals"])
                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        successful_realizations = evaluator._snapshot.get_successful_realizations()
        assert successful_realizations == config["realizations"]
        # Check we get only one job error message per realization
        assert len(error_event_reals) == config["realizations"]
        for idx, reals in enumerate(error_event_reals):
            assert len(reals) == 1
            assert str(idx) in reals
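The retry logic in this test hinges on `function_that_fails_once` being self-healing: the first call plants a marker file and raises, the retry removes the marker and succeeds, so `max_retries=2` is enough. The pattern in isolation:

from pathlib import Path

def fails_once(marker=Path("ran_once")):
    if not marker.exists():
        marker.touch()
        raise RuntimeError("This is an expected ERROR")
    marker.unlink()
    return []

try:
    fails_once()           # first call: plants the marker and raises
except RuntimeError:
    pass
assert fails_once() == []  # second call: cleans up and succeeds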
Example 7
def test_cancel_run_prefect_ensemble(unused_tcp_port, coefficients):
    with tmp(Path(SOURCE_DIR) / "test-data/local/prefect_test_case"):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(config["storage"]["storage_path"])
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="2")

        with evaluator.run() as mon:
            cancel = True
            for _ in mon.track():
                if cancel:
                    mon.signal_cancel()
                    cancel = False

        assert evaluator._ensemble.get_status() == "Cancelled"
Example 8
def evaluate(workspace_root, evaluation_name, input_records, ensemble_config,
             stages_config):
    evaluation_tmp_dir = _create_evaluator_tmp_dir(workspace_root,
                                                   evaluation_name)

    config = EvaluatorServerConfig()
    ee_config = _build_ee_config(
        evaluation_tmp_dir,
        ensemble_config,
        stages_config,
        input_records,
        config.dispatch_uri,
    )
    ensemble = PrefectEnsemble(ee_config)

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)
    result = _run(ee)
    responses = _prepare_responses(result)

    return responses
Example 9
def test_run_prefect_ensemble(unused_tcp_port, coefficients):
    test_path = Path(SOURCE_DIR) / "test-data/local/prefect_test_case"
    with tmp(test_path):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["dispatch_uri"] = service_config.dispatch_uri
        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        with evaluator.run() as mon:
            for event in mon.track():
                if isinstance(event.data, dict) and event.data.get("status") in [
                    "Failed",
                    "Stopped",
                ]:
                    mon.signal_done()

        assert evaluator._ensemble.get_status() == "Stopped"
        successful_realizations = evaluator._ensemble.get_successful_realizations()
        assert successful_realizations == config["realizations"]
Example 10
def test_run_prefect_for_function_defined_outside_py_environment(
        unused_tcp_port, coefficients, tmpdir, function_config):
    with tmpdir.as_cwd():
        # Create a temporary module `foo` that defines a function `bar`;
        # `bar` delegates to a second function, `internal_call`, defined
        # in the same Python file.
        module_path = Path(tmpdir) / "foo"
        module_path.mkdir()
        init_file = module_path / "__init__.py"
        init_file.touch()
        file_path = module_path / "bar.py"
        file_path.write_text(
            "def bar(coeffs):\n    return internal_call(coeffs)\n"
            "def internal_call(coeffs):\n    return [sum(coeffs.values())]\n")
        spec = importlib.util.spec_from_file_location("foo", str(file_path))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        func = getattr(module, "bar")
        pickle_func = cloudpickle.dumps(func)
        init_file.unlink()
        file_path.unlink()

        # Check module is not in the python environment
        with pytest.raises(ModuleNotFoundError):
            import foo.bar

        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {iens: coeffs_trans[iens] for iens in range(2)}
        config["outputs"] = output_transmitters(config)

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["dispatch_uri"] = service_config.dispatch_uri
        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")
        with evaluator.run() as mon:
            for event in mon.track():
                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    results = mon.get_result()
                    mon.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        successful_realizations = evaluator._snapshot.get_successful_realizations()
        assert successful_realizations == config["realizations"]
        expected_results = [
            pickle.loads(pickle_func)(coeffs) for coeffs in coefficients
        ]
        transmitter_futures = [
            res["function_output"].load() for res in results.values()
        ]
        results = asyncio.get_event_loop().run_until_complete(
            asyncio.gather(*transmitter_futures))
        assert expected_results == [res.data for res in results]
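The property Example 10 exercises is that cloudpickle serializes the function by value, so the pickled bytes keep working after the defining files are deleted. The round trip in isolation (a plain script, nothing from the snippets required):

import pickle

import cloudpickle

def poly(coeffs):
    return [sum(coeffs.values())]

blob = cloudpickle.dumps(poly)  # pickled by value, not by module reference
del poly
restored = pickle.loads(blob)   # works even though `poly` is gone
assert restored({"a": 1, "b": 2, "c": 3}) == [6]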