Example 1
def test_monitor_failing_evaluation(make_ee_config):
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    ensemble = TestEnsemble(iter=1, reals=1, steps=1, jobs=1)
    ensemble.with_failure()
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="ee-0",
    )
    ee.run()
    with NarrativeProxy(
        monitor_failing_evaluation().on_uri(f"ws://localhost:{ee_config.port}")
    ).proxy(ee_config.url) as port:
        with ee_monitor.create("localhost", port, "ws", None, None) as monitor:
            for event in monitor.track():
                if event["type"] == identifiers.EVTYPE_EE_SNAPSHOT:
                    ensemble.start()
                if (
                    event.data
                    and event.data.get(identifiers.STATUS) == ENSEMBLE_STATE_FAILED
                ):
                    monitor.signal_done()

    ensemble.join()
Example 2
def test_verify_dispatch_failing_job(make_ee_config):
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    mock_ensemble = MagicMock()
    mock_ensemble.snapshot.to_dict.return_value = {}
    ee = EnsembleEvaluator(
        mock_ensemble,
        ee_config,
        0,
        ee_id="0",
    )
    ee.run()
    dispatch_failing_job.verify(ee_config.client_uri, on_connect=lambda: None)
    ee.stop()
Example 3
def test_verify_monitor_failing_ensemble(make_ee_config):
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    ensemble = TestEnsemble(iter=1, reals=2, steps=1, jobs=2)
    ensemble.addFailJob(real=1, step=0, job=1)
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="0",
    )
    ee.run()
    monitor_failing_ensemble.verify(ee_config.client_uri,
                                    on_connect=ensemble.start)
    ensemble.join()
Example 4
def test_verify_monitor_failing_evaluation(make_ee_config):
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    ensemble = TestEnsemble(iter=1, reals=2, steps=1, jobs=2)
    ensemble.with_failure()
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="ee-0",
    )
    ee.run()
    monitor_failing_evaluation.verify(ee_config.client_uri,
                                      on_connect=ensemble.start)
    ensemble.join()
Example 5
def test_run_prefect_ensemble_with_path(unused_tcp_port):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update({"config_path": Path.cwd()})
        config.update({"realizations": 2})
        config.update({"executor": "local"})

        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(
            config["storage"]["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, ee_id="1")

        mon = evaluator.run()

        for event in mon.track():
            if event.data is not None and event.data.get("status") in [
                    "Failed",
                    "Stopped",
            ]:
                mon.signal_done()

        assert evaluator._snapshot.get_status() == "Stopped"

        successful_realizations = evaluator._snapshot.get_successful_realizations()

        assert successful_realizations == config["realizations"]
Example 6
def test_run_prefect_ensemble_exception(unused_tcp_port):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update({"config_path": Path.absolute(Path("."))})
        config.update({"realizations": 2})
        config.update({"executor": "local"})

        service_config = EvaluatorServerConfig(unused_tcp_port)

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, ee_id="1")

        with patch.object(ensemble,
                          "_fetch_input_files",
                          side_effect=RuntimeError()):
            mon = evaluator.run()
            for event in mon.track():
                if event["type"] in (
                        ids.EVTYPE_EE_SNAPSHOT_UPDATE,
                        ids.EVTYPE_EE_SNAPSHOT,
                ) and event.data.get("status") in [
                        "Stopped",
                        "Failed",
                ]:
                    mon.signal_done()
            assert evaluator._snapshot.get_status() == "Failed"
Example 7
def _run(
    ensemble_evaluator: EnsembleEvaluator,
) -> Dict[int, Dict[str, ert.data.RecordTransmitter]]:
    result: Dict[int, Dict[str, ert.data.RecordTransmitter]] = {}
    with ensemble_evaluator.run() as monitor:
        realization_ids = set()
        realizations_completed = set()
        for event in monitor.track():
            if isinstance(event.data, dict) and event.data.get("status") in [
                    ENSEMBLE_STATE_STOPPED,
                    ENSEMBLE_STATE_FAILED,
            ]:
                monitor.signal_done()
                if event.data.get("status") == ENSEMBLE_STATE_FAILED:
                    raise RuntimeError("Ensemble evaluation failed")
            if event["type"] == EVTYPE_EE_TERMINATED and isinstance(
                    event.data, bytes):
                result = pickle.loads(event.data)
            if isinstance(event.data, dict) and "reals" in event.data:
                for real_id in event.data["reals"]:
                    realization_ids.add(real_id)
                    real_status = event.data["reals"][real_id].get("status")
                    if (real_status == REALIZATION_STATE_FINISHED
                            and real_id not in realizations_completed):
                        realizations_completed.add(real_id)
                        print(
                            f"Realization {real_id} completed successfully"
                            f" ({len(realizations_completed)}/{len(realization_ids)})"
                        )

    return result
Example 8
def test_run_prefect_for_function_defined_outside_py_environment(
    evaluator_config,
    coefficients,
    function_ensemble_builder_factory,
    ensemble_size,
    external_sum_function,
):
    """Ensemble built from outside env. Assert state, realizations and result"""
    # Build ensemble and run on server
    ensemble = (function_ensemble_builder_factory(
        external_sum_function).set_retry_delay(1).set_max_retries(0).build())
    evaluator = EnsembleEvaluator(ensemble, evaluator_config, 0, ee_id="1")
    with evaluator.run() as mon:
        for event in mon.track():
            if event["type"] == ids.EVTYPE_EE_TERMINATED:
                results = pickle.loads(event.data)
            wait_until_done(mon, event)
    assert evaluator._ensemble.status == state.ENSEMBLE_STATE_STOPPED
    successful_realizations = evaluator._ensemble.get_successful_realizations()
    assert successful_realizations == ensemble_size
    expected_results = [
        pickle.loads(external_sum_function)(coeffs)["function_output"]
        for coeffs in coefficients
    ]
    transmitter_futures = [
        res["function_output"].load() for res in results.values()
    ]
    results = get_event_loop().run_until_complete(
        asyncio.gather(*transmitter_futures))
    assert expected_results == [res.data for res in results]
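The wait_until_done helper used here and in several later examples is not shown. A minimal sketch, assuming it only signals the monitor once the ensemble reports a terminal status (the same check the other examples spell out inline; state refers to the module these tests already import):

def wait_until_done(mon, event):
    # Hypothetical helper: stop tracking once the ensemble is Stopped or Failed.
    if isinstance(event.data, dict) and event.data.get("status") in [
        state.ENSEMBLE_STATE_STOPPED,
        state.ENSEMBLE_STATE_FAILED,
    ]:
        mon.signal_done()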
Example 9
def test_prefect_no_retries(evaluator_config,
                            function_ensemble_builder_factory, tmpdir):
    """Evaluator tries and fails once. Asserts if job and step fails"""
    cloudpickle.register_pickle_by_value(sys.modules[__name__])
    pickle_func = cloudpickle.dumps(function_that_fails_once)
    cloudpickle.unregister_pickle_by_value(sys.modules[__name__])

    ensemble = (function_ensemble_builder_factory(pickle_func).set_retry_delay(
        1).set_max_retries(0).build())
    evaluator = EnsembleEvaluator(ensemble, evaluator_config, 0, ee_id="1")
    with tmpdir.as_cwd():
        # Get events
        event_list = []
        with evaluator.run() as mon:
            for event in mon.track():
                event_list.append(event)
                wait_until_done(mon, event)
        # Find if job and step failed
        step_failed = False
        job_failed = False
        for real in ensemble.snapshot.reals.values():
            for step in real.steps.values():
                for job in step.jobs.values():
                    if job.status == state.JOB_STATE_FAILURE:
                        job_failed = True
                        assert job.error == "This is an expected ERROR"
                        if step.status == state.STEP_STATE_FAILURE:
                            step_failed = True
        assert ensemble.status == state.ENSEMBLE_STATE_FAILED
        assert job_failed, f"Events: {event_list}"
        assert step_failed, f"Events: {event_list}"
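function_that_fails_once is only pickled in this and the next example, not defined. It is assumed to be a module-level version of the inline helper shown in the later examples, roughly:

from pathlib import Path

def function_that_fails_once(coeffs):
    # Assumed definition: fails on the first call (no marker file yet),
    # succeeds on the retry.
    run_path = Path("ran_once")
    if not run_path.exists():
        run_path.touch()
        raise RuntimeError("This is an expected ERROR")
    run_path.unlink()
    return []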
Example 10
def test_prefect_retries(evaluator_config, function_ensemble_builder_factory,
                         tmpdir, ensemble_size):
    """Evaluator fails once through pickled-fail-function. Asserts fail and retries"""
    cloudpickle.register_pickle_by_value(sys.modules[__name__])
    pickle_func = cloudpickle.dumps(function_that_fails_once)
    cloudpickle.unregister_pickle_by_value(sys.modules[__name__])

    ensemble = function_ensemble_builder_factory(pickle_func).set_retry_delay(
        2).build()
    evaluator = EnsembleEvaluator(ensemble, evaluator_config, 0, ee_id="1")
    with tmpdir.as_cwd():
        error_event_reals: Set[str] = set()
        with evaluator.run() as mon:
            # close_events_in_ensemble_run(monitor=mon)  # stricter than the loop below
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    error_event_reals.update(event.data["reals"].keys())
                wait_until_done(mon, event)
        assert evaluator._ensemble.status == state.ENSEMBLE_STATE_STOPPED
        successful_realizations = evaluator._ensemble.get_successful_realizations()
        assert successful_realizations == ensemble_size
        # Check we get only one job error message per realization
        assert len(error_event_reals) == ensemble_size
        assert "0" in error_event_reals
        assert "1" in error_event_reals
Example 11
def test_run_legacy_ensemble_exception(tmpdir, make_ensemble_builder):
    num_reals = 2
    custom_port_range = range(1024, 65535)
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(custom_port_range=custom_port_range,
                                       custom_host="127.0.0.1")
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with patch.object(ensemble._job_queue,
                          "submit_complete") as faulty_queue:
            faulty_queue.side_effect = RuntimeError()
            with evaluator.run() as monitor:
                for e in monitor.track():
                    if e.data is not None and e.data.get(
                            identifiers.STATUS) in [
                                state.ENSEMBLE_STATE_FAILED,
                                state.ENSEMBLE_STATE_STOPPED,
                            ]:
                        monitor.signal_done()
            assert evaluator._ensemble.status == state.ENSEMBLE_STATE_FAILED

        # realizations should not finish, and should therefore not create a status file
        for i in range(num_reals):
            assert not os.path.isfile(f"real_{i}/status.txt")
Example 12
def test_verify_monitor_successful_ensemble(make_ee_config, event_loop):
    ensemble = TestEnsemble(iter=1, reals=2, steps=2, jobs=2).with_result(
        b"\x80\x04\x95\x0f\x00\x00\x00\x00\x00\x00\x00\x8c\x0bhello world\x94.",
        "application/octet-stream",
    )
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="ee-0",
    )
    ee.run()
    event_loop.run_until_complete(wait_for_evaluator(ee_config.url))
    monitor_successful_ensemble().verify(ee_config.client_uri,
                                         on_connect=ensemble.start)
    ensemble.join()
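For reference, the pickled payload passed to with_result above decodes to the string "hello world":

import pickle

payload = b"\x80\x04\x95\x0f\x00\x00\x00\x00\x00\x00\x00\x8c\x0bhello world\x94."
assert pickle.loads(payload) == "hello world"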
Example 13
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir,
                            function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        step_failed = False
        job_failed = False
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    for real in event.data["reals"].values():
                        for step in real["steps"].values():
                            for job in step["jobs"].values():
                                if job["status"] == "Failed":
                                    job_failed = True
                                    if step["status"] == "Failed":
                                        step_failed = True

                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._ensemble.get_status() == "Failed"
        assert job_failed
        assert step_failed
Example 14
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir, function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens] for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        event_list = []
        with evaluator.run() as mon:
            for event in mon.track():
                event_list.append(event)
                if event.data is not None and event.data.get("status") in [
                    state.ENSEMBLE_STATE_FAILED,
                    state.ENSEMBLE_STATE_STOPPED,
                ]:
                    mon.signal_done()

        step_failed = False
        job_failed = False
        for real in ensemble.snapshot.get_reals().values():
            for step in real.steps.values():
                for job in step.jobs.values():
                    if job.status == state.JOB_STATE_FAILURE:
                        job_failed = True
                        assert job.error == "This is an expected ERROR"
                        if step.status == state.STEP_STATE_FAILURE:
                            step_failed = True

        assert ensemble.get_status() == state.ENSEMBLE_STATE_FAILED
        assert job_failed, f"Events: {event_list}"
        assert step_failed, f"Events: {event_list}"
Example 15
def test_run_prefect_ensemble_exception(evaluator_config, poly_ensemble):
    """Test prefect on flow with runtime-error"""
    poly_ensemble.get_flow = dummy_get_flow
    evaluator = EnsembleEvaluator(poly_ensemble,
                                  evaluator_config,
                                  0,
                                  ee_id="1")
    with evaluator.run() as mon:
        for event in mon.track():
            wait_until_done(mon, event)
    assert evaluator._ensemble.status == state.ENSEMBLE_STATE_FAILED
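dummy_get_flow is referenced above but not shown. A minimal sketch, assuming it only needs to raise so that building the prefect flow fails and the ensemble ends up in ENSEMBLE_STATE_FAILED:

def dummy_get_flow(*args, **kwargs):
    # Hypothetical stand-in: raising here makes flow construction fail, which
    # the test expects to surface as a failed ensemble.
    raise RuntimeError("Expected dummy_get_flow failure")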
Example 16
def test_run_prefect_ensemble(evaluator_config, poly_ensemble, ensemble_size):
    """Test successful realizations from prefect-run equals ensemble-size"""
    evaluator = EnsembleEvaluator(poly_ensemble,
                                  evaluator_config,
                                  0,
                                  ee_id="1")
    with evaluator.run() as mon:
        for event in mon.track():
            wait_until_done(mon, event)
    assert evaluator._ensemble.status == state.ENSEMBLE_STATE_STOPPED
    successful_realizations = evaluator._ensemble.get_successful_realizations()
    assert successful_realizations == ensemble_size
Example 17
def test_prefect_retries(unused_tcp_port, coefficients, tmpdir,
                         function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 2
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")
        error_event_reals = []
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    error_event_reals.append(event.data["reals"])
                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        successful_realizations = evaluator._snapshot.get_successful_realizations()
        assert successful_realizations == config["realizations"]
        # Check we get only one job error message per realization
        assert len(error_event_reals) == config["realizations"]
        for idx, reals in enumerate(error_event_reals):
            assert len(reals) == 1
            assert str(idx) in reals
Example 18
def test_monitor_successful_ensemble(make_ee_config):
    ensemble = TestEnsemble(iter=1, reals=2, steps=2, jobs=2)
    ensemble.addFailJob(real=1, step=0, job=1)
    ee_config = make_ee_config(use_token=False, generate_cert=False)
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="ee-0",
    )

    ee.run()
    with NarrativeProxy(monitor_successful_ensemble()).proxy(ee_config.url):
        with ee_monitor.create(ee_config.get_connection_info()) as monitor:
            for event in monitor.track():
                if event["type"] == identifiers.EVTYPE_EE_SNAPSHOT:
                    ensemble.start()
                if (event.data and event.data.get(identifiers.STATUS)
                        == ENSEMBLE_STATE_STOPPED):
                    monitor.signal_done()

    ensemble.join()
Example 19
def test_cancel_run_prefect_ensemble(evaluator_config, poly_ensemble):
    """Test cancellation of prefect-run"""
    evaluator = EnsembleEvaluator(poly_ensemble,
                                  evaluator_config,
                                  0,
                                  ee_id="1")
    with evaluator.run() as mon:
        cancel = True
        for _ in mon.track():
            if cancel:
                mon.signal_cancel()
                cancel = False
    assert evaluator._ensemble.status == state.ENSEMBLE_STATE_CANCELLED
Example 20
def _run(
    ensemble_evaluator: EnsembleEvaluator,
) -> Dict[int, Dict[str, RecordTransmitter]]:
    result = {}
    with ensemble_evaluator.run() as monitor:
        for event in monitor.track():
            if event.data is not None and event.data.get("status") in [
                    _EVTYPE_SNAPSHOT_STOPPED,
                    _EVTYPE_SNAPSHOT_FAILED,
            ]:
                if event.data.get("status") == _EVTYPE_SNAPSHOT_STOPPED:
                    result = monitor.get_result()
                monitor.signal_done()
    return result
Example 21
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")
        monitor = evaluator.run()
        for e in monitor.track():
            if (e["type"] in (
                    identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                    identifiers.EVTYPE_EE_SNAPSHOT,
            ) and e.data.get("status") in ["Failed", "Stopped"]):
                monitor.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        assert evaluator.get_successful_realizations() == num_reals
Example 22
def _run(
    ensemble_evaluator: EnsembleEvaluator,
) -> Dict[int, Dict[str, RecordTransmitter]]:
    result = {}
    with ensemble_evaluator.run() as monitor:
        for event in monitor.track():
            if isinstance(event.data, dict) and event.data.get("status") in [
                _EVTYPE_SNAPSHOT_STOPPED,
                _EVTYPE_SNAPSHOT_FAILED,
            ]:
                monitor.signal_done()
            if event["type"] == EVTYPE_EE_TERMINATED and isinstance(event.data, bytes):
                result = pickle.loads(event.data)

    return result
Example 23
def test_run_legacy_ensemble_exception(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with patch.object(ensemble, "get_active_reals", side_effect=RuntimeError()):
            with evaluator.run() as monitor:
                for e in monitor.track():
                    if e.data is not None and e.data.get(identifiers.STATUS) in [
                        state.ENSEMBLE_STATE_FAILED,
                        state.ENSEMBLE_STATE_STOPPED,
                    ]:
                        monitor.signal_done()
            assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_FAILED
Example 24
def test_run_and_cancel_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 10
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)

        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with evaluator.run() as mon:
            cancel = True
            for _ in mon.track():
                if cancel:
                    mon.signal_cancel()
                    cancel = False

        assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_CANCELLED
Example 25
def test_run_and_cancel_legacy_ensemble(tmpdir, unused_tcp_port,
                                        make_ensemble_builder):
    num_reals = 10
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)

        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        mon = evaluator.run()
        cancel = True
        for _ in mon.track():
            if cancel:
                mon.signal_cancel()
                cancel = False

        assert evaluator._snapshot.get_status() == "Cancelled"
Example 26
def test_run_prefect_ensemble_with_path(unused_tcp_port, coefficients):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(config["storage"]["storage_path"])
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        with evaluator.run() as mon:
            for event in mon.track():
                if isinstance(event.data, dict) and event.data.get("status") in [
                    "Failed",
                    "Stopped",
                ]:
                    mon.signal_done()

        assert evaluator._ensemble.get_status() == "Stopped"
        successful_realizations = evaluator._ensemble.get_successful_realizations()
        assert successful_realizations == config["realizations"]
Example 27
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")
        with evaluator.run() as monitor:
            for e in monitor.track():
                if (e["type"] in (
                        identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                        identifiers.EVTYPE_EE_SNAPSHOT,
                ) and e.data.get(identifiers.STATUS) in [
                        state.ENSEMBLE_STATE_FAILED,
                        state.ENSEMBLE_STATE_STOPPED
                ]):
                    monitor.signal_done()
        assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_STOPPED
        assert evaluator._ensemble.get_successful_realizations() == num_reals
Example 28
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    conf_file = Path(tmpdir / CONFIG_FILE)
    with tmpdir.as_cwd():
        with open(conf_file, "w") as f:
            f.write(f'port: "{unused_tcp_port}"\n')

        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = load_config(conf_file)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")
        monitor = evaluator.run()
        for e in monitor.track():
            if (e["type"] in (
                    identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                    identifiers.EVTYPE_EE_SNAPSHOT,
            ) and e.data.get("status") == "Stopped"):
                monitor.signal_done()
        assert evaluator.get_successful_realizations() == num_reals
Example 29
def test_run_legacy_ensemble_exception(tmpdir, unused_tcp_port,
                                       make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        with patch.object(ensemble,
                          "_run_path_list",
                          side_effect=RuntimeError()):
            monitor = evaluator.run()
            for e in monitor.track():
                if (e["type"] in (
                        identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                        identifiers.EVTYPE_EE_SNAPSHOT,
                ) and e.data.get("status") in ["Failed", "Stopped"]):
                    monitor.signal_done()
            assert evaluator._snapshot.get_status() == "Failed"