Example #1
    def startSimulation(self):
        self._run_model.reset()
        self._snapshot_model.reset()
        self._tab_widget.clear()

        evaluator_server_config = EvaluatorServerConfig()

        def run():
            asyncio.set_event_loop(asyncio.new_event_loop())
            self._run_model.startSimulations(
                evaluator_server_config=evaluator_server_config,
            )

        simulation_thread = Thread(name="ert_gui_simulation_thread")
        simulation_thread.setDaemon(True)
        simulation_thread.run = run
        simulation_thread.start()

        self._ticker.start(1000)

        tracker = EvaluatorTracker(
            self._run_model,
            ee_con_info=evaluator_server_config.get_connection_info(),
        )

        worker = TrackerWorker(tracker)
        worker_thread = QThread()
        worker.done.connect(worker_thread.quit)
        worker.consumed_event.connect(self._on_tracker_event)
        worker.moveToThread(worker_thread)
        self.simulation_done.connect(worker.stop)
        self._worker = worker
        self._worker_thread = worker_thread
        worker_thread.started.connect(worker.consume_and_emit)
        self._worker_thread.start()
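
Taken together, the examples on this page construct EvaluatorServerConfig in three ways. A condensed sketch of those constructor calls, with imports omitted as in the examples and unused_tcp_port standing in for the pytest fixture used below:

# Default construction: the config picks its own port
# (Example #16 asserts the default falls within range(51820, 51840)).
config = EvaluatorServerConfig()

# An explicit port, typically pytest's unused_tcp_port fixture.
config = EvaluatorServerConfig(unused_tcp_port)

# A custom search range, optionally pinned to a host; a single-entry range
# such as range(port, port + 1) effectively fixes the port (see Example #17).
config = EvaluatorServerConfig(
    custom_port_range=range(1024, 65535), custom_host="127.0.0.1"
)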
Example #2
def test_secure_echo(ws):
    custom_port_range = range(1024, 65535)
    config = EvaluatorServerConfig(custom_port_range=custom_port_range)

    async def handler(websocket, path):
        msg = await websocket.recv()
        await websocket.send(msg)

    ws(
        config.host,
        config.port,
        handler,
        ssl=config.get_server_ssl_context(),
        sock=config.get_socket(),
    )
    with ExitStack() as stack:
        duplexer = SyncWebsocketDuplexer(
            f"wss://{config.host}:{config.port}",
            f"wss://{config.host}:{config.port}",
            cert=config.cert,
            token=None,
        )
        stack.callback(duplexer.stop)
        duplexer.send("Hello Secure World")
        assert next(duplexer.receive()) == "Hello Secure World"
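
The ws fixture in this test hides the server-side wiring. As a rough sketch, not part of the example and assuming the third-party websockets package, the config's pre-bound socket and server SSL context could be handed to an echo server like this:

import asyncio

import websockets  # third-party dependency assumed for this sketch


async def serve_echo(config):
    async def handler(websocket, path):
        msg = await websocket.recv()
        await websocket.send(msg)

    # Reuse the socket already bound by EvaluatorServerConfig and its
    # server-side SSL context instead of passing host and port again.
    async with websockets.serve(
        handler, sock=config.get_socket(), ssl=config.get_server_ssl_context()
    ):
        await asyncio.Future()  # serve until the task is cancelled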
Example #3
def test_run_legacy_ensemble_exception(tmpdir, make_ensemble_builder):
    num_reals = 2
    custom_port_range = range(1024, 65535)
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(custom_port_range=custom_port_range,
                                       custom_host="127.0.0.1")
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with patch.object(ensemble._job_queue,
                          "submit_complete") as faulty_queue:
            faulty_queue.side_effect = RuntimeError()
            with evaluator.run() as monitor:
                for e in monitor.track():
                    if e.data is not None and e.data.get(
                            identifiers.STATUS) in [
                                state.ENSEMBLE_STATE_FAILED,
                                state.ENSEMBLE_STATE_STOPPED,
                            ]:
                        monitor.signal_done()
            assert evaluator._ensemble.status == state.ENSEMBLE_STATE_FAILED

        # realisations should not finish, thus not creating a status-file
        for i in range(num_reals):
            assert not os.path.isfile(f"real_{i}/status.txt")
Example #4
def evaluate(
    workspace_root: Path,
    evaluation_name: str,
    input_records: MultiEnsembleRecord,
    ensemble_config: EnsembleConfig,
    stages_config: StagesConfig,
) -> MultiEnsembleRecord:
    evaluation_tmp_dir = _create_evaluator_tmp_dir(workspace_root,
                                                   evaluation_name)

    config = EvaluatorServerConfig()
    ee_config = _build_ee_config(
        evaluation_tmp_dir,
        ensemble_config,
        stages_config,
        input_records,
        config.dispatch_uri,
    )
    ensemble = PrefectEnsemble(ee_config)  # type: ignore

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)
    result = _run(ee)
    output_records = _prepare_output_records(result)

    return output_records
Example #5
def test_run_prefect_ensemble_with_path(unused_tcp_port):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update({"config_path": Path.cwd()})
        config.update({"realizations": 2})
        config.update({"executor": "local"})

        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(
            config["storage"]["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, ee_id="1")

        mon = evaluator.run()

        for event in mon.track():
            if event.data is not None and event.data.get("status") in [
                    "Failed",
                    "Stopped",
            ]:
                mon.signal_done()

        assert evaluator._snapshot.get_status() == "Stopped"

        successful_realizations = evaluator._snapshot.get_successful_realizations()

        assert successful_realizations == config["realizations"]
Example #6
def test_run_prefect_ensemble_exception(unused_tcp_port):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update({"config_path": Path.absolute(Path("."))})
        config.update({"realizations": 2})
        config.update({"executor": "local"})

        service_config = EvaluatorServerConfig(unused_tcp_port)

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, ee_id="1")

        with patch.object(ensemble,
                          "_fetch_input_files",
                          side_effect=RuntimeError()):
            mon = evaluator.run()
            for event in mon.track():
                if event["type"] in (
                        ids.EVTYPE_EE_SNAPSHOT_UPDATE,
                        ids.EVTYPE_EE_SNAPSHOT,
                ) and event.data.get("status") in [
                        "Stopped",
                        "Failed",
                ]:
                    mon.signal_done()
            assert evaluator._snapshot.get_status() == "Failed"
Example #7
def test_get_flow(coefficients, unused_tcp_port):
    with tmp(Path(SOURCE_DIR) / "test-data/local/prefect_test_case"):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                ids.REALIZATIONS: 2,
                ids.EXECUTOR: "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )
        server_config = EvaluatorServerConfig(unused_tcp_port)
        for permuted_steps in permutations(config["steps"]):
            permuted_config = copy.deepcopy(config)
            permuted_config["steps"] = permuted_steps
            permuted_config["dispatch_uri"] = server_config.dispatch_uri
            ensemble = PrefectEnsemble(permuted_config)

            for iens in range(2):
                with prefect.context(
                    url=server_config.url,
                    token=server_config.token,
                    cert=server_config.cert,
                ):
                    flow = ensemble.get_flow(ensemble._ee_id, [iens])

                # Get the ordered tasks and retrieve their step ids.
                flow_steps = [
                    task.get_step()
                    for task in flow.sorted_tasks()
                    if isinstance(task, UnixTask)
                ]
                assert len(flow_steps) == 4

                realization_steps = list(
                    ensemble.get_reals()[iens].get_steps_sorted_topologically()
                )

                # Testing realization steps
                for step_ordering in [realization_steps, flow_steps]:
                    mapping = {
                        step._name: idx for idx, step in enumerate(step_ordering)
                    }
                    assert mapping["second_degree"] < mapping["zero_degree"]
                    assert mapping["zero_degree"] < mapping["add_coeffs"]
                    assert mapping["first_degree"] < mapping["add_coeffs"]
                    assert mapping["second_degree"] < mapping["add_coeffs"]
Example #8
def test_tracking_progress(
    run_model: Type[Union[BaseRunModel, ERT3RunModel]],
    monitor_events: List[CloudEvent],
    brm_mutations: List[Tuple[str, Any]],
    expected_progress: List[float],
    make_mock_ee_monitor,
):
    """Tests progress by providing a list of CloudEvent and a list of
    arguments to apply to setattr(brm) where brm is an actual BaseRunModel
    instance.

    The CloudEvent are provided to the tracker via mocking an Ensemble
    Evaluator Monitor.

    PartialSnapshots allow realizations to progress, while iterating "iter" in
    CloudEvents allows phases to progress. Such progress should happen
    when events are yielded by the tracker. This combined progress is tested.

    The final update event and end event is also tested."""

    if issubclass(run_model, ERT3RunModel):
        brm = run_model()
    else:
        brm = run_model(None, None, None)
    ee_config = EvaluatorServerConfig(
        custom_port_range=range(1024, 65535), custom_host="127.0.0.1"
    )
    with patch(
        "ert.ensemble_evaluator.tracker.evaluator_tracker.create_ee_monitor"
    ) as mock_ee:
        mock_ee.return_value.__enter__.return_value = make_mock_ee_monitor(
            monitor_events.copy()
        )
        tracker = EvaluatorTracker(
            brm, ee_config.get_connection_info(), next_ensemble_evaluator_wait_time=0.1
        )
        for attr, val in brm_mutations:
            setattr(brm, attr, val)
        tracker_gen = tracker.track()
        update_event = None
        for i in range(len(monitor_events)):
            update_event = next(tracker_gen)
            assert update_event.progress == expected_progress[i]
        assert isinstance(update_event, SnapshotUpdateEvent)
        brm._phase = brm._phase_count
        assert isinstance(next(tracker_gen), EndEvent)
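
Outside of this mocked setup, the same tracker pattern appears in Examples #1 and #14: a run model plus the config's connection info go into an EvaluatorTracker, and its track() generator yields progress events until an end event arrives. A stripped-down sketch of that loop, assuming a run_model instance and the event classes used in this test:

config = EvaluatorServerConfig(custom_port_range=range(1024, 65535))
tracker = EvaluatorTracker(run_model, config.get_connection_info())
for event in tracker.track():
    if isinstance(event, SnapshotUpdateEvent):
        print(f"progress: {event.progress}")
    elif isinstance(event, EndEvent):
        break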
Example #9
def run_cli(args):
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    res_config = ResConfig(args.config)
    os.chdir(res_config.config_path)
    ert = EnKFMain(res_config, strict=True, verbose=args.verbose)
    notifier = ErtCliNotifier(ert, args.config)
    ERT.adapt(notifier)

    if args.mode == WORKFLOW_MODE:
        execute_workflow(args.name)
        return

    model, argument = create_model(args)
    # Test run does not have a current_case
    if "current_case" in args and args.current_case:
        ERT.enkf_facade.select_or_create_new_case(args.current_case)

    if (
        args.mode
        in [ENSEMBLE_SMOOTHER_MODE, ITERATIVE_ENSEMBLE_SMOOTHER_MODE, ES_MDA_MODE]
        and args.target_case == ERT.enkf_facade.get_current_case_name()
    ):
        msg = (
            "ERROR: Target file system and source file system can not be the same. "
            "They were both: {}.".format(args.target_case)
        )
        _clear_and_exit(msg)

    ee_config = None
    if FeatureToggling.is_enabled("ensemble-evaluator"):
        ee_config = EvaluatorServerConfig()
        argument.update({"ee_config": ee_config})

    thread = threading.Thread(
        name="ert_cli_simulation_thread",
        target=model.startSimulations,
        args=(argument,),
    )
    thread.start()

    tracker = create_tracker(model, detailed_interval=0, ee_config=ee_config)

    out = open(os.devnull, "w") if args.disable_monitoring else sys.stdout
    monitor = Monitor(out=out, color_always=args.color_always)

    try:
        monitor.monitor(tracker)
    except (SystemExit, KeyboardInterrupt):
        print("\nKilling simulations...")
        tracker.request_termination()

    if args.disable_monitoring:
        out.close()

    thread.join()

    if model.hasRunFailed():
        _clear_and_exit(1)  # the monitor has already reported the error message
Example #10
    def __init__(self, config_file):
        QWidget.__init__(self)
        self._config_file = config_file
        self._ee_config = None
        if FeatureToggling.is_enabled("ensemble-evaluator"):
            self._ee_config = EvaluatorServerConfig()

        self.setObjectName("Simulation_panel")
        layout = QVBoxLayout()

        self._simulation_mode_combo = QComboBox()
        self._simulation_mode_combo.setObjectName("Simulation_mode")
        addHelpToWidget(self._simulation_mode_combo, "run/simulation_mode")

        self._simulation_mode_combo.currentIndexChanged.connect(
            self.toggleSimulationMode)

        simulation_mode_layout = QHBoxLayout()
        simulation_mode_layout.addSpacing(10)
        simulation_mode_layout.addWidget(QLabel("Simulation mode:"), 0,
                                         Qt.AlignVCenter)
        simulation_mode_layout.addWidget(self._simulation_mode_combo, 0,
                                         Qt.AlignVCenter)

        simulation_mode_layout.addSpacing(20)

        self.run_button = QToolButton()
        self.run_button.setObjectName("start_simulation")
        self.run_button.setIconSize(QSize(32, 32))
        self.run_button.setText("Start Simulation")
        self.run_button.setIcon(resourceIcon("ide/gear_in_play"))
        self.run_button.clicked.connect(self.runSimulation)
        self.run_button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        addHelpToWidget(self.run_button, "run/start_simulation")

        simulation_mode_layout.addWidget(self.run_button)
        simulation_mode_layout.addStretch(1)

        layout.addSpacing(5)
        layout.addLayout(simulation_mode_layout)
        layout.addSpacing(10)

        self._simulation_stack = QStackedWidget()
        self._simulation_stack.setLineWidth(1)
        self._simulation_stack.setFrameStyle(QFrame.StyledPanel)

        layout.addWidget(self._simulation_stack)

        self._simulation_widgets = OrderedDict()
        """ :type: OrderedDict[BaseRunModel,SimulationConfigPanel]"""
        self.addSimulationConfigPanel(SingleTestRunPanel())
        self.addSimulationConfigPanel(EnsembleExperimentPanel())
        if ERT.ert.have_observations():
            self.addSimulationConfigPanel(EnsembleSmootherPanel())
            self.addSimulationConfigPanel(MultipleDataAssimilationPanel())
            self.addSimulationConfigPanel(IteratedEnsembleSmootherPanel())

        self.setLayout(layout)
Example #11
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir,
                            function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        step_failed = False
        job_failed = False
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    for real in event.data["reals"].values():
                        for step in real["steps"].values():
                            for job in step["jobs"].values():
                                if job["status"] == "Failed":
                                    job_failed = True
                                    if step["status"] == "Failed":
                                        step_failed = True

                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._ensemble.get_status() == "Failed"
        assert job_failed
        assert step_failed
Example #12
def test_prefect_no_retries(unused_tcp_port, coefficients, tmpdir, function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 0
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens] for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        event_list = []
        with evaluator.run() as mon:
            for event in mon.track():
                event_list.append(event)
                if event.data is not None and event.data.get("status") in [
                    state.ENSEMBLE_STATE_FAILED,
                    state.ENSEMBLE_STATE_STOPPED,
                ]:
                    mon.signal_done()

        step_failed = False
        job_failed = False
        for real in ensemble.snapshot.get_reals().values():
            for step in real.steps.values():
                for job in step.jobs.values():
                    if job.status == state.JOB_STATE_FAILURE:
                        job_failed = True
                        assert job.error == "This is an expected ERROR"
                        if step.status == state.STEP_STATE_FAILURE:
                            step_failed = True

        assert ensemble.get_status() == state.ENSEMBLE_STATE_FAILED
        assert job_failed, f"Events: {event_list}"
        assert step_failed, f"Events: {event_list}"
Example #13
    def evaluate(self, config: EvaluatorServerConfig, ee_id: str):
        self._ee_id = ee_id
        self._ee_config = config.get_info()

        # everything in self will be pickled since we bind a member function in target
        ctx = self._get_multiprocessing_context()
        eval_proc = ctx.Process(target=self._evaluate)
        eval_proc.daemon = True
        eval_proc.start()
        self._eval_proc = eval_proc
        self._allow_cancel.set()
Example #14
def evaluate(
    ensemble: ert.ensemble_evaluator.Ensemble,
    custom_port_range: Optional[range] = None,
) -> Dict[int, Dict[str, ert.data.RecordTransmitter]]:
    config = EvaluatorServerConfig(custom_port_range=custom_port_range)

    run_model = ERT3RunModel()
    tracker = ert.ensemble_evaluator.EvaluatorTracker(
        run_model,
        config.get_connection_info(),
    )
    monitor = Monitor(out=sys.stderr, color_always=False)  # type: ignore

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)

    executor = futures.ThreadPoolExecutor()
    future = executor.submit(_run, ee, run_model)
    monitor.monitor(tracker)  # type: ignore
    result: Dict[int, Dict[str, ert.data.RecordTransmitter]] = future.result()
    return result
Example #15
def test_prefect_retries(unused_tcp_port, coefficients, tmpdir,
                         function_config):
    def function_that_fails_once(coeffs):
        run_path = Path("ran_once")
        if not run_path.exists():
            run_path.touch()
            raise RuntimeError("This is an expected ERROR")
        run_path.unlink()
        return []

    with tmpdir.as_cwd():
        pickle_func = cloudpickle.dumps(function_that_fails_once)
        config = function_config
        coeffs_trans = coefficient_transmitters(
            coefficients,
            config.get(ids.STORAGE)["storage_path"])

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["realizations"] = len(coefficients)
        config["executor"] = "local"
        config["max_retries"] = 2
        config["retry_delay"] = 1
        config["steps"][0]["jobs"][0]["executable"] = pickle_func
        config["inputs"] = {
            iens: coeffs_trans[iens]
            for iens in range(len(coefficients))
        }
        config["outputs"] = output_transmitters(config)
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)
        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")
        error_event_reals = []
        with evaluator.run() as mon:
            for event in mon.track():
                # Capture the job error messages
                if event.data is not None and "This is an expected ERROR" in str(
                        event.data):
                    error_event_reals.append(event.data["reals"])
                if event.data is not None and event.data.get("status") in [
                        "Failed",
                        "Stopped",
                ]:
                    mon.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        successful_realizations = evaluator._snapshot.get_successful_realizations()
        assert successful_realizations == config["realizations"]
        # Check we get only one job error message per realization
        assert len(error_event_reals) == config["realizations"]
        for idx, reals in enumerate(error_event_reals):
            assert len(reals) == 1
            assert str(idx) in reals
Example #16
def test_load_config(unused_tcp_port):
    serv_config = EvaluatorServerConfig(unused_tcp_port)
    expected_host = _get_ip_address()
    expected_port = unused_tcp_port
    expected_url = f"wss://{expected_host}:{expected_port}"
    expected_client_uri = f"{expected_url}/client"
    expected_dispatch_uri = f"{expected_url}/dispatch"

    assert serv_config.host == expected_host
    assert serv_config.port == expected_port
    assert serv_config.url == expected_url
    assert serv_config.client_uri == expected_client_uri
    assert serv_config.dispatch_uri == expected_dispatch_uri
    assert serv_config.token is not None
    assert serv_config.cert is not None
    sock = serv_config.get_socket()
    assert sock is not None
    assert not sock._closed
    sock.close()

    ee_config = EvaluatorServerConfig()
    assert ee_config.port in range(51820, 51840)
    sock = ee_config.get_socket()
    assert sock is not None
    assert not sock._closed
    sock.close()
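
As the assertions above spell out, the client and dispatch endpoints are derived directly from the host and port. A small sketch of reading them off a freshly built config, using the attribute names asserted in this test:

config = EvaluatorServerConfig()
print(config.url)           # wss://<host>:<port>
print(config.client_uri)    # wss://<host>:<port>/client
print(config.dispatch_uri)  # wss://<host>:<port>/dispatch
print(config.cert is not None, config.token is not None)  # TLS material and token are generated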
Example #17
def test_load_config(unused_tcp_port):
    # Use a single-entry range so the evaluator binds exactly unused_tcp_port.
    fixed_port = range(unused_tcp_port, unused_tcp_port + 1)
    serv_config = EvaluatorServerConfig(custom_port_range=fixed_port)
    expected_host = port_handler._get_ip_address()
    expected_port = unused_tcp_port
    expected_url = f"wss://{expected_host}:{expected_port}"
    expected_client_uri = f"{expected_url}/client"
    expected_dispatch_uri = f"{expected_url}/dispatch"

    assert serv_config.host == expected_host
    assert serv_config.port == expected_port
    assert serv_config.url == expected_url
    assert serv_config.client_uri == expected_client_uri
    assert serv_config.dispatch_uri == expected_dispatch_uri
    assert serv_config.token is not None
    assert serv_config.cert is not None
    sock = serv_config.get_socket()
    assert sock is not None
    assert not sock._closed
    sock.close()

    ee_config = EvaluatorServerConfig(custom_port_range=range(1024, 65535))
    sock = ee_config.get_socket()
    assert sock is not None
    assert not sock._closed
    sock.close()
Example #18
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")
        monitor = evaluator.run()
        for e in monitor.track():
            if (e["type"] in (
                    identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                    identifiers.EVTYPE_EE_SNAPSHOT,
            ) and e.data.get("status") in ["Failed", "Stopped"]):
                monitor.signal_done()
        assert evaluator._snapshot.get_status() == "Stopped"
        assert evaluator.get_successful_realizations() == num_reals
Example #19
def evaluate(workspace_root, evaluation_name, input_records, ensemble_config,
             stages_config):
    evaluation_tmp_dir = _create_evaluator_tmp_dir(workspace_root,
                                                   evaluation_name)

    ee_config = _build_ee_config(evaluation_tmp_dir, ensemble_config,
                                 stages_config, input_records)
    ensemble = PrefectEnsemble(ee_config)

    config = EvaluatorServerConfig()
    ee = EnsembleEvaluator(ensemble=ensemble, config=config)
    _run(ee)

    results = _fetch_results(ee_config, ensemble_config, stages_config)
    return results
Example #20
def test_run_legacy_ensemble_exception(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with patch.object(ensemble, "get_active_reals", side_effect=RuntimeError()):
            with evaluator.run() as monitor:
                for e in monitor.track():
                    if e.data is not None and e.data.get(identifiers.STATUS) in [
                        state.ENSEMBLE_STATE_FAILED,
                        state.ENSEMBLE_STATE_STOPPED,
                    ]:
                        monitor.signal_done()
            assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_FAILED
Example #21
def test_run_and_cancel_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 10
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)

        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with evaluator.run() as mon:
            cancel = True
            for _ in mon.track():
                if cancel:
                    mon.signal_cancel()
                    cancel = False

        assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_CANCELLED
Example #22
def test_run_and_cancel_legacy_ensemble(tmpdir, unused_tcp_port,
                                        make_ensemble_builder):
    num_reals = 10
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)

        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        mon = evaluator.run()
        cancel = True
        for _ in mon.track():
            if cancel:
                mon.signal_cancel()
                cancel = False

        assert evaluator._snapshot.get_status() == "Cancelled"
Example #23
def test_run_prefect_ensemble_with_path(unused_tcp_port, coefficients):
    with tmp(os.path.join(SOURCE_DIR, "test-data/local/prefect_test_case")):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(config["storage"]["storage_path"])
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="1")

        with evaluator.run() as mon:
            for event in mon.track():
                if isinstance(event.data, dict) and event.data.get("status") in [
                    "Failed",
                    "Stopped",
                ]:
                    mon.signal_done()

        assert evaluator._ensemble.get_status() == "Stopped"
        successful_realizations = evaluator._ensemble.get_successful_realizations()
        assert successful_realizations == config["realizations"]
Example #24
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")
        with evaluator.run() as monitor:
            for e in monitor.track():
                if (e["type"] in (
                        identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                        identifiers.EVTYPE_EE_SNAPSHOT,
                ) and e.data.get(identifiers.STATUS) in [
                        state.ENSEMBLE_STATE_FAILED,
                        state.ENSEMBLE_STATE_STOPPED
                ]):
                    monitor.signal_done()
        assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_STOPPED
        assert evaluator._ensemble.get_successful_realizations() == num_reals
Example #25
def test_run_and_cancel_legacy_ensemble(tmpdir, make_ensemble_builder):
    num_reals = 10
    custom_port_range = range(1024, 65535)
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2,
                                         job_sleep=5).build()
        config = EvaluatorServerConfig(custom_port_range=custom_port_range)

        evaluator = EnsembleEvaluator(ensemble, config, 0, ee_id="1")

        with evaluator.run() as mon:
            cancel = True
            for _ in mon.track():
                if cancel:
                    mon.signal_cancel()
                    cancel = False

        assert evaluator._ensemble.get_status() == state.ENSEMBLE_STATE_CANCELLED
Example #26
def test_run_legacy_ensemble_exception(tmpdir, unused_tcp_port,
                                       make_ensemble_builder):
    num_reals = 2
    with tmpdir.as_cwd():
        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = EvaluatorServerConfig(unused_tcp_port)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        with patch.object(ensemble,
                          "_run_path_list",
                          side_effect=RuntimeError()):
            monitor = evaluator.run()
            for e in monitor.track():
                if (e["type"] in (
                        identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
                        identifiers.EVTYPE_EE_SNAPSHOT,
                ) and e.data.get("status") in ["Failed", "Stopped"]):
                    monitor.signal_done()
            assert evaluator._snapshot.get_status() == "Failed"
Example #27
def test_cancel_run_prefect_ensemble(unused_tcp_port, coefficients):
    with tmp(Path(SOURCE_DIR) / "test-data/local/prefect_test_case"):
        config = parse_config("config.yml")
        config.update(
            {
                "config_path": os.getcwd(),
                "realizations": 2,
                "executor": "local",
            }
        )
        inputs = {}
        coeffs_trans = coefficient_transmitters(
            coefficients, config.get(ids.STORAGE)["storage_path"]
        )
        script_trans = script_transmitters(config)
        for iens in range(2):
            inputs[iens] = {**coeffs_trans[iens], **script_trans[iens]}
        config.update(
            {
                "inputs": inputs,
                "outputs": output_transmitters(config),
            }
        )

        service_config = EvaluatorServerConfig(unused_tcp_port)
        config["config_path"] = Path(config["config_path"])
        config["run_path"] = Path(config["run_path"])
        config["storage"]["storage_path"] = Path(config["storage"]["storage_path"])
        config["dispatch_uri"] = service_config.dispatch_uri

        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, 0, ee_id="2")

        with evaluator.run() as mon:
            cancel = True
            for _ in mon.track():
                if cancel:
                    mon.signal_cancel()
                    cancel = False

        assert evaluator._ensemble.get_status() == "Cancelled"
Example #28
def test_cancel_run_prefect_ensemble(unused_tcp_port):
    with tmp(Path(SOURCE_DIR) / "test-data/local/prefect_test_case"):
        config = parse_config("config.yml")
        config.update({"config_path": Path.absolute(Path("."))})
        config.update({"realizations": 2})
        config.update({"executor": "local"})

        service_config = EvaluatorServerConfig(unused_tcp_port)
        ensemble = PrefectEnsemble(config)

        evaluator = EnsembleEvaluator(ensemble, service_config, ee_id="2")

        mon = evaluator.run()
        cancel = True
        for _ in mon.track():
            if cancel:
                mon.signal_cancel()
                cancel = False

        assert evaluator._snapshot.get_status() == "Cancelled"
Example #29
def evaluate(workspace_root, evaluation_name, input_records, ensemble_config,
             stages_config):
    evaluation_tmp_dir = _create_evaluator_tmp_dir(workspace_root,
                                                   evaluation_name)

    config = EvaluatorServerConfig()
    ee_config = _build_ee_config(
        evaluation_tmp_dir,
        ensemble_config,
        stages_config,
        input_records,
        config.dispatch_uri,
    )
    ensemble = PrefectEnsemble(ee_config)

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)
    result = _run(ee)
    responses = _prepare_responses(result)

    return responses
Example #30
def test_hook_call_order_es_mda(monkeypatch):
    """
    The goal of this test is to assert that the hook call order is the same
    across different models.
    """
    test_class = MultipleDataAssimilation
    minimum_args = {
        "start_iteration": 0,
        "weights": [1],
        "analysis_module": "some_module",
        "ee_config":
        EvaluatorServerConfig(custom_port_range=range(1024, 65535)),
    }
    mock_sim_runner = MagicMock()
    mock_parent = MagicMock()
    mock_parent.runWorkflows = mock_sim_runner
    ERT_mock = MagicMock()

    test_module = inspect.getmodule(test_class)
    monkeypatch.setattr(test_module, "EnkfSimulationRunner", mock_parent)
    monkeypatch.setattr(test_module, "ERT", ERT_mock)

    test_class = test_class()
    test_class.create_context = MagicMock()
    test_class.checkMinimumActiveRealizations = MagicMock()
    test_class.parseWeights = MagicMock(return_value=[1])
    test_class.setAnalysisModule = MagicMock()
    test_class.ert = MagicMock()

    test_class.run_ensemble_evaluator = MagicMock(return_value=1)

    test_class.runSimulations(minimum_args)

    expected_calls = [
        call(expected_call, ert=ERT_mock.ert)
        for expected_call in EXPECTED_CALL_ORDER
    ]
    assert mock_sim_runner.mock_calls == expected_calls