def run_ensemble_evaluator(self, run_context):
    """Build a legacy ensemble from *run_context* and evaluate it.

    Realizations whose state is LOAD_FAILURE or PARENT_FAILURE are
    deselected from the active mask before the ensemble is built, the
    run is initialized, and the evaluator is executed to completion.

    :param run_context: the ERT run context describing the realizations
        to evaluate.
    :return: the number of successful realizations, as reported by
        ``EnsembleEvaluator.run_and_get_successful_realizations``.
    """
    # Hoist the accessor: the original called self.ert() five times.
    # Assumes self.ert() returns the same object on every call — TODO confirm.
    ert = self.ert()

    if run_context.get_step():
        ert.eclConfig().assert_restart()

    iactive = run_context.get_mask()
    run_context.get_sim_fs().getStateMap().deselectMatching(
        iactive,
        RealizationStateEnum.STATE_LOAD_FAILURE
        | RealizationStateEnum.STATE_PARENT_FAILURE,
    )

    ensemble = create_ensemble_builder_from_legacy(
        run_context,
        ert.resConfig().model_config.getForwardModel(),
        self._queue_config,
        ert.analysisConfig(),
        ert.resConfig(),
    ).build()

    ert.initRun(run_context)

    # ee_id is the first dash-separated segment of a time-based uuid1;
    # presumably only per-session uniqueness is needed — verify if reused.
    ee_id = str(uuid.uuid1()).split("-")[0]
    return EnsembleEvaluator(
        ensemble, load_config(), ee_id=ee_id
    ).run_and_get_successful_realizations()
async def test_run_and_cancel_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    """Run a legacy ensemble in a worker thread and cancel it via websocket."""
    num_reals = 10
    conf_file = Path(tmpdir / CONFIG_FILE)
    with tmpdir.as_cwd():
        with open(conf_file, "w") as f:
            f.write(f'port: "{unused_tcp_port}"\n')

        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = load_config(conf_file)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        eval_thread = threading.Thread(
            name="test_eval",
            target=evaluator.run_and_get_successful_realizations,
            args=(),
        )
        eval_thread.start()

        # Block until the evaluator's websocket endpoint is up.
        await wait_for_ws(config["url"], 10)

        # Ask the evaluator to cancel by sending a user-cancel event.
        cancel_event = CloudEvent(
            {
                "type": identifiers.EVTYPE_EE_USER_CANCEL,
                "source": "/ert/test/0",
                "id": "ID",
            }
        )
        async with websockets.connect(config["client_url"]) as websocket:
            await websocket.send(to_json(cancel_event))

        eval_thread.join()

        assert evaluator._snapshot.get_status() == "Cancelled"
def create_tracker(
    model,
    general_interval=5,
    detailed_interval=10,
    qtimer_cls=None,
    event_handler=None,
    num_realizations=None,
):
    """Creates a tracker tracking a @model. The provided model
    is updated in two tiers: @general_interval, @detailed_interval.
    Setting any interval to <=0 disables update.

    Should a @qtimer_cls be defined, the Qt event loop will be used for
    tracking. @event_handler must then be defined.

    If @num_realizations is defined, then the intervals are scaled
    according to some affine transformation such that it is tractable to
    do tracking.

    If the "ensemble-evaluator" feature is enabled, the tracker is given
    connection details for an ensemble-evaluator monitor.
    """
    if num_realizations is not None:
        general_interval, detailed_interval = scale_intervals(num_realizations)

    ee_config = load_config()
    if FeatureToggling.is_enabled("ensemble-evaluator"):
        ee_monitor_connection_details = (
            ee_config.get("host"),
            ee_config.get("port"),
        )
    else:
        ee_monitor_connection_details = None

    if qtimer_cls:
        if not event_handler:
            # Bug fix: the original message concatenated
            # "…defined if" + "qtimer_cls…" without a separating space.
            raise ValueError(
                "event_handler must be defined if qtimer_cls is defined"
            )
        return QTimerTracker(
            model,
            qtimer_cls,
            general_interval,
            detailed_interval,
            event_handler,
            ee_monitor_connection_details,
        )
    return BlockingTracker(
        model,
        general_interval,
        detailed_interval,
        ee_monitor_connection_details,
    )
def test_run_legacy_ensemble(tmpdir, unused_tcp_port, make_ensemble_builder):
    """Run a small legacy ensemble to completion and count successes."""
    num_reals = 2
    conf_file = Path(tmpdir / CONFIG_FILE)
    with tmpdir.as_cwd():
        with open(conf_file, "w") as f:
            f.write(f'port: "{unused_tcp_port}"\n')

        ensemble = make_ensemble_builder(tmpdir, num_reals, 2).build()
        config = load_config(conf_file)
        evaluator = EnsembleEvaluator(ensemble, config, ee_id="1")

        monitor = evaluator.run()
        snapshot_types = (
            identifiers.EVTYPE_EE_SNAPSHOT_UPDATE,
            identifiers.EVTYPE_EE_SNAPSHOT,
        )
        for event in monitor.track():
            # Stop tracking once a snapshot(-update) reports "Stopped".
            if event["type"] in snapshot_types and event.data.get("status") == "Stopped":
                monitor.signal_done()

        assert evaluator.get_successful_realizations() == num_reals
def request_termination(self):
    """Ask a running ensemble evaluation to cancel itself."""
    logger = logging.getLogger("ert_shared.ensemble_evaluator.tracker")
    config = load_config()
    # The evaluation may already be finished, or may not yet have
    # started, when this is called — in those situations the monitor
    # was never started.
    #
    # To avoid waiting too long, bail out if we cannot reach the
    # monitor's websocket within 2 tries.
    #
    # See issue: https://github.com/equinor/ert/issues/1250
    try:
        wait_for_ws(config.get("url"), 2)
    except ConnectionRefusedError as e:
        logger.warning(f"{__name__} - exception {e}")
        return

    monitor = create_ee_monitor(self._monitor_host, self._monitor_port)
    monitor.signal_cancel()
def test_load_config(tmpdir, host, port):
    """load_config merges host/port from the file with defaults."""
    expected_host = host or DEFAULT_HOST
    expected_port = port or DEFAULT_PORT
    expected_config = {
        "host": expected_host,
        "port": expected_port,
        "url": f"ws://{expected_host}:{expected_port}",
        "client_url": f"ws://{expected_host}:{expected_port}/{CLIENT_URI}",
        "dispatch_url": f"ws://{expected_host}:{expected_port}/{DISPATCH_URI}",
    }

    # Only the parametrized values that are actually provided go in the file.
    config_dict = {
        key: value
        for key, value in (("host", host), ("port", port))
        if value is not None
    }

    with tmpdir.as_cwd():
        with open("ee_config", "w") as f:
            json.dump(config_dict, f)
        res = load_config("ee_config")
        assert res == expected_config
def test_load_config_fail():
    """Loading a nonexistent config file surfaces FileNotFoundError."""
    pytest.raises(
        FileNotFoundError, load_config, config_path="non/existing/file/path.yml"
    )