def track(self):
    """Generator yielding events from the evaluator until it terminates.

    Blocks until the evaluator endpoint is reachable, then runs the
    monitor's receive machinery (``self._run``) on a dedicated thread
    and yields each incoming event to the caller.  The generator stops
    after yielding the ``EVTYPE_EE_TERMINATED`` event, or cleans up
    early if the consumer abandons it (``GeneratorExit``).
    """
    # Wait until the evaluator's websocket endpoint answers before
    # starting to consume events.
    asyncio.get_event_loop().run_until_complete(
        wait_for_evaluator(base_url=self._base_uri, token=self._token, cert=self._cert))
    # done_future is resolved (from this thread, via the monitor loop)
    # to tell the background receiver thread it may shut down.
    done_future = asyncio.Future(loop=self._loop)
    thread = threading.Thread(name=f"ert_monitor-{self._id}_loop",
                              target=self._run, args=(done_future, ))
    thread.start()
    event = None
    try:
        # Pull events off the thread-owned queue until termination is seen.
        while event is None or event["type"] != identifiers.EVTYPE_EE_TERMINATED:
            # _incoming.get() runs on self._loop (owned by the thread);
            # .result() blocks this (caller's) thread until an event arrives.
            event = asyncio.run_coroutine_threadsafe(
                self._incoming.get(), self._loop).result()
            yield event
        # Normal exit: signal the receiver thread that we are done.
        self._loop.call_soon_threadsafe(done_future.set_result, None)
    except GeneratorExit:
        # Consumer closed the generator early (e.g. broke out of its
        # for-loop); cancel the pending receive and unblock the thread.
        logger.debug(f"monitor-{self._id} generator exit")
        self._loop.call_soon_threadsafe(self._receive_future.cancel)
        if not done_future.done():
            self._loop.call_soon_threadsafe(done_future.set_result, None)
    # Always join so no dangling receiver thread outlives the generator.
    thread.join()
def __enter__(self):
    """Start the narrative's websocket server thread and wait for it.

    Returns:
        self, so the narrative can be used as a context manager.

    Raises:
        RuntimeError: if called while an event loop is already running
            (e.g. from within an async test) — the sync narrative must
            own the loop itself.
    """
    # Validate BEFORE spawning the worker thread: if __enter__ raises,
    # __exit__ is never called, and a thread started prior to the check
    # would be leaked with no way to join it.
    if asyncio.get_event_loop().is_running():
        raise RuntimeError(
            "sync narrative should control the loop, maybe you called it from within an async test?"
        )
    self._ws_thread = threading.Thread(target=self._sync_ws)
    self._ws_thread.start()
    # Block until the server spun up by the thread is answering.
    asyncio.get_event_loop().run_until_complete(
        wait_for_evaluator(self._conn_info["base_uri"]))
    return self
def evaluate(self, config, ee_id):
    """Begin evaluating the ensemble on a background thread.

    Records the evaluator configuration and id, blocks until the
    evaluator endpoint is reachable, then launches ``self._evaluate``
    on its own thread.
    """
    self._config = config
    self._ee_id = ee_id
    # Make sure the evaluator answers before spawning the worker thread.
    readiness = wait_for_evaluator(
        base_url=self._config.url,
        token=self._config.token,
        cert=self._config.cert,
    )
    get_event_loop().run_until_complete(readiness)
    self._evaluate_thread = threading.Thread(target=self._evaluate)
    self._evaluate_thread.start()
def test_verify_dispatch_failing_job(make_ee_config, event_loop):
    """Run the dispatch_failing_job narrative against a live evaluator."""
    config = make_ee_config(use_token=False, generate_cert=False)
    # A bare MagicMock ensemble is enough: only its snapshot dict is read.
    ensemble_stub = MagicMock()
    ensemble_stub.snapshot.to_dict.return_value = {}
    evaluator = EnsembleEvaluator(
        ensemble_stub,
        config,
        0,
        ee_id="0",
    )
    evaluator.run()
    # Don't drive the narrative until the evaluator endpoint is up.
    event_loop.run_until_complete(wait_for_evaluator(config.url))
    narrative = dispatch_failing_job()
    narrative.verify(config.client_uri, on_connect=lambda: None)
    evaluator.stop()
def test_verify_monitor_failing_evaluation(make_ee_config, event_loop):
    """Run the monitor_failing_evaluation narrative against a failing ensemble."""
    config = make_ee_config(use_token=False, generate_cert=False)
    failing_ensemble = TestEnsemble(iter=1, reals=2, steps=1, jobs=2)
    failing_ensemble.with_failure()
    evaluator = EnsembleEvaluator(
        failing_ensemble,
        config,
        0,
        ee_id="ee-0",
    )
    evaluator.run()
    # Don't drive the narrative until the evaluator endpoint is up.
    event_loop.run_until_complete(wait_for_evaluator(config.url))
    narrative = monitor_failing_evaluation()
    narrative.verify(config.client_uri, on_connect=failing_ensemble.start)
    failing_ensemble.join()
def test_verify_monitor_successful_ensemble(make_ee_config, event_loop):
    """Run the monitor_successful_ensemble narrative against a passing ensemble."""
    # Pickled b"hello world" payload the ensemble reports as its result.
    pickled_result = (
        b"\x80\x04\x95\x0f\x00\x00\x00\x00\x00\x00\x00\x8c\x0bhello world\x94."
    )
    successful_ensemble = TestEnsemble(iter=1, reals=2, steps=2, jobs=2).with_result(
        pickled_result,
        "application/octet-stream",
    )
    config = make_ee_config(use_token=False, generate_cert=False)
    evaluator = EnsembleEvaluator(
        successful_ensemble,
        config,
        0,
        ee_id="ee-0",
    )
    evaluator.run()
    # Don't drive the narrative until the evaluator endpoint is up.
    event_loop.run_until_complete(wait_for_evaluator(config.url))
    narrative = monitor_successful_ensemble()
    narrative.verify(config.client_uri, on_connect=successful_ensemble.start)
    successful_ensemble.join()
def request_termination(self): logger = logging.getLogger("ert_shared.ensemble_evaluator.tracker") # There might be some situations where the # evaluation is finished or the evaluation # is yet to start when calling this function. # In these cases the monitor is not started # # To avoid waiting too long we exit if we are not # able to connect to the monitor after 2 tries # # See issue: https://github.com/equinor/ert/issues/1250 # try: get_event_loop().run_until_complete( wait_for_evaluator( base_url=self._monitor_url, token=self._token, cert=self._cert, timeout=5, ) ) except ClientError as e: logger.warning(f"{__name__} - exception {e}") return with create_ee_monitor( self._monitor_host, self._monitor_port, token=self._token, cert=self._cert, protocol=self._protocol, ) as monitor: for e in monitor.track(): monitor.signal_cancel() break while self._drainer_thread.is_alive(): self._clear_work_queue() time.sleep(1)