Example #1
    def _cancel(self):
        asyncio.set_event_loop(asyncio.new_event_loop())
        logger.debug("cancelling, waiting for wakeup...")
        self._allow_cancel.wait()
        logger.debug("got wakeup, killing all jobs...")
        self._job_queue.kill_all_jobs()
        logger.debug("cancelling futures...")
        if self._aggregate_future.cancelled():
            logger.debug("aggregate future was already cancelled")
        else:
            self._aggregate_future.cancel()
            logger.debug("aggregate future cancelled")

        out_cloudevent = CloudEvent({
            "type": identifiers.EVTYPE_ENSEMBLE_CANCELLED,
            "source": f"/ert/ee/{self._ee_id}/ensemble",
            "id": str(uuid.uuid1()),
        })
        get_event_loop().run_until_complete(
            self.send_cloudevent(
                self._config.dispatch_uri,
                out_cloudevent,
                token=self._config.token,
                cert=self._config.cert,
            ))
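Example #1 installs a fresh event loop before running its coroutine, which is the pattern you need when the code executes outside the main thread. Below is a minimal standard-library sketch of that pattern; the _notify coroutine is a hypothetical stand-in for send_cloudevent, and plain asyncio.get_event_loop replaces the project's get_event_loop helper.

import asyncio
import threading


async def _notify() -> str:
    # stand-in for send_cloudevent(...): pretend to do some I/O
    await asyncio.sleep(0)
    return "cancelled"


def _worker() -> None:
    # a non-main thread has no event loop by default, so install one first
    asyncio.set_event_loop(asyncio.new_event_loop())
    result = asyncio.get_event_loop().run_until_complete(_notify())
    print(result)


t = threading.Thread(target=_worker)
t.start()
t.join()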
Example #2
def test_get_ensemble_responses(responses, expected_result, tmpdir,
                                ert_storage):
    ert.storage.init(workspace_name=tmpdir)
    experiment = "exp"
    ert.storage.init_experiment(
        experiment_name=experiment,
        parameters=[],
        ensemble_size=1,
        responses=responses,
    )
    # we need to transmit the responses
    for response_name in responses:
        if "blob" in response_name:
            ensemble_record = ert.data.RecordCollection(
                records=tuple([ert.data.BlobRecord(data=b"\xF0\x9F\xA6\x89")]))
        else:
            ensemble_record = ert.data.RecordCollection(records=tuple(
                [ert.data.NumericalRecord(data=[0]) for rid in range(1)]))
        future = ert.storage.transmit_record_collection(
            record_coll=ensemble_record,
            record_name=response_name,
            workspace_name=tmpdir,
            experiment_name=experiment,
        )
        get_event_loop().run_until_complete(future)

    fetched_ensemble_responses = ert.storage.get_experiment_responses(
        experiment_name=experiment)

    assert set(fetched_ensemble_responses) == set(expected_result)
Example #3
    def _attempt_execute(self, *, func, transmitters):
        async def _load(io_, transmitter):
            record = await transmitter.load()
            return (io_.get_name(), record)

        futures = []
        for input_ in self._step.get_inputs():
            futures.append(_load(input_, transmitters[input_.get_name()]))
        results = get_event_loop().run_until_complete(asyncio.gather(*futures))
        kwargs = {result[0]: result[1].data for result in results}
        function_output = func(**kwargs)

        async def _transmit(io_, transmitter: RecordTransmitter, data):
            record: Record = (
                BlobRecord(data=data)
                if isinstance(data, bytes)
                else NumericalRecord(data=data)
            )
            await transmitter.transmit_record(record)
            return (io_.get_name(), transmitter)

        futures = []
        for output in self._step.get_outputs():
            transmitter = self._output_transmitters[output.get_name()]
            futures.append(
                _transmit(output, transmitter, function_output[output.get_name()])
            )
        results = get_event_loop().run_until_complete(asyncio.gather(*futures))
        transmitter_map = {result[0]: result[1] for result in results}
        return transmitter_map
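Both halves of _attempt_execute above collect coroutines in a list and hand them to run_until_complete(asyncio.gather(...)) so they run concurrently on the loop. Here is a minimal standard-library sketch of that fan-out/fan-in pattern; load is a hypothetical stand-in for transmitter.load, and a plain asyncio loop replaces the project's get_event_loop helper.

import asyncio


async def load(name: str) -> tuple:
    # stand-in for transmitter.load(): pretend to fetch a record
    await asyncio.sleep(0)
    return (name, f"record for {name}")


loop = asyncio.new_event_loop()
futures = [load(name) for name in ("alpha", "beta", "gamma")]
# gather runs all coroutines concurrently and preserves their order
results = loop.run_until_complete(asyncio.gather(*futures))
print(dict(results))
loop.close()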
Example #4
    def request_termination(self) -> None:
        logger = logging.getLogger("ert_shared.ensemble_evaluator.tracker")
        # The evaluation may already be finished, or may not yet have
        # started, when this function is called. In those cases the
        # monitor is not running.
        #
        # To avoid blocking for too long, give up if the evaluator
        # cannot be reached within the timeout below.
        #
        # See issue: https://github.com/equinor/ert/issues/1250
        #
        try:
            get_event_loop().run_until_complete(
                wait_for_evaluator(
                    base_url=self._ee_con_info.url,
                    token=self._ee_con_info.token,
                    cert=self._ee_con_info.cert,
                    timeout=5,
                ))
        except ClientError as e:
            logger.warning(f"{__name__} - exception {e}")
            return

        with create_ee_monitor(self._ee_con_info) as monitor:
            monitor.signal_cancel()
        while self._drainer_thread.is_alive():
            self._clear_work_queue()
            time.sleep(1)
Example #5
def get_ensemble_record(
    *,
    workspace_name: str,
    record_name: str,
    ensemble_size: int,
    experiment_name: Optional[str] = None,
    source: Optional[str] = None,
) -> ert.data.RecordCollection:
    records_url = ert.storage.get_records_url(
        workspace_name=workspace_name, experiment_name=experiment_name
    )

    transmitters, collection_type = get_event_loop().run_until_complete(
        _get_record_collection(
            records_url=records_url,
            record_name=record_name,
            record_source=source,
            ensemble_size=ensemble_size,
        )
    )
    records = tuple(
        get_event_loop().run_until_complete(transmitter.load())
        for transmitter in transmitters
    )
    return ert.data.RecordCollection(
        records=records, length=ensemble_size, collection_type=collection_type
    )
Example #6
def test_parameter_array():
    sample_size = 15000
    harmonics = 4
    param_size = 3
    analysis_size = 1
    parameters = {
        "xs": ert3.stats.Uniform(lower_bound=-np.pi, upper_bound=np.pi, size=3)
    }

    samples = ert3.algorithms.fast_sample(parameters, harmonics, sample_size)
    assert_samples(samples, sample_size, param_size, parameters)

    model_output = {}
    futures = []
    for iens, sample in enumerate(samples):
        x = sample["xs"].data[0]
        y = sample["xs"].data[1]
        z = sample["xs"].data[2]
        t = ert.data.InMemoryRecordTransmitter("output")
        futures.append(
            t.transmit_record(
                ert.data.NumericalRecord(data=ishigami_single(x, y, z))))
        model_output[iens] = {"output": t}
    get_event_loop().run_until_complete(asyncio.gather(*futures))

    analysis = ert3.algorithms.fast_analyze(parameters, model_output,
                                            harmonics)
    assert_analysis(analysis, analysis_size, param_size,
                    parameters["xs"].index)

    S1 = analysis[0]["S1"]
    ST = analysis[0]["ST"]
    np.testing.assert_allclose(S1, ISHIGAMI_S1, 1e-4, 1e-4)
    np.testing.assert_allclose(ST, ISHIGAMI_ST, 1e-4, 1e-4)
Example #7
def test_sample_size(sample_size):
    harmonics = 4
    param_size = 3
    analysis_size = 1
    parameters = {
        "xs":
        ert3.stats.Uniform(lower_bound=-np.pi,
                           upper_bound=np.pi,
                           index=tuple(["x", "y", "z"]))
    }
    samples = ert3.algorithms.fast_sample(parameters, harmonics, sample_size)
    assert_samples(samples, sample_size, param_size, parameters)

    model_output = {}
    futures = []
    for iens, sample in enumerate(samples):
        x = sample["xs"].data["x"]
        y = sample["xs"].data["y"]
        z = sample["xs"].data["z"]
        t = ert.data.InMemoryRecordTransmitter("output")
        futures.append(
            t.transmit_record(
                ert.data.NumericalRecord(data=ishigami_single(x, y, z))))
        model_output[iens] = {"output": t}
    get_event_loop().run_until_complete(asyncio.gather(*futures))

    analysis = ert3.algorithms.fast_analyze(parameters, model_output,
                                            harmonics)
    assert_analysis(analysis, analysis_size, param_size,
                    parameters["xs"].index)
Example #8
def test_single_evaluation(distribution):
    sample_size = 15000
    harmonics = 4
    param_size = 3
    analysis_size = 1
    parameters = {"xs": distribution}

    samples = ert3.algorithms.fast_sample(parameters, harmonics, sample_size)
    assert_samples(samples, sample_size, param_size, parameters)

    model_output = {}
    futures = []
    for iens, sample in enumerate(samples):
        x = sample["xs"].data["x"]
        y = sample["xs"].data["y"]
        z = sample["xs"].data["z"]
        t = ert.data.InMemoryRecordTransmitter("output")
        futures.append(
            t.transmit_record(
                ert.data.NumericalRecord(data=ishigami_single(x, y, z))))
        model_output[iens] = {"output": t}
    get_event_loop().run_until_complete(asyncio.gather(*futures))

    analysis = ert3.algorithms.fast_analyze(parameters, model_output,
                                            harmonics)
    assert_analysis(analysis, analysis_size, param_size,
                    parameters["xs"].index)

    if distribution.type == "uniform":
        S1 = analysis[0]["S1"]
        ST = analysis[0]["ST"]
        np.testing.assert_allclose(S1, ISHIGAMI_S1, 1e-4, 1e-4)
        np.testing.assert_allclose(ST, ISHIGAMI_ST, 1e-4, 1e-4)
Example #9
    def run(self, inputs=None):
        async def transform_output(output):
            record = await output.get_transformation().transform_output(
                mime=output.get_mime(),
                location=run_path / output.get_path(),
            )
            transmitter = outputs[output.get_name()]
            await transmitter.transmit_record(record)

        with tempfile.TemporaryDirectory() as run_path:
            run_path = Path(run_path)
            self._load_and_dump_input(transmitters=inputs, runpath=run_path)
            _send_event(
                ids.EVTYPE_FM_STEP_RUNNING,
                self._step.get_source(self._ee_id),
            )

            outputs = {}
            self.run_jobs(run_path)

            futures = []
            for output in self._step.get_outputs():
                if not (run_path / output.get_path()).exists():
                    raise FileNotFoundError(
                        f"Output file {output.get_path()} was not generated!")

                outputs[output.get_name()] = self._output_transmitters[
                    output.get_name()]
                futures.append(transform_output(output))
            get_event_loop().run_until_complete(asyncio.gather(*futures))
            _send_event(
                ids.EVTYPE_FM_STEP_SUCCESS,
                self._step.get_source(self._ee_id),
            )
        return outputs
Example #10
def test_get_record_names(tmpdir, ert_storage):
    ert.storage.init(workspace_name=tmpdir)
    ensemble_size = 5
    experiment_records = collections.defaultdict(list)
    for eid in [1, 2, 3]:
        experiment = "e" + str(eid)
        ert.storage.init_experiment(
            experiment_name=experiment,
            parameters={},
            ensemble_size=ensemble_size,
            responses=[],
        )
        for nid in range(1, 3):
            name = nid * "n"
            ensemble_record = ert.data.RecordCollection(records=tuple([
                ert.data.NumericalRecord(data=[0])
                for rid in range(ensemble_size)
            ]))
            future = ert.storage.transmit_record_collection(
                record_coll=ensemble_record,
                record_name=name,
                workspace_name=tmpdir,
                experiment_name=experiment,
            )

            get_event_loop().run_until_complete(future)
            experiment_records[str(experiment)].append(name)

            recnames = ert.storage.get_ensemble_record_names(
                workspace_name=tmpdir, experiment_name=experiment)
            assert sorted(
                experiment_records[str(experiment)]) == sorted(recnames)
Example #11
    def __enter__(self):
        self._ws_thread = threading.Thread(target=self._sync_ws)
        self._ws_thread.start()
        if get_event_loop().is_running():
            raise RuntimeError(
                "sync narrative should control the loop, "
                "maybe you called it from within an async test?")
        get_event_loop().run_until_complete(
            wait_for_evaluator(self._conn_info["base_uri"]))
        return self
Example #12
    def evaluate(self, config, ee_id):
        self._config = config
        self._ee_id = ee_id
        get_event_loop().run_until_complete(
            wait_for_evaluator(
                base_url=self._config.url,
                token=self._config.token,
                cert=self._config.cert,
            ))
        self._evaluate_thread = threading.Thread(target=self._evaluate)
        self._evaluate_thread.start()
Example #13
def create_script_transmitter(name: str, location: Path, transmitter_factory):
    async def transform_output(transmitter, mime, location):
        transformation = ert.data.ExecutableRecordTransformation()
        record = await transformation.transform_output(mime, location)
        await transmitter.transmit_record(record)

    script_transmitter = transmitter_factory(name)
    get_event_loop().run_until_complete(
        transform_output(script_transmitter,
                         mime="application/octet-stream",
                         location=location))
    return script_transmitter
Example #14
    def verify(self, on_connect):
        self._ws_thread = threading.Thread(target=self._sync_listener,
                                           args=[on_connect])
        self._ws_thread.start()
        if get_event_loop().is_running():
            raise RuntimeError(
                "sync narrative should control the loop, "
                "maybe you called verify() from within an async test?")
        self._ws_thread.join()
        errors = get_event_loop().run_until_complete(self._collect_errors())
        if errors:
            raise AssertionError(errors)
Example #15
def test_run_uniform_presampled(
    workspace_integration,
    presampled_uniform_ensemble,
    stages_config,
    evaluation_experiment_config,
    uniform_parameters_config,
):
    workspace = workspace_integration
    presampled_dir = (
        workspace._path / _EXPERIMENTS_BASE / "presampled_uniform_evaluation"
    )
    presampled_dir.mkdir(parents=True)
    with assert_clean_workspace(workspace):
        uniform_coeff0 = ert3.engine.sample_record(
            uniform_parameters_config,
            "uniform_coefficients",
            10,
        )

        future = ert.storage.transmit_record_collection(
            record_coll=uniform_coeff0,
            record_name="uniform_coefficients0",
            workspace_name=workspace.name,
        )
        get_event_loop().run_until_complete(future)
        assert 10 == len(uniform_coeff0)
        for real_coeff in uniform_coeff0.records:
            assert sorted(("a", "b", "c")) == sorted(real_coeff.index)
            for idx in real_coeff.index:
                assert isinstance(real_coeff.data[idx], float)

        experiment_run_config = ert3.config.ExperimentRunConfig(
            evaluation_experiment_config,
            stages_config,
            presampled_uniform_ensemble,
            uniform_parameters_config,
        )
        ert3.engine.run(
            experiment_run_config, workspace, "presampled_uniform_evaluation"
        )
    with assert_clean_workspace(workspace, allowed_files={"data.json"}):
        ert3.engine.export(
            workspace, "presampled_uniform_evaluation", experiment_run_config
        )

    export_data = _load_export_data(workspace, "presampled_uniform_evaluation")
    assert len(uniform_coeff0) == len(export_data)
    for coeff, real in zip(uniform_coeff0.records, export_data):
        assert ["coefficients"] == list(real["input"].keys())
        export_coeff = real["input"]["coefficients"]
        assert sorted(coeff.index) == sorted(export_coeff.keys())
        for key in coeff.index:
            assert coeff.data[key] == export_coeff[key]
Example #16
    async def _async_proxy(self, url, q):
        self.done = get_event_loop().create_future()

        async def handle_messages(msg_q: asyncio.Queue, done: asyncio.Future):
            try:
                for interaction in self.narrative.interactions:
                    await interaction.verify(msg_q.get)
            except Exception as e:
                done.set_result(e)

        async def handle_server(server, client, msg_q):
            async for msg in server:
                await msg_q.put((InteractionDirection.RESPONSE, msg))
                await client.send(msg)

        async def handle_client(client, _path):
            msg_q = asyncio.Queue()
            if _path == "/client":
                async with websockets.connect(url + _path) as server:
                    msg_task = asyncio.ensure_future(
                        handle_messages(msg_q, self.done))
                    server_task = asyncio.ensure_future(
                        handle_server(server, client, msg_q))

                    async for msg in client:
                        await msg_q.put((InteractionDirection.REQUEST, msg))
                        await server.send(msg)

                    server_task.cancel()
                    await server_task
                    await msg_task

        async with websockets.serve(
                handle_client,
                host="localhost",
                port=0,
                process_request=self.process_request,
        ) as s:
            local_family = port_handler.get_family_for_localhost()
            port = [
                p.getsockname()[1] for p in s.sockets
                if p.family == local_family
            ]
            if len(port) == 0:
                self.done.set_result(
                    aiohttp.ClientError(
                        "Unable to find suitable port for proxy"))

            get_event_loop().run_in_executor(None, lambda: q.put(port[0]))
            get_event_loop().run_in_executor(None, lambda: q.put(self.done))
            error = await self.done
            q.put(error)
Example #17
    def evaluate(self, config: "EvaluatorServerConfig", ee_id: str) -> None:
        if not config:
            raise ValueError(f"no config for evaluator {ee_id}")
        self._config = config
        self._ee_id = ee_id
        get_event_loop().run_until_complete(
            wait_for_evaluator(
                base_url=self._config.url,
                token=self._config.token,
                cert=self._config.cert,
            ))

        threading.Thread(target=self._evaluate, name="LegacyEnsemble").start()
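Examples #4, #12 and #17 all block on wait_for_evaluator first, so the synchronous caller only continues once the evaluator endpoint is reachable. Below is a rough standard-library sketch of the same "wait until the service is up" idea, using a plain TCP probe instead of ert's wait_for_evaluator; the host, port and timeout are made up.

import asyncio


async def wait_for_endpoint(host: str, port: int, timeout: float) -> None:
    async def _probe() -> None:
        # retry until the endpoint accepts a TCP connection
        while True:
            try:
                _, writer = await asyncio.open_connection(host, port)
                writer.close()
                await writer.wait_closed()
                return
            except OSError:
                await asyncio.sleep(0.1)

    await asyncio.wait_for(_probe(), timeout=timeout)


loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(wait_for_endpoint("127.0.0.1", 8765, timeout=1.0))
    print("endpoint is up")
except asyncio.TimeoutError:
    print("endpoint did not come up in time")
finally:
    loop.close()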
Example #18
    def _evaluate(self):
        get_event_loop()
        try:
            with Client(
                    self._ee_config.dispatch_uri,
                    self._ee_config.token,
                    self._ee_config.cert,
            ) as c:
                event = CloudEvent({
                    "type": ids.EVTYPE_ENSEMBLE_STARTED,
                    "source": f"/ert/ee/{self._ee_id}",
                })
                c.send(to_json(event).decode())
            with prefect.context(
                    url=self._ee_config.dispatch_uri,
                    token=self._ee_config.token,
                    cert=self._ee_config.cert,
            ):
                self.run_flow(self._ee_id)

            with Client(
                    self._ee_config.dispatch_uri,
                    self._ee_config.token,
                    self._ee_config.cert,
            ) as c:
                event = CloudEvent(
                    {
                        "type": ids.EVTYPE_ENSEMBLE_STOPPED,
                        "source": f"/ert/ee/{self._ee_id}",
                        "datacontenttype": "application/octet-stream",
                    },
                    cloudpickle.dumps(self._outputs),
                )
                c.send(to_json(event).decode())
        except Exception as e:
            logger.exception(
                "An exception occurred while starting the ensemble evaluation",
                exc_info=True,
            )
            with Client(
                    self._ee_config.dispatch_uri,
                    self._ee_config.token,
                    self._ee_config.cert,
            ) as c:
                event = CloudEvent({
                    "type": ids.EVTYPE_ENSEMBLE_FAILED,
                    "source": f"/ert/ee/{self._ee_id}",
                })
                c.send(to_json(event).decode())
Example #19
def add_commands(
    transportable_commands: Tuple[ert3.config.TransportableCommand, ...],
    storage_type: str,
    storage_path: str,
    step: StepBuilder,
) -> None:
    def command_location(name: str) -> FilePath:
        return next(
            (cmd.location for cmd in transportable_commands if cmd.name == name),
            pathlib.Path(name),
        )

    async def transform_output(
        transmitter: ert.data.RecordTransmitter, mime: str, location: pathlib.Path
    ) -> None:
        transformation = ert.data.ExecutableRecordTransformation()
        record = await transformation.transform_output(mime, location)
        await transmitter.transmit_record(record)

    for command in transportable_commands:
        transmitter: ert.data.RecordTransmitter
        if storage_type == "shared_disk":
            transmitter = ert.data.SharedDiskRecordTransmitter(
                name=command.name,
                storage_path=pathlib.Path(storage_path),
            )
        elif storage_type == "ert_storage":
            transmitter = ert.storage.StorageRecordTransmitter(
                name=command.name, storage_url=storage_path
            )
        else:
            raise ValueError(f"Unsupported transmitter type: {storage_type}")
        get_event_loop().run_until_complete(
            transform_output(
                transmitter=transmitter,
                mime="application/octet-stream",
                location=command.location,
            )
        )
        step.add_input(
            create_file_io_builder()
            .set_name(command.name)
            .set_path(command_location(command.name))
            .set_mime("application/octet-stream")
            .set_transformation(ert.data.ExecutableRecordTransformation())
            # cast necessary due to https://github.com/python/mypy/issues/9656
            .set_transmitter_factory(
                lambda _t=transmitter: cast(ert.data.RecordTransmitter, _t)
            )
        )
Example #20
    def run(  # type: ignore  # pylint: disable=arguments-differ
        self,
        inputs: Optional[_stage_transmitter_mapping] = None,
    ) -> _stage_transmitter_mapping:
        async def transform_output(
            transformation: "ert.data.RecordTransformation",
            transmitter: "ert.data.RecordTransmitter",
            run_path: Path,
        ) -> None:
            record = await transformation.to_record(root_path=run_path)
            await transmitter.transmit_record(record)

        with create_runpath(self._run_path) as run_path:
            if inputs is not None:
                self._load_and_dump_input(transmitters=inputs,
                                          runpath=run_path)
            _send_event(
                EVTYPE_FM_STEP_RUNNING,
                self.step.source(self._ee_id),
            )

            outputs: _stage_transmitter_mapping = {}
            self.run_jobs(run_path)

            futures = []
            for output in self.step.outputs:
                transformation = output.transformation
                if not transformation:
                    raise ValueError(
                        f"no transformation for output '{output.name}'")
                if not isinstance(transformation, FileTransformation):
                    raise ValueError(
                        f"got unexpected transformation {transformation} for "
                        + f"'{output.name}'")
                if not (run_path / transformation.location).exists():
                    raise FileNotFoundError(
                        f"Output '{output.name}' file {transformation.location} was not"
                        + " generated!")

                transmitter = self._output_transmitters[output.name]
                outputs[output.name] = transmitter
                futures.append(
                    transform_output(transformation, transmitter, run_path))

            get_event_loop().run_until_complete(asyncio.gather(*futures))
            _send_event(
                EVTYPE_FM_STEP_SUCCESS,
                self.step.source(self._ee_id),
            )
        return outputs
Example #21
def _record(workspace: Workspace, args: Any) -> None:
    assert args.sub_cmd == "record"
    if args.sub_record_cmd == "sample":
        parameters_config = workspace.load_parameters_config()
        collection = ert3.engine.sample_record(
            parameters_config,
            args.parameter_group,
            args.ensemble_size,
        )
        future = ert.storage.transmit_record_collection(
            record_coll=collection,
            record_name=args.record_name,
            workspace_name=workspace.name,
        )
        get_event_loop().run_until_complete(future)

    elif args.sub_record_cmd == "load":
        if args.mime_type == "guess" and not args.blob_record:
            guess = mimetypes.guess_type(str(args.record_file))[0]
            if guess:
                if ert.serialization.has_serializer(guess):
                    record_mime = guess
                else:
                    print(f"Unsupported type '{guess}', defaulting to " +
                          f"'{DEFAULT_RECORD_MIME_TYPE}'.")
                    record_mime = DEFAULT_RECORD_MIME_TYPE
            else:
                print(f"Unable to guess what type '{args.record_file}' is, " +
                      f"defaulting to '{DEFAULT_RECORD_MIME_TYPE}'.")
                record_mime = DEFAULT_RECORD_MIME_TYPE
        else:
            record_mime = args.mime_type

        if args.blob_record or args.is_directory:
            record_mime = "application/octet-stream"

        get_event_loop().run_until_complete(
            ert3.engine.load_record(
                workspace,
                args.record_name,
                args.record_file,
                record_mime,
                args.is_directory,
            ))
    else:
        raise NotImplementedError(
            f"No implementation to handle record command {args.sub_record_cmd}"
        )
Example #22
    def _attempt_execute(
        self,
        *,
        func: Callable[..., Any],
        transmitters: _stage_transmitter_mapping,
    ) -> _stage_transmitter_mapping:
        async def _load(
            io_: _IO, transmitter: "ert.data.RecordTransmitter"
        ) -> Tuple[str, Record]:
            record = await transmitter.load()
            return (io_.name, record)

        input_futures = []
        for input_ in self.step.inputs:
            transmitter = transmitters[input_.name]
            if transmitter:
                input_futures.append(_load(input_, transmitter))
            else:
                self.logger.info("no transmitter for input %s", input_.name)
        results = get_event_loop().run_until_complete(asyncio.gather(*input_futures))
        kwargs = {result[0]: result[1].data for result in results}
        function_output: Dict[str, record_data] = func(**kwargs)

        async def _transmit(
            io_: _IO, transmitter: "ert.data.RecordTransmitter", data: record_data
        ) -> Tuple[str, "ert.data.RecordTransmitter"]:
            record: Record = (
                BlobRecord(data=data)
                if isinstance(data, bytes)
                else NumericalRecord(data=data)
            )
            await transmitter.transmit_record(record)
            return (io_.name, transmitter)

        output_futures = []
        for output in self.step.outputs:
            transmitter = self._output_transmitters[output.name]
            if transmitter:
                output_futures.append(
                    _transmit(output, transmitter, function_output[output.name])
                )
            else:
                self.logger.info("no transmitter for output %s", output.name)
        results = get_event_loop().run_until_complete(asyncio.gather(*output_futures))
        transmitter_map: _stage_transmitter_mapping = {
            result[0]: result[1] for result in results
        }
        return transmitter_map
Example #23
    def __init__(
        self,
        interactions: List[_Interaction],
        conn_info: _ConnectionInformation,
        ce_serializer: _CloudEventSerializer,
    ) -> None:
        self._interactions: List[_Interaction] = interactions
        self._loop: Optional[AbstractEventLoop] = None
        self._ws: Optional[WebSocketServer] = None
        self._conn_info = conn_info
        self._ce_serializer = ce_serializer

        # ensure there is an event loop in case we are not on main loop
        get_event_loop()
        # A queue on which errors will be put
        self._errors: asyncio.Queue = asyncio.Queue()
Example #24
    def setup_timeout_callback(
        self, timeout_queue: "asyncio.Queue[CloudEvent]"
    ) -> Tuple[Callable[[List[Any]], Any], "asyncio.Task[None]"]:
        def on_timeout(callback_args: Sequence[Any]) -> None:
            run_args: RunArg = callback_args[0]
            timeout_cloudevent = CloudEvent({
                "type": identifiers.EVTYPE_FM_STEP_TIMEOUT,
                "source": f"/ert/ee/{self._ee_id}/real/{run_args.iens}/step/0",
                "id": str(uuid.uuid1()),
            })
            timeout_queue.put_nowait(timeout_cloudevent)

        async def send_timeout_message() -> None:
            while True:
                timeout_cloudevent = await timeout_queue.get()
                if timeout_cloudevent is None:
                    break
                assert self._config  # mypy
                await self.send_cloudevent(
                    self._config.dispatch_uri,
                    timeout_cloudevent,
                    token=self._config.token,
                    cert=self._config.cert,
                )

        send_timeout_future = get_event_loop().create_task(
            send_timeout_message())

        return on_timeout, send_timeout_future
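setup_timeout_callback does not run its coroutine immediately: create_task only schedules send_timeout_message on the loop, and the task drains the queue until a None sentinel arrives once the loop actually runs. Here is a minimal standard-library sketch of that queue-plus-sentinel pattern; consume is a hypothetical stand-in for send_timeout_message.

import asyncio


async def consume(queue: asyncio.Queue) -> None:
    # stand-in for send_timeout_message: handle items until the sentinel
    while True:
        item = await queue.get()
        if item is None:
            break
        print("handled", item)


loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
queue: asyncio.Queue = asyncio.Queue()
# the task is only scheduled here; it starts running once the loop runs
consumer = loop.create_task(consume(queue))

queue.put_nowait({"type": "timeout", "real": 0})
queue.put_nowait(None)
loop.run_until_complete(consumer)
loop.close()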
Example #25
def test_run_prefect_for_function_defined_outside_py_environment(
    evaluator_config,
    coefficients,
    function_ensemble_builder_factory,
    ensemble_size,
    external_sum_function,
):
    """Ensemble built from outside env. Assert state, realizations and result"""
    # Build ensemble and run on server
    ensemble = (function_ensemble_builder_factory(
        external_sum_function).set_retry_delay(1).set_max_retries(0).build())
    evaluator = EnsembleEvaluator(ensemble, evaluator_config, 0, ee_id="1")
    with evaluator.run() as mon:
        for event in mon.track():
            if event["type"] == ids.EVTYPE_EE_TERMINATED:
                results = pickle.loads(event.data)
            wait_until_done(mon, event)
    assert evaluator._ensemble.status == state.ENSEMBLE_STATE_STOPPED
    successful_realizations = evaluator._ensemble.get_successful_realizations()
    assert successful_realizations == ensemble_size
    expected_results = [
        pickle.loads(external_sum_function)(coeffs)["function_output"]
        for coeffs in coefficients
    ]
    transmitter_futures = [
        res["function_output"].load() for res in results.values()
    ]
    results = get_event_loop().run_until_complete(
        asyncio.gather(*transmitter_futures))
    assert expected_results == [res.data for res in results]
Example #26
def step_test_script_transmitter(test_data_path, transmitter_factory,
                                 script_name):
    async def transform_output(transmitter, mime, location):
        transformation = ert.data.ExecutableRecordTransformation()
        record = await transformation.transform_output(mime, location)
        await transmitter.transmit_record(record)

    script_transmitter = transmitter_factory("script")
    get_event_loop().run_until_complete(
        transform_output(
            transmitter=script_transmitter,
            mime="application/octet-stream",
            location=test_data_path / script_name,
        ))
    return script_transmitter
Example #27
def get_inputs(coeffs):
    input_records = {}
    futures = []
    for iens, (a, b, c) in enumerate(coeffs):
        record_name = "coefficients"
        t = ert.data.InMemoryRecordTransmitter(record_name)
        futures.append(
            t.transmit_record(
                ert.data.NumericalRecord(data={
                    "a": a,
                    "b": b,
                    "c": c
                })))
        input_records[iens] = {record_name: t}
    get_event_loop().run_until_complete(asyncio.gather(*futures))
    return input_records
Example #28
def assert_prefect_flow_run(
    result,
    flow_run,
    expect=True,
    step_type="unix",
    step_output=None,
    expected_result=None,
):
    """Assert if statements associated a prefect-flow-run are valid"""
    output_name = "output" if step_type == "unix" else "function_output"
    task_result = flow_run.result[result]
    # Check for all parametrizations
    assert task_result.is_successful() == expect
    assert flow_run.is_successful() == expect
    # Check when success is True
    if expect:
        assert len(task_result.result) == 1
        expected_uri = step_output[output_name]._uri
        output_uri = task_result.result[output_name]._uri
        assert expected_uri == output_uri
        # If function-step: Check result
        if step_type == "function":
            transmitted_record = get_event_loop().run_until_complete(
                task_result.result[output_name].load())
            transmitted_result = transmitted_record.data
            assert transmitted_result == expected_result
    else:
        # Check when success is False
        assert isinstance(task_result.result, Exception)
        assert ("unix_test_script.py: error: unrecognized arguments: bar"
                in task_result.message)
Example #29
    def setup_timeout_callback(self, timeout_queue):
        def on_timeout(callback_args):
            run_args: RunArg = callback_args[0]
            timeout_cloudevent = CloudEvent({
                "type": identifiers.EVTYPE_FM_STEP_TIMEOUT,
                "source": f"/ert/ee/{self._ee_id}/real/{run_args.iens}/step/0",
                "id": str(uuid.uuid1()),
            })
            timeout_queue.put_nowait(timeout_cloudevent)

        dispatch_url = self._config.dispatch_uri
        cert = self._config.cert
        token = self._config.token

        async def send_timeout_message():
            while True:
                timeout_cloudevent = await timeout_queue.get()
                if timeout_cloudevent is None:
                    break
                await self.send_cloudevent(dispatch_url,
                                           timeout_cloudevent,
                                           token=token,
                                           cert=cert)

        send_timeout_future = get_event_loop().create_task(
            send_timeout_message())

        return on_timeout, send_timeout_future
Example #30
    def __exit__(self, *args, **kwargs):
        if self._loop and self._done:
            self._loop.call_soon_threadsafe(self._done.set_result, None)
        if self._ws:
            self._ws_thread.join()
        errors = get_event_loop().run_until_complete(self._verify())
        if errors:
            raise AssertionError(errors)
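The __exit__ above resolves a future on a loop that is running in the websocket thread, and it goes through call_soon_threadsafe because that is how asyncio expects other threads to interact with a loop. Here is a minimal standard-library sketch of that hand-off; the worker thread and done future are made up for illustration.

import asyncio
import threading

loop = asyncio.new_event_loop()
done = loop.create_future()


def serve() -> None:
    # the worker owns and runs the loop until the future is resolved
    asyncio.set_event_loop(loop)
    loop.run_until_complete(done)
    print("worker finished")


worker = threading.Thread(target=serve)
worker.start()
# resolving the future from the main thread must go through the loop
loop.call_soon_threadsafe(done.set_result, None)
worker.join()
loop.close()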