Example #1
def get_step(step_name, inputs, outputs, jobs, type_="unix"):
    input_map: Dict[str, ert.data.RecordTransmitter] = {}
    output_map: Dict[str, ert.data.RecordTransmitter] = {}
    real_source = "/real/0"
    step_source = "/real/0/step/0"
    step_builder = (ee.create_step_builder().set_parent_source(
        source=real_source).set_id("0").set_name(step_name).set_type(type_))
    for idx, (name, executable, args) in enumerate(jobs):
        step_builder.add_job(ee.create_job_builder().set_id(
            str(idx)).set_name(name).set_executable(executable).set_args(
                args).set_parent_source(step_source))
    for name, path, mime, factory in inputs:
        step_builder.add_input(
            ee.create_file_io_builder().set_name(name).set_path(
                Path(path)).set_mime(mime).set_transformation(
                    ert.data.ExecutableRecordTransformation(
                    )).set_transmitter_factory(factory))
    for name, path, mime, factory in outputs:
        step_builder.add_output(
            ee.create_file_io_builder().set_name(name).set_path(path).set_mime(
                mime).set_transmitter_factory(factory))

    for input_ in step_builder._inputs:
        input_map[input_._name] = input_.transmitter_factory()()

    for output in step_builder._outputs:
        output_map[output._name] = output.transmitter_factory()()

    return step_builder.build(), input_map, output_map
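
A minimal, hypothetical usage sketch of the helper above. The step name, file names, MIME types, and the input_factory/output_factory callables are illustrative stand-ins, assuming each factory is a zero-argument callable returning an ert.data.RecordTransmitter (the helper calls transmitter_factory()() when it fills the maps).

# Illustrative call only: input_factory / output_factory are hypothetical
# zero-argument callables returning ert.data.RecordTransmitter instances,
# typically supplied by test fixtures.
step, input_map, output_map = get_step(
    step_name="copy_step",
    inputs=[("infile", "in.json", "application/json", input_factory)],
    outputs=[("outfile", "out.json", "application/json", output_factory)],
    jobs=[("copy", Path("copy.py"), ["in.json", "out.json"])],
    type_="unix",
)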
Example #2
def test_build_ensemble():
    ensemble = ee.create_ensemble_builder().add_realization(
        ee.create_realization_builder().set_iens(0).add_step(
            ee.create_step_builder().add_job(ee.create_legacy_job_builder(
            ).set_id(0).set_name("echo_command").set_ext_job(
                Mock())).set_id("0").set_name(
                    "some_step").set_dummy_io().set_type("unix")).active(True))
    ensemble = ensemble.build()
    real = ensemble.get_reals()[0]
    assert real.is_active()
Example #3
def test_topological_sort(steps, expected, ambiguous):
    """Checks that steps are topologically sortable.

    For all ambiguous steps, assert that they are at least present in the
    sorted step. An ambiguous step is an isolated vertex in the topology graph,
    i.e. it does not depend on the input of any other step, nor does any other
    step depend on its output. It is ambiguous because it does not matter where
    in the topological sorting it appears.

    For expected steps, assert that they are equal to the sorted steps, minus
    any ambiguous steps.
    """
    real = ee.create_realization_builder().set_iens(0).active(True)
    # Inputs use a factory whose transmitters appear already transmitted
    # (MagicMock attributes are truthy by default); outputs use one whose
    # transmitters explicitly report as not transmitted.
    transmitted_factory = MagicMock()
    non_transmitted_factory = MagicMock()
    non_transmitted_factory.return_value.is_transmitted.return_value = False
    for step_def in steps:
        step = (ee.create_step_builder().set_id("0").set_name(
            step_def["name"]).set_type("unix"))
        for input_ in step_def["inputs"]:
            step.add_input(ee.create_input_builder().set_name(
                input_).set_transmitter_factory(transmitted_factory))
        for output in step_def["outputs"]:
            step.add_output(ee.create_output_builder().set_name(
                output).set_transmitter_factory(non_transmitted_factory))
        real.add_step(step)

    ensemble = ee.create_ensemble_builder().add_realization(real).build()
    real = ensemble.get_reals()[0]

    if ambiguous:
        sorted_ = [
            step.get_name()
            for step in list(real.get_steps_sorted_topologically())
        ]
        for step in ambiguous:
            assert step in sorted_

    if expected:
        assert expected == [
            step.get_name() for step in real.get_steps_sorted_topologically()
            if step.get_name() not in ambiguous
        ]
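
The parametrization itself is not shown here; a plausible sketch, matching the dict shape used above (step names and dependencies are illustrative only):

# "a" -> "b" via the shared record "x"; "solo" touches no shared records, so it
# is an isolated vertex and therefore "ambiguous" in the sense of the docstring.
steps = [
    {"name": "b", "inputs": ["x"], "outputs": []},
    {"name": "a", "inputs": [], "outputs": ["x"]},
    {"name": "solo", "inputs": [], "outputs": []},
]
expected = ["a", "b"]
ambiguous = ["solo"]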
Example #4
def test_inline_script(commandline, parsed_name, parsed_args):
    """Verify that the ensemble builder will obey quotations in order
    to support inlined shell scripts"""
    step = ert3.config.Unix(
        name="step with inlined script",
        input=[],
        script=tuple([commandline]),
        output=[],
        transportable_commands=tuple(),
    )
    step_builder = create_step_builder().set_name(
        "inline_script_test").set_type("unix")

    ensemble = ert3.evaluator.build_ensemble(stage=step,
                                             driver="local",
                                             ensemble_size=1,
                                             step_builder=step_builder)

    job = ensemble.get_reals()[0].get_steps()[0].get_jobs()[0]
    assert job.get_name() == parsed_name
    assert job.get_args() == tuple(parsed_args)
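
The (commandline, parsed_name, parsed_args) triples come from a parametrization that is not shown; one illustrative triple, assuming shell-style splitting where quoted segments stay intact, might look like this:

# Illustrative values only: the quoted script body must survive as a single argument.
commandline = 'bash -c "echo hello > output.txt"'
parsed_name = "bash"
parsed_args = ["-c", "echo hello > output.txt"]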
Example #5
def sum_coeffs_step(test_data_path, transmitter_factory):
    step_builder = ee.create_step_builder().set_name("add_coeffs").set_type(
        "unix")

    step_builder.add_input(ee.create_file_io_builder(
    ).set_name("sum_up").set_path(
        Path("sum_coeffs.py")).set_mime("text/x-python").set_transformation(
            ert.data.ExecutableRecordTransformation()).set_transmitter_factory(
                partial(
                    create_script_transmitter,
                    "sum_up",
                    test_data_path / "sum_coeffs.py",
                    transmitter_factory=transmitter_factory,
                )))

    step_builder.add_input(
        ee.create_file_io_builder().set_name("input0").set_path(
            Path("poly_0.out")).set_mime(
                "application/json").set_transmitter_factory(
                    partial(transmitter_factory, "input0")))
    step_builder.add_input(
        ee.create_file_io_builder().set_name("input1").set_path(
            Path("poly_1.out")).set_mime(
                "application/json").set_transmitter_factory(
                    partial(transmitter_factory, "input1")))
    step_builder.add_input(
        ee.create_file_io_builder().set_name("input2").set_path(
            Path("poly_2.out")).set_mime(
                "application/json").set_transmitter_factory(
                    partial(transmitter_factory, "input2")))

    step_builder.add_output(
        ee.create_file_io_builder().set_name("sum_output").set_path(
            Path("poly_sum.out")).set_mime(
                "application/json").set_transmitter_factory(
                    partial(transmitter_factory, "sum_output")))
    step_builder.add_job(
        ee.create_job_builder().set_name("sum_up").set_executable(
            Path("sum_coeffs.py")).set_args([]))
    return step_builder
Example #6
def function_ensemble_builder_factory(
    ensemble_size,
    transmitter_factory,
    coefficients,
):
    job_builder = ee.create_job_builder().set_name("user_defined_function")

    step_builder = (ee.create_step_builder().set_name(
        "function_evaluation").set_type("function"))

    coeffs_input = (ee.create_file_io_builder().set_name("coeffs").set_path(
        "coeffs").set_mime("application/json"))

    for iens, values in enumerate(coefficients):
        transmitter = create_input_transmitter(values,
                                               transmitter_factory("coeffs"))
        coeffs_input.set_transmitter_factory(lambda _t=transmitter: _t, iens)
    step_builder.add_input(coeffs_input)

    step_builder.add_output(
        ee.create_file_io_builder().set_name("function_output").set_path(
            "output").set_mime("application/json").set_transmitter_factory(
                partial(transmitter_factory, "function_output")))
    step_builder.add_job(job_builder)
    real_builder = ee.create_realization_builder().active(True).add_step(
        step_builder)

    builder = (ee.create_ensemble_builder().set_custom_port_range(
        custom_port_range=range(1024, 65535)).set_ensemble_size(
            ensemble_size).set_max_running(6).set_max_retries(2).set_executor(
                "local").set_forward_model(real_builder))

    def build(pickled_function):
        job_builder.set_executable(pickled_function)
        return builder

    return build
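
A hedged usage sketch for the factory above. The deferred build step exists so the job's executable can be set once a pickled callable is available; cloudpickle and the polynomial function below are assumptions, not taken from the original tests.

import cloudpickle

def polynomial(coeffs):
    # Hypothetical user-defined function evaluated by the "function" step.
    return [coeffs["a"] * x**2 + coeffs["b"] * x + coeffs["c"] for x in range(10)]

# ensemble_size, transmitter_factory and coefficients are the fixture
# arguments passed to function_ensemble_builder_factory above.
build = function_ensemble_builder_factory(ensemble_size, transmitter_factory,
                                          coefficients)
ensemble_builder = build(cloudpickle.dumps(polynomial))
ensemble = ensemble_builder.build()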
Example #7
def get_degree_step(degree, degree_spelled, transmitter_factory,
                    test_data_path, coefficients):
    step_builder = (ee.create_step_builder().set_name(
        f"{degree_spelled}_degree").set_type("unix"))

    input_name = f"generate_{degree_spelled}_degree"
    step_builder.add_input(
        ee.create_file_io_builder().set_name(input_name).set_path(
            Path("evaluate_coeffs.py")
        ).set_mime("text/x-python").set_transformation(
            ert.data.ExecutableRecordTransformation()).set_transmitter_factory(
                partial(
                    create_script_transmitter,
                    input_name,
                    test_data_path / "evaluate_coeffs.py",
                    transmitter_factory=transmitter_factory,
                )))

    coeffs_input = (ee.create_file_io_builder().set_name("coeffs").set_path(
        Path("coeffs.json")).set_mime("application/json"))
    for iens, values in enumerate(coefficients):
        transmitter = create_input_transmitter(values,
                                               transmitter_factory("coeffs"))
        coeffs_input.set_transmitter_factory(lambda _t=transmitter: _t, iens)
    step_builder.add_input(coeffs_input)

    output_name = f"input{degree}"
    step_builder.add_output(
        ee.create_file_io_builder().set_name(output_name).set_path(
            Path(f"poly_{degree}.out")).set_mime(
                "application/json").set_transmitter_factory(
                    partial(transmitter_factory, output_name)))
    step_builder.add_job(ee.create_job_builder().set_name(
        f"generate_{degree_spelled}_degree").set_executable(
            Path("evaluate_coeffs.py")).set_args([f"{degree}"]))
    return step_builder
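
A hypothetical wiring of the two fixtures above into a single realization; the spelled-out degree names and the fixture arguments are illustrative. The degree steps emit input0..input2, which the "add_coeffs" step consumes, so the realization's topological sort schedules them first.

# Illustrative composition only: ids and further ensemble wiring are omitted.
real_builder = ee.create_realization_builder().set_iens(0).active(True)
for degree, spelled in enumerate(("zero", "first", "second")):
    real_builder.add_step(
        get_degree_step(degree, spelled, transmitter_factory, test_data_path,
                        coefficients))
real_builder.add_step(sum_coeffs_step(test_data_path, transmitter_factory))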
Example #8
def test_evaluator(workspace, config, base_ensemble_dict, coeffs, expected,
                   request, has_unix_config):
    stages_config = request.getfixturevalue(config)
    storage_path = workspace._path / ".ert" / "tmp" / "test"
    input_transmitters = get_inputs(coeffs)
    base_ensemble_dict["size"] = len(coeffs)
    base_ensemble_dict["storage_type"] = "shared_disk"
    ensemble_config = ert3.config.load_ensemble_config(base_ensemble_dict)

    experiment_run_config = ert3.config.ExperimentRunConfig(
        ert3.config.ExperimentConfig(type="evaluation"),
        stages_config,
        ensemble_config,
        ert3.config.ParametersConfig.parse_obj([]),
    )
    stage = experiment_run_config.get_stage()

    step_builder = (
        create_step_builder().set_name(f"{stage.name}-only_step").set_type(
            "function" if isinstance(stage, ert3.config.Function) else "unix"))

    inputs = experiment_run_config.get_linked_inputs()
    stochastic_inputs = tuple(inputs[ert3.config.SourceNS.stochastic].values())

    ert3.evaluator.add_step_inputs(stochastic_inputs, input_transmitters,
                                   step_builder)

    ert3.evaluator.add_step_outputs(
        ensemble_config.storage_type,
        stage,
        storage_path,
        ensemble_config.size,
        step_builder,
    )

    if has_unix_config:
        ert3.evaluator.add_commands(
            stage.transportable_commands,
            base_ensemble_dict["storage_type"],
            storage_path,
            step_builder,
        )

    ensemble = ert3.evaluator.build_ensemble(
        stage, ensemble_config.forward_model.driver, ensemble_config.size,
        step_builder)

    evaluation_records = ert3.evaluator.evaluate(ensemble)

    for _, transmitter_map in evaluation_records.items():
        record = asyncio.get_event_loop().run_until_complete(
            transmitter_map["polynomial_output"].load())
        transmitter_map["polynomial_output"] = record.data

    expected = {
        iens: {
            "polynomial_output": data
        }
        for iens, data in enumerate(expected)
    }
    assert expected == evaluation_records
Example #9
def run_sensitivity_analysis(
    experiment_run_config: ert3.config.ExperimentRunConfig,
    workspace: ert3.workspace.Workspace,
    experiment_name: str,
) -> None:
    inputs = experiment_run_config.get_linked_inputs()
    storage_inputs = tuple(inputs[SourceNS.storage].values())
    resource_inputs = tuple(inputs[SourceNS.resources].values())
    stochastic_inputs = tuple(inputs[SourceNS.stochastic].values())
    sensitivity_input_records = prepare_sensitivity(
        stochastic_inputs,
        experiment_run_config.experiment_config,
        experiment_run_config.parameters_config,
    )
    ensemble_size = len(sensitivity_input_records)

    _prepare_experiment(
        workspace.name,
        experiment_name,
        experiment_run_config.ensemble_config,
        ensemble_size,
    )

    storage_path = _get_storage_path(
        experiment_run_config.ensemble_config, workspace, experiment_name
    )
    records_url = ert.storage.get_records_url(workspace.name)

    stage = experiment_run_config.get_stage()
    step_builder = (
        create_step_builder()
        .set_name(f"{stage.name}-only_step")
        .set_type("function" if isinstance(stage, ert3.config.Function) else "unix")
    )

    transmitters = _gather_transmitter_maps(
        _transmitter_map_storage(storage_inputs, ensemble_size, records_url)
        + _transmitter_map_resources(
            experiment_run_config,
            resource_inputs,
            ensemble_size,
            experiment_name,
            workspace,
        )
        + transmitter_map_sensitivity(
            stochastic_inputs,
            sensitivity_input_records,
            experiment_name,
            workspace,
        )
    )
    for records in (storage_inputs, resource_inputs, stochastic_inputs):
        ert3.evaluator.add_step_inputs(
            records,
            transmitters,
            step_builder,
        )

    ert3.evaluator.add_step_outputs(
        experiment_run_config.ensemble_config.storage_type,
        stage,
        storage_path,
        ensemble_size,
        step_builder,
    )

    if isinstance(stage, ert3.config.Unix):
        ert3.evaluator.add_commands(
            stage.transportable_commands,
            experiment_run_config.ensemble_config.storage_type,
            storage_path,
            step_builder,
        )

    ensemble = ert3.evaluator.build_ensemble(
        stage,
        experiment_run_config.ensemble_config.forward_model.driver,
        ensemble_size,
        step_builder,
    )

    output_transmitters = ert3.evaluator.evaluate(ensemble)
    analyze_sensitivity(
        stochastic_inputs,
        experiment_run_config.experiment_config,
        experiment_run_config.parameters_config,
        workspace,
        experiment_name,
        output_transmitters,
    )
Example #10
def run(
    experiment_run_config: ert3.config.ExperimentRunConfig,
    workspace: ert3.workspace.Workspace,
    experiment_name: str,
) -> None:
    # This reassures mypy that the ensemble size is defined
    assert experiment_run_config.ensemble_config.size is not None
    ensemble_size = experiment_run_config.ensemble_config.size

    if experiment_run_config.experiment_config.type != "evaluation":
        raise ValueError("this entry point can only run 'evaluation' experiments")

    _prepare_experiment(
        workspace.name,
        experiment_name,
        experiment_run_config.ensemble_config,
        ensemble_size,
    )
    storage_path = _get_storage_path(
        experiment_run_config.ensemble_config, workspace, experiment_name
    )
    records_url = ert.storage.get_records_url(workspace.name)

    stage = experiment_run_config.get_stage()
    step_builder = (
        create_step_builder()
        .set_name(f"{stage.name}-only_step")
        .set_type("function" if isinstance(stage, ert3.config.Function) else "unix")
    )

    inputs = experiment_run_config.get_linked_inputs()

    storage_inputs = tuple(inputs[SourceNS.storage].values())
    resource_inputs = tuple(inputs[SourceNS.resources].values())
    stochastic_inputs = tuple(inputs[SourceNS.stochastic].values())
    transmitters = _gather_transmitter_maps(
        _transmitter_map_storage(storage_inputs, ensemble_size, records_url)
        + _transmitter_map_resources(
            experiment_run_config,
            resource_inputs,
            ensemble_size,
            experiment_name,
            workspace,
        )
        + _transmitter_map_stochastic(
            stochastic_inputs,
            experiment_run_config.parameters_config,
            ensemble_size,
            experiment_name,
            workspace.name,
        )
    )

    for records in (storage_inputs, resource_inputs, stochastic_inputs):
        ert3.evaluator.add_step_inputs(
            records,
            transmitters,
            step_builder,
        )

    ert3.evaluator.add_step_outputs(
        experiment_run_config.ensemble_config.storage_type,
        stage,
        storage_path,
        ensemble_size,
        step_builder,
    )

    if isinstance(stage, ert3.config.Unix):
        ert3.evaluator.add_commands(
            stage.transportable_commands,
            experiment_run_config.ensemble_config.storage_type,
            storage_path,
            step_builder,
        )

    ensemble = ert3.evaluator.build_ensemble(
        stage,
        experiment_run_config.ensemble_config.forward_model.driver,
        ensemble_size,
        step_builder,
    )
    ert3.evaluator.evaluate(ensemble)