コード例 #1
0
def test_fs_init_from_scratch_deprecated():
    """Initializing a fresh case through the deprecated BoolVector/StringList
    API must emit a DeprecationWarning and still mark the selected
    realizations in the case's state map."""
    ert = EnKFMain(ResConfig("snake_oil.ert"))
    fs_manager = ert.getEnkfFsManager()
    new_case_fs = fs_manager.getFileSystem("new_case")  # new case
    active = BoolVector.createFromList(25, [0, 1, 2, 3, 4, 5])
    context = ErtRunContext.case_init(new_case_fs, active)
    with pytest.warns(DeprecationWarning):
        fs_manager.initializeFromScratch(
            StringList(["SNAKE_OIL_PARAM"]), context
        )
    # Exactly the six active realizations end up in the state map.
    assert len(fs_manager.getStateMapForCase("new_case")) == 6
コード例 #2
0
def test_fs_init_from_scratch():
    """Initializing a new case with a plain boolean mask marks exactly the
    active realizations in the case's state map."""
    ert = EnKFMain(ResConfig("snake_oil.ert"))
    fs_manager = ert.getEnkfFsManager()
    new_case_fs = fs_manager.getFileSystem("new_case")
    active_mask = [True] * 6 + [False] * 19
    context = ErtRunContext.case_init(new_case_fs, active_mask)

    fs_manager.initializeFromScratch(
        StringList(["SNAKE_OIL_PARAM"]), context
    )
    assert len(fs_manager.getStateMapForCase("new_case")) == 6
コード例 #3
0
def initializeCurrentCaseFromScratch(
    parameters: List[str], members: List[str], ert: EnKFMain
):
    """Initialize the current case from scratch for ``parameters``,
    activating only the ensemble members listed in ``members``.

    Member entries are strings (possibly whitespace-padded) naming
    realization indices; an out-of-range index raises IndexError.
    """
    mask = [False] * ert.getEnsembleSize()
    for entry in members:
        mask[int(entry.strip())] = True

    fs_manager = ert.getEnkfFsManager()
    context = ErtRunContext.case_init(fs_manager.getCurrentFileSystem(), mask)
    fs_manager.initializeFromScratch(StringList(parameters), context)
コード例 #4
0
def test_enkf_fs_manager_create(setup_case):
    """Case creation and state reporting through the fs manager.

    Checks mounted / exists / has-data / running status for the pre-existing
    "default_0" case and for a case created on demand ("newFS").
    """
    # We are indirectly testing the create through the create
    # already in the enkf_main object. In principle we could
    # create a separate manager instance from the ground up, but
    # then the reference count will be weird.
    res_config = setup_case("local/snake_oil", "snake_oil.ert")
    ert = EnKFMain(res_config)
    fsm = ert.getEnkfFsManager()

    # Mount the current (default) file system so "default_0" is live.
    fsm.getCurrentFileSystem()
    assert fsm.isCaseMounted("default_0")
    assert fsm.caseExists("default_0")
    assert fsm.caseHasData("default_0")
    assert not fsm.isCaseRunning("default_0")

    assert fsm.getFileSystemCount() == 1

    # "newFS" has not been created yet, so every query is negative.
    assert not fsm.isCaseMounted("newFS")
    assert not fsm.caseExists("newFS")
    assert not fsm.caseHasData("newFS")
    assert not fsm.isCaseRunning("newFS")

    # getFileSystem() creates and mounts the case on demand.
    fsm.getFileSystem("newFS")
    assert fsm.getFileSystemCount() == 2

    # The new case now exists and is mounted, but holds no data yet.
    assert fsm.isCaseMounted("newFS")
    assert fsm.caseExists("newFS")
    assert not fsm.caseHasData("newFS")
    assert not fsm.isCaseRunning("newFS")
コード例 #5
0
    def loadAllMisfitData(ert: EnKFMain, case_name) -> DataFrame:
        """Return chi² misfit values for every observation and realization.

        @type ert: EnKFMain
        @type case_name: str
        @rtype: DataFrame

        Rows are the active realizations; columns are the misfit keys, with
        the last key holding the sum over all observations.
        """
        fs = ert.getEnkfFsManager().getFileSystem(case_name)

        realizations = MisfitCollector.createActiveList(ert, fs)
        # sort_keys=False: assumes the key order matches the iteration order
        # of ert.getObservations() below, with the total-misfit key last --
        # TODO confirm against KeyManager.misfitKeys().
        misfit_keys = ert.getKeyManager().misfitKeys(sort_keys=False)
        misfit_sum_index = len(misfit_keys) - 1

        # One row per misfit key, one column per realization. NaN-fill keeps
        # never-written cells visible; the sum row starts at 0 so it can be
        # accumulated into.
        misfit_array = numpy.empty(shape=(len(misfit_keys), len(realizations)),
                                   dtype=numpy.float64)
        misfit_array.fill(numpy.nan)
        misfit_array[misfit_sum_index] = 0.0

        for column_index, obs_vector in enumerate(ert.getObservations()):

            for realization_index, realization_number in enumerate(
                    realizations):
                misfit = obs_vector.getTotalChi2(fs, realization_number)

                misfit_array[column_index][realization_index] = misfit
                misfit_array[misfit_sum_index][realization_index] += misfit

        # Transpose so realizations run along the rows of the DataFrame.
        misfit_data = DataFrame(data=numpy.transpose(misfit_array),
                                index=realizations,
                                columns=misfit_keys)
        misfit_data.index.name = "Realization"

        return misfit_data
コード例 #6
0
    def loadGenData(ert: EnKFMain, case_name, key, report_step, realization_index=None):
        """Load GEN_DATA results for ``key`` at ``report_step`` from ``case_name``.

        @type ert: EnKFMain
        @type case_name: str
        @type key: str
        @type report_step: int
        @type realization_index: int or None
        @rtype: DataFrame

        In the returned dataframe the realisation index runs along the
        rows, and the gen_data element index runs vertically along the
        columns.
        """
        fs = ert.getEnkfFsManager().getFileSystem(case_name)
        realizations = fs.realizationList(RealizationStateEnum.STATE_HAS_DATA)
        # Explicit None check (bug fix): a plain truthiness test would treat
        # realization_index=0 as "no filter" and silently return all
        # realizations. This also matches the sibling loaders
        # (loadAllGenKwData / loadAllSummaryData).
        if realization_index is not None:
            if realization_index not in realizations:
                raise IndexError(f"No such realization {realization_index}")
            realizations = IntVector.active_list(str(realization_index))

        config_node = ert.ensembleConfig().getNode(key)
        config_node.getModelConfig()

        ensemble_data = EnsemblePlotGenData(config_node, fs, report_step)
        data_array = ensemble_data.getRealizations(realizations)

        realizations = numpy.array(realizations)
        return DataFrame(data=data_array, columns=realizations)
コード例 #7
0
    def test_with_enkf_fs(self):
        """Summary keys registered directly on an EnkfFs must survive a
        remount through EnKFMain and be propagated to the ensemble config."""
        config_file = self.createTestPath("Statoil/config/with_data/config")

        with TestAreaContext("enkf/summary_key_set/enkf_fs",
                             store_area=True) as context:
            context.copy_parent_content(config_file)

            # Open the storage directly, register three summary keys, then
            # unmount so the key set is flushed to disk.
            fs = EnkfFs("storage/default")
            summary_key_set = fs.getSummaryKeySet()
            summary_key_set.addSummaryKey("FOPT")
            summary_key_set.addSummaryKey("WWCT")
            summary_key_set.addSummaryKey("WOPR")
            fs.umount()

            # Re-open the same storage via EnKFMain and verify the keys
            # survived the round trip.
            res_config = ResConfig("config")
            ert = EnKFMain(res_config)
            fs = ert.getEnkfFsManager().getCurrentFileSystem()
            summary_key_set = fs.getSummaryKeySet()
            self.assertTrue("FOPT" in summary_key_set)
            self.assertTrue("WWCT" in summary_key_set)
            self.assertTrue("WOPR" in summary_key_set)

            ensemble_config = ert.ensembleConfig()

            # The keys are also visible in the ensemble config; a key that
            # was never added ("TCPU") must not appear.
            self.assertTrue("FOPT" in ensemble_config)
            self.assertTrue("WWCT" in ensemble_config)
            self.assertTrue("WOPR" in ensemble_config)
            self.assertFalse("TCPU" in ensemble_config)
コード例 #8
0
    def loadAllGenKwData(ert: EnKFMain, case_name, keys=None, realization_index=None):
        """Load GEN_KW parameter values for a case as a DataFrame.

        @type ert: EnKFMain
        @type case_name: str
        @type keys: list of str
        @type realization_index: int or None
        @rtype: DataFrame
        """
        fs = ert.getEnkfFsManager().getFileSystem(case_name)

        active_realizations = GenKwCollector.createActiveList(ert, fs)
        if realization_index is not None:
            if realization_index not in active_realizations:
                raise IndexError(f"No such realization ({realization_index})")
            active_realizations = [realization_index]

        requested_keys = ert.getKeyManager().genKwKeys()
        if keys is not None:
            # Silently drop requested keys that are not GEN_KW keys.
            requested_keys = [key for key in keys if key in requested_keys]

        values = _lib.enkf_fs_keyword_data.keyword_data_get_realizations(
            ert.ensembleConfig(), fs, requested_keys, active_realizations
        )
        frame = DataFrame(data=values, index=active_realizations, columns=requested_keys)
        frame.index.name = "Realization"
        return frame
コード例 #9
0
def test_assert_symlink_deleted(setup_case):
    """Re-creating the runpath must replace a field-file symlink with a
    regular file."""
    res_config = setup_case("local/snake_oil_field", "snake_oil.ert")
    ert = EnKFMain(res_config)
    runpath_list = ert.getRunpathList()
    runner = ert.getEnkfSimulationRunner()

    # Build the initial directory structure for a single realization.
    model_config = ert.getModelConfig()
    run_context = ErtRunContext.ensemble_experiment(
        ert.getEnkfFsManager().getCurrentFileSystem(),
        [True],
        model_config.getRunpathFormat(),
        model_config.getJobnameFormat(),
        ert.getDataKW(),
        0,
    )
    runner.createRunPath(run_context)

    # Swap the generated field file for a symlink pointing elsewhere.
    runpath = str(runpath_list[0].runpath)
    linkpath = f"{runpath}/permx.grdcel"
    targetpath = f"{runpath}/permx.grdcel.target"
    open(targetpath, "a").close()
    os.remove(linkpath)
    os.symlink(targetpath, linkpath)

    # Re-create the directory structure ...
    runner.createRunPath(run_context)

    # ... and verify the symlink was replaced by a real file.
    assert not os.path.islink(linkpath)
コード例 #10
0
def test_load_results_manually2(setup_case, caplog, monkeypatch, lazy_load):
    """
    This little test does not depend on Equinor-data and only verifies
    the lazy_load flag in forward_load_context plus memory-logging
    """
    if lazy_load:
        # The flag is read from the environment by the summary loader.
        monkeypatch.setenv("ERT_LAZY_LOAD_SUMMARYDATA", str(lazy_load))
    res_config = setup_case("local/snake_oil", "snake_oil.ert")
    ert = EnKFMain(res_config, strict=True)
    load_from = ert.getEnkfFsManager().getFileSystem("default_0")
    ert.getEnkfFsManager().switchFileSystem(load_from)
    # Activate a single realization; the load is expected to fail, we only
    # care that the lazy flag shows up in the log output.
    realisations = [False] * 25
    realisations[0] = True  # only need one to test what we want
    with caplog.at_level(logging.INFO):
        loaded = ert.loadFromForwardModel(realisations, 0, load_from)
        assert 0 == loaded  # they will in fact all fail, but that's ok
        assert f"lazy={lazy_load}".lower() in caplog.text
コード例 #11
0
def test_localization(setup_case, expected_target_gen_kw):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")
    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")

    # perform localization: restrict the update to parameter indices 1 and 2
    # of SNAKE_OIL_PARAM, driven by a single observation.
    localized_idxs = (1, 2)
    local_config = ert.getLocalConfig()
    local_config.clear()
    obs = local_config.createObsdata("OBSSET_LOCA")
    obs.addNode("WOPR_OP1_72")
    ministep = local_config.createMinistep("MINISTEP_LOCA")
    ministep.addActiveData("SNAKE_OIL_PARAM")  # replace dataset.addNode()
    active_list = ministep.getActiveList("SNAKE_OIL_PARAM")
    for i in localized_idxs:
        active_list.addActiveIndex(i)
    ministep.attachObsset(obs)
    updatestep = local_config.getUpdatestep()
    updatestep.attachMinistep(ministep)

    # Run ensemble smoother
    mask = [True] * ert.getEnsembleSize()
    model_config = ert.getModelConfig()
    path_fmt = model_config.getRunpathFormat()
    jobname_fmt = model_config.getJobnameFormat()
    subst_list = None
    run_context = ErtRunContext.ensemble_smoother(
        sim_fs, target_fs, mask, path_fmt, jobname_fmt, subst_list, 0
    )
    es_update.smootherUpdate(run_context)

    # Load realization 0 of SNAKE_OIL_PARAM from both the prior case and
    # the updated target case.
    conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
    sim_node = EnkfNode(conf)
    target_node = EnkfNode(conf)

    node_id = NodeId(0, 0)
    sim_node.load(sim_fs, node_id)
    target_node.load(target_fs, node_id)

    sim_gen_kw = list(sim_node.asGenKw())
    target_gen_kw = list(target_node.asGenKw())

    # Test that the localized values has been updated
    assert sim_gen_kw[1:3] != target_gen_kw[1:3]

    # test that all the other values are left unchanged
    assert sim_gen_kw[3:] == target_gen_kw[3:]
    assert sim_gen_kw[0] == target_gen_kw[0]

    assert target_gen_kw == pytest.approx(expected_target_gen_kw)
コード例 #12
0
def initializeCurrentCaseFromExisting(
    source_case: str,
    target_case: str,
    source_report_step: int,
    parameters: List[str],
    members: List[str],
    ert: EnKFMain,
):
    """Copy ``parameters`` for the listed ``members`` from ``source_case``
    into the current case, provided both cases exist and the source case is
    initialized; otherwise do nothing."""
    fs_manager = ert.getEnkfFsManager()
    if (
        caseExists(source_case, LibresFacade(ert))
        and fs_manager.isCaseInitialized(source_case)
        and caseExists(target_case, LibresFacade(ert))
    ):
        # Translate the member list into a per-realization boolean mask.
        member_mask = [False] * ert.getEnsembleSize()
        for member in members:
            member_mask[int(member)] = True

        fs_manager.customInitializeCurrentFromExistingCase(
            source_case, source_report_step, member_mask, parameters
        )
コード例 #13
0
def test_custom_init_runs(state_mask, expected_length):
    """Custom-initialize a new case from "default_0" using a plain bool list
    as the mask; the state map of the new case must grow accordingly."""
    res_config = ResConfig("snake_oil.ert")
    ert = EnKFMain(res_config)
    new_fs = ert.getEnkfFsManager().getFileSystem("new_case")
    ert.getEnkfFsManager().switchFileSystem(new_fs)
    ert.getEnkfFsManager().customInitializeCurrentFromExistingCase(
        "default_0", 0, state_mask, StringList(["SNAKE_OIL_PARAM"]))
    assert len(ert.getEnkfFsManager().getStateMapForCase(
        "new_case")) == expected_length
コード例 #14
0
def test_update_report(setup_case, snapshot):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")

    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")
    run_context = ErtRunContext.ensemble_smoother_update(sim_fs, target_fs)
    es_update.smootherUpdate(run_context)
    # The smoother update writes its report under the analysis log path;
    # compare the "deprecated" log file against the stored snapshot.
    log_file = Path(ert.analysisConfig().get_log_path()) / "deprecated"
    snapshot.assert_match(log_file.read_text("utf-8"), "update_log")
コード例 #15
0
File: test_row_scaling_case.py  Project: ManInFez/ert
    def test_large_case(self):
        """Row-scaled smoother update on a grid large enough to exceed the
        default update-matrix size in enkf_main_update()."""
        with open("config", "w") as fp:
            fp.write(
                """NUM_REALIZATIONS 10
GRID             CASE.EGRID
FIELD            PORO    PARAMETER    poro.grdecl INIT_FILES:fields/poro%d.grdecl
SUMMARY          WBHP
OBS_CONFIG       observations.txt
TIME_MAP timemap.txt
"""
            )

        for f in ["timemap.txt", "observations.txt"]:
            src_file = self.createTestPath(os.path.join("local/row_scaling", f))
            shutil.copy(src_file, "./")
        # The grid size must be greater than 250000 (the default matrix size in
        # enkf_main_update())
        grid = EclGridGenerator.create_rectangular((70, 70, 70), (1, 1, 1))
        grid.save_EGRID("CASE.EGRID")
        res_config = ResConfig(user_config_file="config")
        main = EnKFMain(res_config)
        init_fs = init_data(main)

        # Configure the local updates
        local_config = main.getLocalConfig()
        local_config.clear()
        local_data = local_config.createDataset("LOCAL")
        local_data.addNode("PORO")
        obs = local_config.createObsdata("OBSSET_LOCAL")
        obs.addNode("WBHP0")
        ministep = local_config.createMinistep("MINISTEP_LOCAL")
        ministep.attachDataset(local_data)
        ministep.attachObsset(obs)
        updatestep = local_config.getUpdatestep()
        updatestep.attachMinistep(ministep)

        # Apply the row scaling
        # NOTE(review): ScalingTest presumably derives a per-cell scaling
        # factor from the grid -- confirm against its definition.
        row_scaling = local_data.row_scaling("PORO")
        ens_config = main.ensembleConfig()
        poro_config = ens_config["PORO"]
        field_config = poro_config.getFieldModelConfig()
        grid = main.eclConfig().getGrid()
        row_scaling.assign(field_config.get_data_size(), ScalingTest(grid))
        es_update = ESUpdate(main)
        update_fs = main.getEnkfFsManager().getFileSystem("target2")
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs)
        es_update.smootherUpdate(run_context)
コード例 #16
0
def test_snapshot_alpha(setup_case, alpha, expected):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")

    # Overwrite the observation file with three FOPR observations whose
    # standard errors span two orders of magnitude, so different alpha
    # cutoffs deactivate different subsets of them.
    obs_file = Path("observations") / "observations.txt"
    with obs_file.open(mode="w") as fin:
        fin.write(
            """
SUMMARY_OBSERVATION LOW_STD
{
   VALUE   = 10;
   ERROR   = 0.1;
   DATE    = 2015-06-23;
   KEY     = FOPR;
};
SUMMARY_OBSERVATION HIGH_STD
{
   VALUE   = 10;
   ERROR   = 1.0;
   DATE    = 2015-06-23;
   KEY     = FOPR;
};
SUMMARY_OBSERVATION EXTREMELY_HIGH_STD
{
   VALUE   = 10;
   ERROR   = 10.0;
   DATE    = 2015-06-23;
   KEY     = FOPR;
};
"""
        )

    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    ert.analysisConfig().selectModule("IES_ENKF")
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")
    run_context = ErtRunContext.ensemble_smoother_update(sim_fs, target_fs)
    ert.analysisConfig().setEnkfAlpha(alpha)
    es_update.smootherUpdate(run_context)
    # The per-update snapshot records the alpha that was used and the
    # resulting observation status for the ALL_ACTIVE ministep.
    result_snapshot = ert.update_snapshots[run_context.get_id()]
    assert result_snapshot.alpha == alpha
    assert result_snapshot.ministep_snapshots["ALL_ACTIVE"].obs_status == expected
コード例 #17
0
def test_custom_init_runs(state_mask, expected_length):
    """Custom-initialize a new case from "default_0" via the BoolVector API;
    the state map of the new case must grow to ``expected_length``."""
    ert = EnKFMain(ResConfig("snake_oil.ert"))
    fs_manager = ert.getEnkfFsManager()
    fs_manager.switchFileSystem(fs_manager.getFileSystem("new_case"))  # new case
    # Translate the boolean mask into the index-list form BoolVector expects.
    active_indices = [index for index, active in enumerate(state_mask) if active]
    bool_vector = BoolVector.createFromList(len(state_mask), active_indices)
    fs_manager.customInitializeCurrentFromExistingCase(
        "default_0", 0, bool_vector, StringList(["SNAKE_OIL_PARAM"])
    )
    assert len(fs_manager.getStateMapForCase("new_case")) == expected_length
コード例 #18
0
File: test_enkf_library.py  Project: oyvindeide/ert
    def test_ecl_config_creation(self):
        """EnKFMain built from a minimal config exposes typed sub-configs and
        a default file system with a time map."""
        with TestAreaContext("enkf_library_test") as work_area:
            work_area.copy_directory(self.case_directory)

            res_config = ResConfig("simple_config/minimum_config")
            main = EnKFMain(res_config)

            self.assertIsInstance(main.analysisConfig(), AnalysisConfig)
            self.assertIsInstance(main.eclConfig(), EclConfig)

            # The minimum config has no refcase, so getRefcase() yields a
            # null pointer and the isinstance assertion is expected to fail.
            with self.assertRaises(AssertionError):  # Null pointer!
                self.assertIsInstance(main.eclConfig().getRefcase(), EclSum)

            file_system = main.getEnkfFsManager().getCurrentFileSystem()
            self.assertEqual(file_system.getCaseName(), "default")
            time_map = file_system.getTimeMap()
            self.assertIsInstance(time_map, TimeMap)
コード例 #19
0
def test_update(setup_case, module, expected_gen_kw):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")

    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    ert.analysisConfig().selectModule(module)
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")
    run_context = ErtRunContext.ensemble_smoother_update(sim_fs, target_fs)
    es_update.smootherUpdate(run_context)

    # Load realization 0 of SNAKE_OIL_PARAM from both the prior case and
    # the updated target case.
    conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
    sim_node = EnkfNode(conf)
    target_node = EnkfNode(conf)

    node_id = NodeId(0, 0)
    sim_node.load(sim_fs, node_id)
    target_node.load(target_fs, node_id)

    sim_gen_kw = list(sim_node.asGenKw())
    target_gen_kw = list(target_node.asGenKw())

    # The update must actually change the parameters ...
    assert sim_gen_kw != target_gen_kw

    # ... while the prior realization stays at its snapshotted values.
    assert sim_gen_kw == pytest.approx(
        [
            -1.3035319087841115,
            0.8222709205428339,
            -1.1400029486153482,
            0.7477534046493867,
            -0.10400064074767973,
            -1.7223242794585338,
            0.0761604027734105,
            0.4039137216428462,
            0.10001691562080614,
            0.09549338450036506,
        ]
    )

    assert target_gen_kw == pytest.approx(expected_gen_kw)
コード例 #20
0
File: test_obs_util.py  Project: oddvarlia/semeio
def test_validate_no_realizations(test_data_root):
    """
    Ensemble has not run, so no observation keys have data.
    """
    shutil.copytree(os.path.join(test_data_root, "poly_normal"), "test_data")
    os.chdir("test_data")

    ert = EnKFMain(ResConfig("poly.ert"))
    observations = ert.getObservations()

    assert (
        keys_with_data(
            observations,
            ["POLY_OBS"],
            ert.getEnsembleSize(),
            ert.getEnkfFsManager().getCurrentFileSystem(),
        )
        == []
    )
コード例 #21
0
File: test_obs_util.py  Project: oddvarlia/semeio
def test_validate_failed_realizations(test_data_root):
    """
    Config has several failed realisations; the key still reports data.
    """
    shutil.copytree(
        os.path.join(test_data_root, "failed_runs_in_storage"), "test_data"
    )
    os.chdir("test_data")

    ert = EnKFMain(ResConfig("mini_fail_config"))
    observations = ert.getObservations()

    assert keys_with_data(
        observations,
        ["GEN_PERLIN_1"],
        ert.getEnsembleSize(),
        ert.getEnkfFsManager().getCurrentFileSystem(),
    ) == ["GEN_PERLIN_1"]
コード例 #22
0
    def loadAllSummaryData(ert: EnKFMain,
                           case_name,
                           keys=None,
                           realization_index=None):
        """Load summary data for a case, indexed by (Realization, Date).

        @type ert: EnKFMain
        @type case_name: str
        @type keys: list of str
        @type realization_index: int or None
        @rtype: DataFrame
        """

        fs = ert.getEnkfFsManager().getFileSystem(case_name)

        # Dates start at time-map index 1 -- presumably index 0 is the
        # report step with no summary data; TODO confirm.
        time_map = fs.getTimeMap()
        dates = [
            time_map[index].datetime() for index in range(1, len(time_map))
        ]

        realizations = SummaryCollector.createActiveList(ert, fs)
        if realization_index is not None:
            if realization_index not in realizations:
                raise IndexError(f"No such realization {realization_index}")
            realizations = [realization_index]

        summary_keys = ert.getKeyManager().summaryKeys()
        if keys is not None:
            summary_keys = [key for key in keys if key in summary_keys
                            ]  # ignore keys that doesn't exist

        summary_data = _lib.enkf_fs_summary_data.get_summary_data(
            ert.ensembleConfig(), fs, summary_keys, realizations, len(dates))

        # Two-level row index: realization varies slowest, date fastest.
        multi_index = MultiIndex.from_product([realizations, dates],
                                              names=["Realization", "Date"])

        df = DataFrame(data=summary_data,
                       index=multi_index,
                       columns=summary_keys)

        return df
コード例 #23
0
def test_transfer_var(use_tmpdir):
    """SETENV and UPDATE_PATH values from the config end up in the
    jobs.json generated in the realization's runpath."""
    # Write a minimal config file with env
    with open("config_file.ert", "w") as fout:
        fout.write(
            dedent("""
        NUM_REALIZATIONS 1
        JOBNAME a_name_%d
        SETENV FIRST TheFirstValue
        SETENV SECOND TheSecondValue
        UPDATE_PATH   THIRD  TheThirdValue
        UPDATE_PATH   FOURTH TheFourthValue
        """))
    res_config = ResConfig("config_file.ert")
    ert = EnKFMain(res_config)
    fs_manager = ert.getEnkfFsManager()

    # Create the runpath for a single active realization.
    model_config = ert.getModelConfig()
    run_context = ErtRunContext.ensemble_experiment(
        fs_manager.getCurrentFileSystem(),
        [True],
        model_config.getRunpathFormat(),
        model_config.getJobnameFormat(),
        ert.getDataKW(),
        0,
    )
    ert.getEnkfSimulationRunner().createRunPath(run_context)
    os.chdir("simulations/realization0")
    # jobs.json carries the environment and path updates for the forward
    # model jobs of this realization.
    with open("jobs.json", "r") as f:
        data = json.load(f)
        env_data = data["global_environment"]
        assert env_data["FIRST"] == "TheFirstValue"
        assert env_data["SECOND"] == "TheSecondValue"

        path_data = data["global_update_path"]
        assert "TheThirdValue" == path_data["THIRD"]
        assert "TheFourthValue" == path_data["FOURTH"]
コード例 #24
0
def test_assert_export(use_tmpdir):
    """Creating the runpath must write the runpath-list export file at the
    location given by RUNPATH_FILE, one formatted line per realization."""
    # Write a minimal config file with env
    with open("config_file.ert", "w") as fout:
        fout.write(
            dedent(
                """
        NUM_REALIZATIONS 1
        JOBNAME a_name_%d
        RUNPATH_FILE directory/test_runpath_list.txt
        """
            )
        )
    res_config = ResConfig("config_file.ert")
    ert = EnKFMain(res_config)
    runpath_list = ert.getRunpathList()
    # The export file does not exist until the runpath has been created.
    assert not os.path.isfile(runpath_list.getExportFile())

    fs_manager = ert.getEnkfFsManager()
    model_config = ert.getModelConfig()
    run_context = ErtRunContext.ensemble_experiment(
        fs_manager.getCurrentFileSystem(),
        [True],
        model_config.getRunpathFormat(),
        model_config.getJobnameFormat(),
        ert.getDataKW(),
        0,
    )

    ert.getEnkfSimulationRunner().createRunPath(run_context)

    # One line per realization: index, runpath, jobname, iteration.
    assert os.path.isfile(runpath_list.getExportFile())
    assert "test_runpath_list.txt" == os.path.basename(runpath_list.getExportFile())
    assert (
        Path(runpath_list.getExportFile()).read_text("utf-8")
        == f"000  {os.getcwd()}/simulations/realization0  a_name_0  000\n"
    )
コード例 #25
0
File: batch_simulator.py  Project: kvashchuka/libres
class BatchSimulator(object):
    def __init__(self, res_config, controls, results, callback=None):
        """Will create simulator which can be used to run multiple simulations.

        The @res_config argument should be a ResConfig object, representing the
        fully configured state of libres.


        The @controls argument configures which parameters the simulator should
        get when actually simulating. The @controls argument should be a
        dictionary like this :

            controls = {
                "cmode": ["Well", "Group"],
                "order":
                    "W" : ["01", "02", "03"]
                }

        In this example, the first group of controls "cmode" includes two
        controls, called "Well" and "Group". The second group of controls
        "order" has one control "W" with three suffixes.
        Note that:
        - Either no variable in a group has suffixes or all the variables in
          the group have suffixes.
        - Suffixes must be specified as non-empty collections of strings.
        - No duplicate groups/controls/suffixes are allowed

        When actually simulating, these values will be written to json files
        looking like this:

            cmode.json = {"Well": 1.0, "Group": 2.0}
            order.json = {
                "W":
                    "01": 0.3,
                    "02": 1.0,
                    "03": 4.2
                }

        When later invoking the start() method the simulator expects to get
        values for all parameters configured with the @controls argument,
        otherwise an exception will be raised.
        Internally in libres code the controls will be implemented as
        'ext_param' instances.


        The @results argument is a list of keys of results which the simulator
        expects to be generated by the forward model. If argument @results
        looks like:

             results = ["CMODE", "order"]

        The simulator will look for the files 'CMODE_0' and 'order_0' in the
        simulation folder. If those files are not produced by the simulator an
        exception will be raised.

        The optional argument callback can be used to provide a callable
        which will be called as:

              callback(run_context)

        When the simulator has started. For the callable passed as
        callback you are encouraged to use the future proof signature:

             def callback(*args, **kwargs):
                 ....

        """
        if not isinstance(res_config, ResConfig):
            raise ValueError(
                "The first argument must be valid ResConfig instance")

        self.res_config = res_config
        self.ert = EnKFMain(self.res_config)
        self.control_keys = set(controls.keys())
        self.result_keys = set(results)
        self.callback = callback

        ens_config = self.res_config.ensemble_config
        # Register one ext_param node per control group so that control
        # values can be stored per realization.
        for control_name, variables in controls.items():
            ens_config.addNode(
                EnkfConfigNode.create_ext_param(control_name, variables))

        # Register one gen_data node per expected result; the forward model
        # must produce files named "<key>_0".
        for key in results:
            ens_config.addNode(
                EnkfConfigNode.create_gen_data(key, "{}_%d".format(key)))

    def _setup_sim(self, sim_id, controls, file_system):
        """Validate and write the control values for one simulation.

        @param sim_id: index of the simulation within the batch; used as the
            realization id in storage.
        @param controls: mapping {control_name: {var: value-or-suffix-dict}}.
        @param file_system: the EnkfFs instance to save the values into.
        @raises KeyError: on any mismatch between the configured and the
            provided control names, variables or suffixes.
        """
        def _set_ext_param(ext_param, key, assignment):
            # Bug fix: operate on the ext_param argument. Previously this
            # helper ignored its own parameter and wrote through the
            # closed-over ``ext_node`` loop variable of the caller.
            if isinstance(assignment, dict):  # handle suffixes
                suffixes = ext_param.config[key]
                if len(assignment) != len(suffixes):
                    raise KeyError(
                        "Key {} is missing values for these suffixes: {}".
                        format(
                            key,
                            set(suffixes).difference(set(assignment.keys()))))
                for suffix, value in assignment.items():
                    ext_param[key, suffix] = value
            else:  # assume assignment is a single numerical value
                ext_param[key] = assignment

        node_id = NodeId(0, sim_id)
        if set(controls.keys()) != self.control_keys:
            err_msg = "Mismatch between initialized and provided control names."
            raise KeyError(err_msg)

        for control_name, control in controls.items():
            ens_config = self.res_config.ensemble_config
            node = EnkfNode(ens_config[control_name])
            ext_node = node.as_ext_param()
            if len(ext_node) != len(control.keys()):
                raise KeyError(("Expected {} variables for control {}, "
                                "received {}.").format(len(ext_node),
                                                       control_name,
                                                       len(control.keys())))
            for var_name, var_setting in control.items():
                _set_ext_param(ext_node, var_name, var_setting)
            node.save(file_system, node_id)

    def start(self, case_name, case_data):
        """Start batch simulation, return a simulation context

        The start method will submit simulations to the queue system and then
        return a BatchContext handle which can be used to query for simulation
        status and results. The @case_name argument should just be string which
        will be used as name for the storage of these simulations in the
        system. The @controls argument is the set of control values, and the
        corresponding ID of the external realisation used for the simulations.
        The @control argument must match the control argument used when the
        simulator was instantiated. Assuming the following @control argument
        was passed to simulator construction:

            controls = {
                "cmode": ["Well", "Group"],
                "order":
                    "W" : ["01", "02", "03"]
                }

        Then the following @case_data argument can be used in the start method
        to simulate four simulations:

              [
                  (1,
                   {
                       "cmode": {"Well": 2, "Group": 2},
                       "order": {
                            "W":
                                "01": 2,
                                "02": 2,
                                "03": 5},
                   }),
                  (1,
                   {
                       "cmode": {"Well": 1, "Group": 3},
                       "order": {"W": ...},
                   }),
                  (1,
                   {
                       "cmode": {"Well": 1, "Group": 7},
                       "order": {"W": ...},
                   }),
                  (2,
                   {
                       "cmode": {"Well": 1, "Group": -1},
                       "order": {"W": ...},
                   }),
              ]

        The first integer argument in the tuple is the realisation id, so this
        simulation batch will consist of a total of four simulations, where the
        first three are based on realisation 1, and the last is based on
        realisation 2.

        Observe that only one BatchSimulator should actually be running at a
        time, so when you have called the 'start' method you need to let that
        batch complete before you start a new batch.
        """

        self.ert.addDataKW("<CASE_NAME>", _slug(case_name))
        file_system = self.ert.getEnkfFsManager().getFileSystem(case_name)
        for sim_id, (geo_id, controls) in enumerate(case_data):
            assert isinstance(geo_id, int)
            self._setup_sim(sim_id, controls, file_system)

        # The input should be validated before we instantiate the BatchContext
        # object, at that stage a job_queue object with multiple threads is
        # started, and things will typically be in a quite sorry state if an
        # exception occurs.
        itr = 0
        mask = BoolVector(default_value=True, initial_size=len(case_data))
        sim_context = BatchContext(self.result_keys, self.ert, file_system,
                                   mask, itr, case_data)

        if self.callback:
            self.callback(sim_context)
        return sim_context
コード例 #26
0
File: sol4.py  Project: berland/ert
#!/usr/bin/env python
import sys
import time
from res.enkf import EnKFMain, RunArg, NodeId, ResConfig
from res.enkf.data import EnkfNode
from ert.job_queue import JobQueueManager

# Build the main EnKF object from the configuration file named on the
# command line, and grab the file system for the current case.
config = ResConfig(sys.argv[1])
main = EnKFMain(config)
storage = main.getEnkfFsManager().getCurrentFileSystem()

# Initialize every realisation in the ensemble into the current case.
for index in range(main.getEnsembleSize()):
    main.getRealisation(index).initialize(storage)

# Fetch the job_queue from the SiteConfig object and wrap it in a
# JobQueueManager; the manager lets the queue run nonblocking in the
# background.
manager = JobQueueManager(main.siteConfig().getJobQueue())
manager.startQueue(main.getEnsembleSize(), verbose=False)

# Create list of RunArg instances which hold metadata for one running
# realisation, create the directory where the simulation should run
# and submit the simulation.
コード例 #27
0
ファイル: sol4.py プロジェクト: agchitu/ert-1
#!/usr/bin/env python
# Example driver: initialize an ensemble, start the job queue in the
# background, and prepare RunArg metadata for submitting simulations.
import sys
import time
from res.enkf import EnKFMain, RunArg, NodeId, ResConfig
from res.enkf.data import EnkfNode
from ert.job_queue import JobQueueManager

# The configuration file path is given as the first command-line argument.
res_config = ResConfig(sys.argv[1])
ert = EnKFMain(res_config)
fs_manager = ert.getEnkfFsManager()
fs = fs_manager.getCurrentFileSystem()

# Initialize the realisations.
for iens in range(ert.getEnsembleSize()):
    realisation = ert.getRealisation(iens)
    realisation.initialize(fs)

# Fetch out the job_queue from the SiteConfig object. In addition we
# create a JobQueueManager objects which wraps the queue. The purpose
# of this manager object is to let the queue run nonblocking in the
# background.
site_config = ert.siteConfig()
queue_manager = JobQueueManager(site_config.getJobQueue())
queue_manager.startQueue(ert.getEnsembleSize(), verbose=False)

# Create list of RunArg instances which hold metadata for one running
# realisation, create the directory where the simulation should run
# and submit the simulation.
path_fmt = "/tmp/run%d"
# NOTE(review): this example is truncated — the list literal below is
# never closed in the scraped snippet, so the script is not runnable
# as-is; verify against the original sol4.py.
arg_list = [
    RunArg.createEnsembleExperimentRunArg(fs, iens, path_fmt % iens)
コード例 #28
0
ファイル: batch_simulator.py プロジェクト: jokva/libres
class BatchSimulator(object):
    """Simulator for running batches of libres simulations with ext_param controls."""

    def __init__(self, res_config, controls, results):
        """Will create simulator which can be used to run multiple simulations.

        The @res_config argument should be a ResConfig object, representing the
        fully configured state of libres.


        The @controls argument configures which parameters the simulator should
        get when actually simulating. The @controls argument should be a
        dictionary like this:

            controls = {"cmode": ["Well","Group"], "order" : ["W1", "W2", "W3"]}

        In this example the simulator will expect two arrays 'cmode' and
        'order', consisting of two and three elements respectively. When
        actually simulating these values will be written to json files looking
        like:

             cmode.json = {"Well" : 1.0, "Group" : 2.0}
             order.json = {"W1" : 1, "W2" : 1.0, "W3": 1.0}

        When later invoking the start() method the simulator expects to get
        values for all parameters configured with the @controls argument,
        otherwise an exception will be raised. Internally in libres code the
        controls will be implemented as 'ext_param' instances.


        The @results argument is a list of keys of results which the simulator
        expects to be generated by the forward model. If argument @results
        looks like:

             results = ["CMODE", "order"]

        The simulator will look for the files 'CMODE_0' and 'order_0' in the
        simulation folder. If those files are not produced by the simulator an
        exception will be raised.

        Raises:
            ValueError: if @res_config is not a ResConfig instance.
        """
        if not isinstance(res_config, ResConfig):
            raise ValueError(
                "The first argument must be valid ResConfig instance")

        self.res_config = res_config
        self.ert = EnKFMain(self.res_config)
        self.control_keys = []
        self.result_keys = []

        ens_config = self.res_config.ensemble_config
        # Register an ext_param configuration node for every control vector;
        # start() will require values for each of these keys.
        for key, variables in controls.items():
            ens_config.addNode(
                EnkfConfigNode.create_ext_param(key, variables))
            self.control_keys.append(key)

        # Register a gen_data node per expected result; the forward model
        # must produce a file named "<key>_<report_step>" for each.
        for key in results:
            ens_config.addNode(
                EnkfConfigNode.create_gen_data(key, "{}_%d".format(key)))
            self.result_keys.append(key)

    def start(self, case_name, controls):
        """Will start batch simulation, returning a handle to query status and results.

        The start method will submit simulations to the queue system and then
        return a BatchContext handle which can be used to query for simulation
        status and results. The @case_name argument should just be string which
        will be used as name for the storage of these simulations in the
        system. The @controls argument is the set of control values, and the
        corresponding ID of the external realisation used for the simulations.
        The @controls argument must match the control argument used when the
        simulator was instantiated. Assuming the following @controls argument
        was passed to simulator construction:

             controls = {"cmode": ["Well","Group"], "order" : ["W1", "W2", "W3"]}

        Then the following @controls argument can be used in the start method
        to simulate four simulations:

              [ (1, {"cmode" : [1 ,2], "order" : [2,2,5]}),
                (1, {"cmode" : [1, 3], "order" : [2,2,7]}),
                (1, {"cmode" : [1, 7], "order" : [2,0,5]}),
                (2, {"cmode" : [1,-1], "order" : [2,2,1]})]

        The first integer argument in the tuple is the realisation id, so this
        simulation batch will consist of a total of four simulations, where the
        three first are based on realisation 1, and the last is based on
        realisation 2.

        Observe that only one BatchSimulator should actually be running at a
        time, so when you have called the 'start' method you need to let that
        batch complete before you start a new batch.

        Raises:
            ValueError: on a non-integer realisation id, a missing control
                key, or a control vector of the wrong length.
        """
        ens_config = self.res_config.ensemble_config
        fsm = self.ert.getEnkfFsManager()
        fs = fsm.getFileSystem(case_name)

        # Validate all input and persist the control values to storage
        # before anything is submitted to the queue.
        for sim_id, (geo_id, control_dict) in enumerate(controls):
            # Explicit check rather than `assert`: asserts are stripped when
            # Python runs with -O, which would silently skip validation.
            if not isinstance(geo_id, int):
                raise ValueError(
                    "The realisation id must be an integer, got: %r" % geo_id)

            node_id = NodeId(0, sim_id)
            if len(control_dict) != len(self.control_keys):
                raise ValueError("Not all keys supplied in controls")

            for key, values in control_dict.items():
                config_node = ens_config[key]
                ext_config = config_node.getModelConfig()
                if len(values) != len(ext_config):
                    raise ValueError("Wrong number of values for:%s" % key)

                node = EnkfNode(config_node)
                ext_node = node.as_ext_param()
                ext_node.set_vector(values)
                node.save(fs, node_id)

        # The input should be validated before we instantiate the BatchContext
        # object, at that stage a job_queue object with multiple threads is
        # started, and things will typically be in a quite sorry state if an
        # exception occurs.
        itr = 0
        mask = BoolVector(default_value=True, initial_size=len(controls))
        sim_context = BatchContext(self.result_keys, self.ert, fs, mask, itr)

        for sim_id, (geo_id, _control_dict) in enumerate(controls):
            sim_context.addSimulation(sim_id, geo_id)

        return sim_context