def test_sgof_satnuminferrer(tmpdir, mocker):
    """Test inferring of SATNUMS in SGOF strings"""
    sgofstr = """
SGOF
  0 0 1 1
  1 1 0 0
/
  0 0 1 1
  0.5 0.5 0.5 0.5
  1 1 0 0
/
  0 0 1 0
  0.1 0.1 0.1 0.1
  1 1 0 0
/
"""
    tmpdir.chdir()

    # Dimension guessing should detect three tables in the string:
    assert inferdims.guess_dim(sgofstr, "TABDIMS", 0) == 3

    dframe = satfunc.df(sgofstr)
    assert "SATNUM" in dframe
    assert len(dframe["SATNUM"].unique()) == 3
    assert len(dframe) == 8

    # Roundtrip through an Eclipse include string:
    reparsed = satfunc.df(satfunc.df2ecl(dframe))
    pd.testing.assert_frame_equal(dframe, reparsed)

    # Write to file and try to parse it with command line:
    sgoffile = "__sgof_tmp.txt"
    Path(sgoffile).write_text(sgofstr)
    mocker.patch(
        "sys.argv",
        ["ecl2csv", "satfunc", "-v", sgoffile, "-o", sgoffile + ".csv"],
    )
    ecl2csv.main()
    parsed_sgof = pd.read_csv(sgoffile + ".csv")
    assert len(parsed_sgof["SATNUM"].unique()) == 3
def test_wrong_columns():
    """Test some error situations"""
    # SWFN data given as SWOF (too few numbers per record):
    deck_str = """
SWOF
0 0 0
1 1 0
/
"""
    with pytest.raises(ValueError, match="Wrong number count for keyword SWOF"):
        satfunc.df(deck_str)

    # SWOF data given as SWFN (too many numbers per record):
    deck_str = """
SWFN
0 0 0 0
1 1 0 0
/
"""
    with pytest.raises(ValueError, match="Wrong number count for keyword SWFN"):
        satfunc.df(deck_str)

    # The following error is parseable into a dataframe, but gives
    # four saturation points, this error can not be detected while parsing.
    deck_str = """
SWFN
0 0 0 0
0.5 0.5 0.5 0
1 1 0 0
/
"""
    wrongdf = satfunc.df(deck_str)
    # We see the error as the saturation points are not unique:
    assert len(wrongdf["SW"]) == 4
    assert len(wrongdf["SW"].unique()) == 3
def test_csv2ecl(tmpdir, mocker):
    """Test command line interface for csv to Eclipse include files

    Checks both writing to a named file and writing to stdout.
    """
    tmpdir.chdir()
    tmpcsvfile = "satfunc.csv"
    swof_df = pd.DataFrame(
        columns=["KEYWORD", "SW", "KRW", "KROW", "PCOW"],
        data=[["SWOF", 0.0, 0.0, 1.0, 0.0], ["SWOF", 1.0, 1.0, 0.0, 0.0]],
    )
    swof_df.to_csv(tmpcsvfile, index=False)
    mocker.patch(
        "sys.argv", ["csv2ecl", "satfunc", "--output", "swof.inc", tmpcsvfile]
    )
    csv2ecl.main()
    # Fix: use a context manager so the file handle is closed
    # (the original open("swof.inc").read() leaked the handle):
    with open("swof.inc") as file_handle:
        inc_str = file_handle.read()
    pd.testing.assert_frame_equal(
        satfunc.df(inc_str).drop("SATNUM", axis="columns"),
        swof_df,
        check_like=True,
    )

    # Test writing to stdout:
    result = subprocess.run(
        ["csv2ecl", "satfunc", "--output", "-", tmpcsvfile],
        stdout=subprocess.PIPE,
    )
    pd.testing.assert_frame_equal(
        satfunc.df(result.stdout.decode()).drop("SATNUM", axis="columns"),
        swof_df,
        check_like=True,
    )
def test_mock_two_satnums_via_fam2_files(tmpdir, int_param, expected_file):
    """Interpolate in mocked family 2 include files and compare the result
    against a pre-computed expected file (or just validate the output when
    no expected file is given)."""
    tmpdir.chdir()

    # Dump low/base/high mock tables as family 2 include files:
    for case, filename in [("low", "pess.inc"), ("base", "base.inc"), ("high", "opt.inc")]:
        PyscalFactory.create_pyscal_list(
            TWO_SATNUM_PYSCAL_MOCK.loc[case], h=0.1
        ).dump_family_2(filename)

    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [{"param_w": int_param, "param_g": int_param}],
        "family": 2,
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)

    outfile_str = Path("outfile.inc").read_text()
    outfile_df = satfunc.df(outfile_str)
    if expected_file is None:
        # Use test function from pyscal to assert that the produced file is
        # valid for Eclipse (not testing numerically that the interpolation
        # is correct)
        sat_table_str_ok(outfile_str)
    else:
        expected_df = satfunc.df(Path(expected_file).read_text())
        pd.testing.assert_frame_equal(outfile_df, expected_df)
def test_satfunc_roundtrip():
    """Test that we can produce a SATNUM dataframe from the Reek case, convert
    it back to an include file, and then reinterpret it to the same"""
    eclfiles = EclFiles(DATAFILE)
    dframe = satfunc.df(eclfiles.get_ecldeck())
    # Dump to include-file syntax and parse it back:
    reparsed = satfunc.df(satfunc.df2ecl(dframe))
    # Row order is not guaranteed to survive the roundtrip, so sort first:
    pd.testing.assert_frame_equal(
        dframe.sort_values(["SATNUM", "KEYWORD"]),
        reparsed.sort_values(["SATNUM", "KEYWORD"]),
    )
def test_nodata():
    """Test when no data is found"""
    satdf = satfunc.df("")
    assert len(satdf) == 0

    # An empty dataframe should still yield a (commented) include string:
    inc = satfunc.df2ecl_swof(satdf)
    assert "No data" in inc
    assert satfunc.df(inc).empty
def test_str2df(string, expected_df):
    """Test that we can parse a string into a DataFrame, back to string,
    and to DataFrame again"""
    dframe = satfunc.df(string)
    pd.testing.assert_frame_equal(dframe, expected_df)
    if expected_df.empty:
        # Nothing meaningful to roundtrip for empty frames:
        return
    include_str = satfunc.df2ecl(dframe)
    pd.testing.assert_frame_equal(satfunc.df(include_str), expected_df)
def test_str2df():
    """Test parsing of a direct string"""
    swofstr = """
SWOF
 0 0 1 1
 1 1 0 0
/
"""
    satdf = satfunc.df(swofstr)
    assert len(satdf) == 2
    inc = satfunc.df2ecl_swof(satdf)
    df_from_inc = satfunc.df(inc)
    pd.testing.assert_frame_equal(satdf, df_from_inc)

    swofstr2 = """
-- RUNSPEC -- (this line is optional)

TABDIMS
  2 /

-- PROPS -- (optional)

SWOF
 0 0 1 1
 1 1 0 0
/
 0 0 1 1
 0.5 0.5 0.5 0.5
 1 1 0 0
/
"""
    satdf2 = satfunc.df(swofstr2)
    # Fix: these three statements referenced satdf instead of satdf2,
    # so the two-SATNUM roundtrip was never actually tested:
    assert "SATNUM" in satdf2
    assert len(satdf2["SATNUM"].unique()) == 2
    assert len(satdf2) == 5
    inc = satfunc.df2ecl(satdf2)
    df_from_inc = satfunc.df(inc)
    pd.testing.assert_frame_equal(satdf2, df_from_inc)

    # Try empty/bogus data:
    bogusdf = satfunc.deck2df("SWRF\n 0 /\n")
    # (warnings should be issued)
    assert bogusdf.empty

    # Test with bogus E100 keywords:
    tricky = satfunc.deck2df("FOO\n\nSWOF\n 0 0 0 1/ 1 1 1 0\n/\n")
    assert not tricky.empty
    assert len(tricky["SATNUM"].unique()) == 1
def test_sof2():
    """Test parsing of SOF2"""
    string = """
SOF2
 0 1
 1 0
/
"""
    dframe = satfunc.df(string)
    assert len(dframe) == 2
    for column in ["SO", "KRO"]:
        assert column in dframe

    # Roundtrip through an include string:
    reparsed = satfunc.df(satfunc.df2ecl(dframe))
    pd.testing.assert_frame_equal(dframe, reparsed)
def test_mock(tmpdir):
    """Mocked pyscal-generated input files.

    Note that this is using pyscal both for dumping to disk and parsing
    from disk, and is thus not representative for how flexible the code is
    for reading from include files not originating in pyscal.
    """
    tmpdir.chdir()
    mock_family_1()
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [{"param_w": -0.5, "param_g": 0.5}],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)

    # Fix: use a context manager so the file handle is closed
    # (the original open("outfile.inc").read() leaked the handle):
    with open("outfile.inc") as file_handle:
        outfile_df = satfunc.df(file_handle.read(), ntsfun=1)
    assert set(outfile_df["KEYWORD"].unique()) == {"SWOF", "SGOF"}
    assert outfile_df["SW"].sum() > 0
    assert outfile_df["SG"].sum() > 0
    assert outfile_df["KRW"].sum() > 0
    assert outfile_df["KROW"].sum() > 0
    assert outfile_df["KRG"].sum() > 0
    assert outfile_df["KROG"].sum() > 0
    assert outfile_df["PCOW"].sum() > 0
def test_multiple_keywords_family2():
    """Family 2 keywords (SWFN, SOF3, SGFN) in one string should all be parsed
    into the same dataframe, belonging to the same SATNUM."""
    deck_str = """
SWFN
-- Sw Krw Pcow
0 0 2
1. 1.000 0.00000e+00
/
SOF3
-- So Krow Krog
0.00000e+00 0.00000e+00 0.00000e+00
0.581051658 1.000000000 1.000000000
/
SGFN
-- Sg Krg Pcog
0.000 0.00000 0.000
0.800 1.00000 0.000
/
"""
    dframe = satfunc.df(deck_str)
    assert set(dframe["SATNUM"]) == {1}
    assert set(dframe["KEYWORD"]) == {"SWFN", "SOF3", "SGFN"}
    assert len(dframe) == 6
def test_ecldeck_to_satfunc_dframe():
    """Test that dataframes can be produced from a full Eclipse deck
    (the example Reek case)"""
    satdf = satfunc.df(EclFiles(DATAFILE).get_ecldeck())

    assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"}
    assert set(satdf["SATNUM"]) == {1}

    # Expected saturation endpoints and relperm ranges for the Reek deck:
    expected_ranges = {
        "SW": (0.32, 1.0),
        "SG": (0.0, 1 - 0.32),
        "KRW": (0.0, 1.0),
        "KROW": (0.0, 1.0),
        "KROG": (0.0, 1.0),
    }
    for column, (expected_min, expected_max) in expected_ranges.items():
        assert np.isclose(satdf[column].min(), expected_min)
        assert np.isclose(satdf[column].max(), expected_max)

    assert len(satdf) == 76
def test_slgof(tmpdir):
    """Test parsing of SLGOF"""
    tmpdir.chdir()
    string = """
SLGOF
 0 1 1 0
 1 0 0 0
/
"""
    dframe = satfunc.df(string)
    assert len(dframe) == 2
    for column in ["SL", "KRG", "KRO", "PCOG"]:
        assert column in dframe

    # Dump to a file on disk and roundtrip:
    inc = satfunc.df2ecl(dframe, filename="slgof.inc")
    assert os.path.exists("slgof.inc")
    pd.testing.assert_frame_equal(dframe, satfunc.df(inc))
def test_sgwfn():
    """Test parsing of SGWFN"""
    string = """
SGWFN
 0 1 1 0
 1 0 0 0
/
"""
    dframe = satfunc.df(string)
    assert len(dframe) == 2
    for column in ["SG", "KRG", "KRW", "PCGW"]:
        assert column in dframe

    # Roundtrip; sort since row order may differ after reparsing:
    reparsed = satfunc.df(satfunc.df2ecl(dframe))
    pd.testing.assert_frame_equal(
        dframe.sort_values(["SATNUM", "KEYWORD"]),
        reparsed.sort_values(["SATNUM", "KEYWORD"]),
    )
def test_sgfn():
    """Test parsing of SGFN"""
    string = """
SGFN
 0 1 0
 1 0 0
/
 0 1 0
 1 0.1 1
/
"""
    dframe = satfunc.df(string)
    assert len(dframe) == 4
    assert len(dframe["SATNUM"].unique()) == 2
    for column in ["SG", "KRG", "PCOG"]:
        assert column in dframe

    # Roundtrip through an include string:
    reparsed = satfunc.df(satfunc.df2ecl(dframe))
    pd.testing.assert_frame_equal(dframe, reparsed)
def test_defaulted_sof2_values():
    """The Eclipse manual states that missing values in a SWOF/SWFN/++
    record should be replaced by linearly interpolated values"""
    # The 1* entries default KRO, which should be filled by interpolation:
    dframe = satfunc.df(
        """
SOF2
0 0
0.1 1*
0.9 1*
1 1
/
"""
    )
    np.testing.assert_allclose(dframe["KRO"], [0, 0.1, 0.9, 1])
def test_defaulted_sgwfn_values():
    """The Eclipse manual states that missing values in a SWOF/SWFN/++
    record should be replaced by linearly interpolated values"""
    # The middle record defaults three columns (3*) which should all be
    # linearly interpolated between the neighbouring records:
    dframe = satfunc.df(
        """
SGWFN
0 0 1 1
0.5 3*
1 1 0 0
/
"""
    )
    expected = {"PCGW": [1, 0.5, 0], "KRG": [0, 0.5, 1], "KRW": [1, 0.5, 0]}
    for column, values in expected.items():
        np.testing.assert_allclose(dframe[column], values)
def test_defaulted_slgof_values():
    """The Eclipse manual states that missing values in a SWOF/SWFN/++
    record should be replaced by linearly interpolated values"""
    dframe = satfunc.df(
        """
SLGOF
0 0 1 1
0.5 3*
1 1 0 0
/
"""
    )
    expected = {"PCOG": [1, 0.5, 0], "KRG": [0, 0.5, 1], "KRO": [1, 0.5, 0]}
    for column, values in expected.items():
        np.testing.assert_allclose(dframe[column], values)

    # A default at the table edge has no neighbour to interpolate from,
    # yielding NaN. This will probably crash Eclipse:
    dframe = satfunc.df(
        """
SWFN
0 0 1*
1 1 0
/
"""
    )
    np.testing.assert_allclose(dframe["PCOW"], [np.nan, 0])
    np.testing.assert_allclose(dframe["KRW"], [0, 1])
def parse_satfunc_files(filenames: List[str]) -> pd.DataFrame:
    """
    Routine to gather scal tables (SWOF and SGOF) from ecl include files.

    Parameters:
        filenames: List with filenames to be parsed. Assumed to contain
            Eclipse saturation function keywords.

    Returns:
        dataframe with the tables, indexed by SATNUM
    """
    frames = [satfunc.df(Path(filename).read_text()) for filename in filenames]
    return pd.concat(frames, sort=False).set_index("SATNUM")
def test_df2ecl_order():
    """Test that we can control the keyword order in generated strings
    by the list supplied in keywords argument"""
    satdf = satfunc.df(EclFiles(DATAFILE).get_ecldeck())

    # Keyword order in output follows the keywords argument:
    swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"])
    sgof_swof = satfunc.df2ecl(satdf, keywords=["SGOF", "SWOF"])
    assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF")
    assert sgof_swof.find("SGOF") < sgof_swof.find("SWOF")

    # Restricting the keyword list filters the output; a bare string
    # is accepted as a single keyword:
    assert "SGOF" not in satfunc.df2ecl(satdf, keywords=["SWOF"])
    assert "SWOF" not in satfunc.df2ecl(satdf, keywords="SGOF")
def test_satfunc2df():
    """Test that dataframes are produced"""
    satdf = satfunc.df(EclFiles(DATAFILE).get_ecldeck())

    assert not satdf.empty
    # KEYWORD and SATNUM are present for all data:
    for column in ["KEYWORD", "SATNUM", "SW", "KRW", "KROW", "SG", "KROG"]:
        assert column in satdf
    keywords = satdf["KEYWORD"].unique()
    assert "SWOF" in keywords
    assert "SGOF" in keywords
    assert satdf["SATNUM"].unique() == [1]

    # Roundtrip through an include string; sort since row order
    # is not guaranteed to survive:
    reparsed = satfunc.df(satfunc.df2ecl(satdf))
    pd.testing.assert_frame_equal(
        satdf.sort_values(["SATNUM", "KEYWORD"]),
        reparsed.sort_values(["SATNUM", "KEYWORD"]),
    )
def tables_to_dataframe(filenames):
    """
    Routine to gather scal tables (SWOF and SGOF) from ecl include files.

    Parameters:
        filenames (list): List with filenames (str) to be parsed.
            Assumed to contain ecl SCAL tables

    Returns:
        dataframe with the tables
    """
    # Fix: the original used open(filename).read() in a list comprehension,
    # leaking one file handle per filename. Use context managers instead.
    frames = []
    for filename in filenames:
        with open(filename) as file_handle:
            frames.append(satfunc.df(file_handle.read()))
    return pd.concat(frames, sort=False)
def test_main(tmpdir):
    """Test command line interface"""
    tmpdir.chdir()
    tmpcsvfile = ".TMP-satfunc.csv"
    # NOTE(review): sys.argv is mutated directly and never restored;
    # consider the mocker/monkeypatch fixture as used elsewhere.
    sys.argv = ["satfunc2csv", DATAFILE, "-o", tmpcsvfile]
    satfunc.main()

    assert os.path.exists(tmpcsvfile)
    disk_df = pd.read_csv(tmpcsvfile)
    assert not disk_df.empty

    # Write back to include file:
    incfile = str(tmpdir.join("relperm.inc"))
    sys.argv = ["csv2ecl", "satfunc", "-v", tmpcsvfile, "-o", incfile]
    csv2ecl.main()

    # Reparse the include file on disk back to dataframe
    # and check dataframe equality
    assert os.path.exists(incfile)
    # Fix: close the file handle (the original open(incfile).read()
    # leaked it):
    with open(incfile) as file_handle:
        disk_inc_df = satfunc.df(file_handle.read())
    pd.testing.assert_frame_equal(
        disk_df.sort_values(["SATNUM", "KEYWORD"]).reset_index(drop=True),
        disk_inc_df.sort_values(["SATNUM", "KEYWORD"]).reset_index(drop=True),
    )
def test_mock(tmpdir):
    """Mocked pyscal-generated input files.

    Note that this is using pyscal both for dumping to disk and parsing
    from disk, and is thus not representative for how flexible the code is
    for reading from include files not originating in pyscal.
    """
    tmpdir.chdir()
    columns = [
        "SATNUM",
        "Nw",
        "Now",
        "Ng",
        "Nog",
        "swl",
        "a",
        "b",
        "poro_ref",
        "perm_ref",
        "drho",
    ]
    dframe_pess = pd.DataFrame(
        columns=columns,
        data=[[1, 1, 1, 1, 1, 0.1, 2, -2, 0.25, 100, 150]],
    )
    dframe_base = pd.DataFrame(
        columns=columns,
        data=[[1, 2, 2, 2, 2, 0.1, 2, -2, 0.25, 200, 150]],
    )
    dframe_opt = pd.DataFrame(
        columns=columns,
        data=[[1, 3, 3, 3, 3, 0.1, 2, -2, 0.25, 300, 150]],
    )
    PyscalFactory.create_pyscal_list(dframe_pess).dump_family_1("pess.inc")
    PyscalFactory.create_pyscal_list(dframe_base).dump_family_1("base.inc")
    PyscalFactory.create_pyscal_list(dframe_opt).dump_family_1("opt.inc")
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [{"param_w": -0.5, "param_g": 0.5}],
        "delta_s": 0.1,
    }
    # Fix: process_config() was (presumably accidentally) called twice
    # in a row; one invocation suffices.
    interp_relperm.process_config(config)

    # Fix: use a context manager so the file handle is closed:
    with open("outfile.inc") as file_handle:
        outfile_df = satfunc.df(file_handle.read(), ntsfun=1)
    assert set(outfile_df["KEYWORD"].unique()) == {"SWOF", "SGOF"}
    assert outfile_df["SW"].sum() > 0
    assert outfile_df["SG"].sum() > 0
    assert outfile_df["KRW"].sum() > 0
    assert outfile_df["KROW"].sum() > 0
    assert outfile_df["KRG"].sum() > 0
    assert outfile_df["KROG"].sum() > 0
    assert outfile_df["PCOW"].sum() > 0
def test_mock_two_satnums(tmpdir):
    """Mocked pyscal-generated input files.

    Note that this is using pyscal both for dumping to disk and parsing
    from disk, and is thus not representative for how flexible the code is
    for reading from include files not originating in pyscal.
    """
    # pylint: disable=no-value-for-parameter
    tmpdir.chdir()
    columns = [
        "SATNUM",
        "Nw",
        "Now",
        "Ng",
        "Nog",
        "swl",
        "a",
        "b",
        "poro_ref",
        "perm_ref",
        "drho",
    ]
    # NOTE(review): both rows carry SATNUM=1; presumably pyscal assigns
    # satnums sequentially on load — confirm.
    dframe_pess = pd.DataFrame(
        columns=columns,
        data=[
            [1, 1, 1, 1, 1, 0.1, 2, -2, 0.25, 100, 150],
            [1, 1, 1, 1, 1, 0.1, 2, -2, 0.25, 100, 150],
        ],
    )
    dframe_base = pd.DataFrame(
        columns=columns,
        data=[
            [1, 2, 2, 2, 2, 0.1, 2, -2, 0.25, 200, 150],
            [1, 2, 2, 2, 2, 0.1, 2, -2, 0.25, 200, 150],
        ],
    )
    dframe_opt = pd.DataFrame(
        columns=columns,
        data=[
            [1, 3, 3, 3, 3, 0.1, 2, -2, 0.25, 300, 150],
            [1, 3, 3, 3, 3, 0.1, 2, -2, 0.25, 300, 150],
        ],
    )
    PyscalFactory.create_pyscal_list(dframe_pess).dump_family_1("pess.inc")
    PyscalFactory.create_pyscal_list(dframe_base).dump_family_1("base.inc")
    PyscalFactory.create_pyscal_list(dframe_opt).dump_family_1("opt.inc")
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [{"param_w": -0.5, "param_g": 0.5}],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    # Fix: the file handle from open("outfile.inc").read() was leaked;
    # use context managers throughout:
    with open("outfile.inc") as file_handle:
        outfile_str = file_handle.read()
    # Assert things about the comments emitted by pyscal when interpolating:
    # This is used as a proxy for asserting that interpolation parameters
    # are used for the correct satnums
    assert outfile_str.find("SCAL recommendation interpolation to 0.5")
    assert outfile_str.find("SCAL recommendation interpolation to -0.5")
    # SWOF comes before SGOF:
    assert outfile_str.find("to -0.5") < outfile_str.find("to 0.5")
    outfile_df = satfunc.df(outfile_str, ntsfun=2)
    assert set(outfile_df["KEYWORD"].unique()) == {"SWOF", "SGOF"}
    assert set(outfile_df["SATNUM"].unique()) == {1, 2}

    # Per-table interpolation parameters:
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            {"tables": [1], "param_w": -0.9, "param_g": -0.5},
            {"tables": [2], "param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    with open("outfile.inc") as file_handle:
        outfile_str = file_handle.read()
    assert outfile_str.find("to -0.9") < outfile_str.find("to 0.5")
    assert outfile_str.find("to 0.5") < outfile_str.find("to -0.5")
    assert outfile_str.find("to 0.5") < outfile_str.find("to 0.8")

    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            # This is a user error, the latter will override the first
            {"param_w": -0.9, "param_g": -0.5},
            {"param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    with open("outfile.inc") as file_handle:
        outfile_str = file_handle.read()
    assert "interpolation to -0.9" not in outfile_str
    assert "interpolation to 0.8" in outfile_str

    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            # Here the user intentionally overwrites the first:
            {"param_w": -0.9, "param_g": -0.5},
            {"tables": [], "param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    with open("outfile.inc") as file_handle:
        outfile_str = file_handle.read()
    assert "interpolation to -0.9" not in outfile_str
    assert "interpolation to 0.8" in outfile_str
def test_mock_two_satnums_via_files(tmpdir):
    """Mocked pyscal-generated input files.

    Note that this is using pyscal both for dumping to disk and parsing
    from disk, and is thus not representative for how flexible the code is
    for reading from include files not originating in pyscal.
    """
    # pylint: disable=no-value-for-parameter
    tmpdir.chdir()
    PyscalFactory.create_pyscal_list(TWO_SATNUM_PYSCAL_MOCK.loc["low"]).dump_family_1(
        "pess.inc"
    )
    PyscalFactory.create_pyscal_list(TWO_SATNUM_PYSCAL_MOCK.loc["base"]).dump_family_1(
        "base.inc"
    )
    PyscalFactory.create_pyscal_list(TWO_SATNUM_PYSCAL_MOCK.loc["high"]).dump_family_1(
        "opt.inc"
    )
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [{"param_w": -0.5, "param_g": 0.5}],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    outfile_str = Path("outfile.inc").read_text()
    # Assert things about the comments emitted by pyscal when interpolating:
    # This is used as a proxy for asserting that interpolation parameters
    # are used for the correct satnums
    assert outfile_str.find("SCAL recommendation interpolation to 0.5")
    assert outfile_str.find("SCAL recommendation interpolation to -0.5")
    # SWOF comes before SGOF:
    assert outfile_str.find("to -0.5") < outfile_str.find("to 0.5")
    outfile_df = satfunc.df(outfile_str, ntsfun=2)
    assert set(outfile_df["KEYWORD"].unique()) == {"SWOF", "SGOF"}
    assert set(outfile_df["SATNUM"].unique()) == {1, 2}

    # Per-table interpolation parameters:
    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            {"tables": [1], "param_w": -0.9, "param_g": -0.5},
            {"tables": [2], "param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    # Fix: use Path.read_text() consistently; the original mixed it with
    # open("outfile.inc").read(), which leaked the file handle:
    outfile_str = Path("outfile.inc").read_text()
    assert outfile_str.find("to -0.9") < outfile_str.find("to 0.5")
    assert outfile_str.find("to 0.5") < outfile_str.find("to -0.5")
    assert outfile_str.find("to 0.5") < outfile_str.find("to 0.8")

    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            # This is a user error, the latter will override the first
            {"param_w": -0.9, "param_g": -0.5},
            {"param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    outfile_str = Path("outfile.inc").read_text()
    assert "interpolation to -0.9" not in outfile_str
    assert "interpolation to 0.8" in outfile_str

    config = {
        "base": ["base.inc"],
        "low": ["pess.inc"],
        "high": ["opt.inc"],
        "result_file": "outfile.inc",
        "interpolations": [
            # Here the user intentionally overwrites the first:
            {"param_w": -0.9, "param_g": -0.5},
            {"tables": [], "param_w": 0.5, "param_g": 0.8},
        ],
        "delta_s": 0.1,
    }
    interp_relperm.process_config(config)
    outfile_str = Path("outfile.inc").read_text()
    assert "interpolation to -0.9" not in outfile_str
    assert "interpolation to 0.8" in outfile_str