def df(eclfiles: EclFiles, initvectors: Optional[List[str]] = None) -> pd.DataFrame:
    """Main function for Python API users

    Supports only COMPDAT information for now. Will add a zone name
    if a zonefile is found alongside.

    Returns:
        pd.DataFrame with one row per cell-to-well connection
    """
    compdat_df = deck2dfs(eclfiles.get_ecldeck())["COMPDAT"]
    compdat_df = unrolldf(compdat_df)

    if initvectors:
        compdat_df = merge_initvectors(
            eclfiles, compdat_df, initvectors, ijknames=["I", "J", "K1"]
        )

    zonemap = eclfiles.get_zonemap()
    if zonemap:
        logger.info("Merging zonemap into compdat")
        compdat_df = merge_zones(compdat_df, zonemap)

    return compdat_df
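# A minimal usage sketch of the API above. Assumptions flagged here: that this
# function is exposed as ecl2df.compdat.df, and that "MYCASE.DATA" is a
# placeholder path to an existing Eclipse deck.
def example_compdat_usage(datafile: str = "MYCASE.DATA") -> pd.DataFrame:
    """Illustrative only: one row per cell-to-well connection, with the
    PORO INIT vector merged onto each connection cell."""
    from ecl2df import EclFiles, compdat  # assumed package layout

    eclfiles = EclFiles(datafile)
    return compdat.df(eclfiles, initvectors=["PORO"])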
def test_df_column_keys():
    """Test that we can slice the dataframe on columns"""
    sumdf = summary.df(EclFiles(REEK), column_keys="FOPT")
    assert set(sumdf.columns) == {"FOPT"}
    assert set(sumdf.attrs["meta"].keys()) == {"FOPT"}

    fop_cols = {
        "FOPRS",
        "FOPT",
        "FOPRH",
        "FOPTH",
        "FOPRF",
        "FOPR",
        "FOPTS",
        "FOPTF",
        "FOPP",
    }
    sumdf = summary.df(EclFiles(REEK), column_keys="FOP*")
    assert set(sumdf.columns) == fop_cols
    assert set(sumdf.attrs["meta"].keys()) == fop_cols

    sumdf = summary.df(EclFiles(REEK), column_keys=["FOP*"])
    assert set(sumdf.columns) == fop_cols
    assert set(sumdf.attrs["meta"].keys()) == fop_cols

    sumdf = summary.df(EclFiles(REEK), column_keys=["FOPR", "FOPT"])
    assert set(sumdf.columns) == {"FOPT", "FOPR"}
    assert set(sumdf.attrs["meta"].keys()) == {"FOPT", "FOPR"}

    sumdf_no_columns = summary.df(EclFiles(REEK), column_keys=["BOGUS"])
    assert sumdf_no_columns.columns.empty
    assert all(sumdf_no_columns.index == sumdf.index)
def test_ecldeck_to_satfunc_dframe():
    """Test that dataframes can be produced from a full Eclipse deck
    (the example Reek case)"""
    eclfiles = EclFiles(DATAFILE)
    satdf = satfunc.df(eclfiles.get_ecldeck())

    assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"}
    assert set(satdf["SATNUM"]) == {1}

    assert np.isclose(satdf["SW"].min(), 0.32)
    assert np.isclose(satdf["SW"].max(), 1.0)
    assert np.isclose(satdf["SG"].min(), 0.0)
    assert np.isclose(satdf["SG"].max(), 1 - 0.32)
    assert np.isclose(satdf["KRW"].min(), 0.0)
    assert np.isclose(satdf["KRW"].max(), 1.0)
    assert np.isclose(satdf["KROW"].min(), 0.0)
    assert np.isclose(satdf["KROW"].max(), 1.0)
    assert np.isclose(satdf["KROG"].min(), 0.0)
    assert np.isclose(satdf["KROG"].max(), 1.0)
    assert len(satdf) == 76
def test_df_column_keys():
    """Test that we can slice the dataframe on columns"""
    sumdf = summary.df(EclFiles(DATAFILE), column_keys="FOPT")
    assert set(sumdf.columns) == {"FOPT"}
    assert set(sumdf.attrs["meta"].keys()) == {"FOPT"}

    fop_cols = {
        "FOPRS",
        "FOPT",
        "FOPRH",
        "FOPTH",
        "FOPRF",
        "FOPR",
        "FOPTS",
        "FOPTF",
        "FOPP",
    }
    sumdf = summary.df(EclFiles(DATAFILE), column_keys="FOP*")
    assert set(sumdf.columns) == fop_cols
    assert set(sumdf.attrs["meta"].keys()) == fop_cols

    sumdf = summary.df(EclFiles(DATAFILE), column_keys=["FOP*"])
    assert set(sumdf.columns) == fop_cols
    assert set(sumdf.attrs["meta"].keys()) == fop_cols

    sumdf = summary.df(EclFiles(DATAFILE), column_keys=["FOPR", "FOPT"])
    assert set(sumdf.columns) == {"FOPT", "FOPR"}
    assert set(sumdf.attrs["meta"].keys()) == {"FOPT", "FOPR"}

    with pytest.raises(ValueError, match="No valid key"):
        summary.df(EclFiles(DATAFILE), column_keys=["BOGUS"])
def test_density():
    """Test that DENSITY can be parsed from files and from strings"""
    eclfiles = EclFiles(DATAFILE)
    density_df = pvt.density_fromdeck(eclfiles.get_ecldeck())
    assert len(density_df) == 1
    assert "PVTNUM" in density_df
    assert "OILDENSITY" in density_df
    assert "WATERDENSITY" in density_df
    assert "GASDENSITY" in density_df

    dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df))
    pd.testing.assert_frame_equal(dframe_via_string, density_df)

    two_pvtnum_deck = """DENSITY
        860 999.04 1.1427 /
        800 950 1.05 /
        """
    density_df = pvt.density_fromdeck(EclFiles.str2deck(two_pvtnum_deck))
    # (a warning will be printed that we cannot guess the number of tables)
    assert len(density_df) == 1

    density_df = pvt.density_fromdeck(two_pvtnum_deck)
    assert "PVTNUM" in density_df
    assert density_df["PVTNUM"].max() == 2
    assert density_df["PVTNUM"].min() == 1
    assert "OILDENSITY" in density_df
    dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df))
    pd.testing.assert_frame_equal(dframe_via_string, density_df)

    # Test empty data:
    inc = pvt.df2ecl_density(pvt.df(""))
    assert "No data" in inc
    assert pvt.df(inc).empty
def test_decks():
    """Test some string decks"""
    deckstr = """
OIL
WATER
GAS
EQUIL
2000 200 2200 /
"""
    deck = EclFiles.str2deck(deckstr)
    df = equil.deck2df(deck)
    assert df["OWC"].values == 2200
    assert len(df) == 1
    assert "IGNORE1" not in df

    deckstr = """
OIL
WATER
EQUIL
2000 200 2200 /
"""
    deck = EclFiles.str2deck(deckstr)
    df = equil.deck2df(deck)
    assert df["OWC"].values == 2200
    assert len(df) == 1
    assert "IGNORE1" not in df

    deckstr = """
GAS
WATER
EQUIL
2000 200 2200 /
"""
    deck = EclFiles.str2deck(deckstr)
    df = equil.deck2df(deck)
    assert df["GWC"].values == 2200
    assert "OWC" not in df
    assert len(df) == 1
    assert "IGNORE2" not in df

    deckstr = """
GAS
OIL
EQUIL
2000 200 2200 1 2100 3 /
"""
    deck = EclFiles.str2deck(deckstr)
    df = equil.deck2df(deck)
    assert df["GOC"].values == 2100
    assert "GWC" not in df
    assert "OWC" not in df
    assert len(df) == 1
    assert "IGNORE2" not in df
def test_wcon2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(DATAFILE)
    wcondf = wcon.df(eclfiles.get_ecldeck())

    assert not wcondf.empty
    assert "DATE" in wcondf  # for all data
    assert "KEYWORD" in wcondf
    for col in wcondf.columns:
        assert col == col.upper()
def test_satfunc_roundtrip():
    """Test that we can produce a SATNUM dataframe from the Reek case, convert
    it back to an include file, and then reinterpret it to the same dataframe"""
    eclfiles = EclFiles(DATAFILE)
    satdf = satfunc.df(eclfiles.get_ecldeck())
    inc = satfunc.df2ecl(satdf)
    df_from_inc = satfunc.df(inc)
    pd.testing.assert_frame_equal(
        satdf.sort_values(["SATNUM", "KEYWORD"]),
        df_from_inc.sort_values(["SATNUM", "KEYWORD"]),
    )
def test_multiple_parameters():
    """Test what happens when we have duplicate parameter files"""
    eclfiles = EclFiles(DATAFILE)
    parametersjson = Path(eclfiles.get_path()) / "parameters.json"
    parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt"
    parameterstxt.write_text("FOO 1\nBAR 4", encoding="utf-8")
    parametersjson.write_text(json.dumps({"BAR": 5, "COM": 6}), encoding="utf-8")

    param_dict = load_all(find_parameter_files(eclfiles))
    assert len(param_dict) == 3
    assert param_dict["BAR"] == 5  # json has precedence over txt

    parametersjson.unlink()
    parameterstxt.unlink()
def test_faults2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(DATAFILE)
    faultsdf = faults.df(eclfiles.get_ecldeck())

    assert "NAME" in faultsdf
    assert "I" in faultsdf
    assert "J" in faultsdf
    assert "K" in faultsdf
    assert "FACE" in faultsdf
    assert not faultsdf.empty
def test_multiple_parameters():
    """Test what happens when we have duplicate parameter files"""
    eclfiles = EclFiles(DATAFILE)
    parametersjson = os.path.join(eclfiles.get_path(), "parameters.json")
    parameterstxt = os.path.join(eclfiles.get_path(), os.pardir, "parameters.txt")
    with open(parameterstxt, "w") as pfile:
        pfile.write("FOO 1\nBAR 4")
    with open(parametersjson, "w") as pfile:
        pfile.write(json.dumps({"BAR": 5, "COM": 6}))

    param_dict = load_all(find_parameter_files(eclfiles))
    assert len(param_dict) == 3
    assert param_dict["BAR"] == 5  # json has precedence over txt

    os.unlink(parametersjson)
    os.unlink(parameterstxt)
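# A sketch of the precedence mechanism the test above relies on: when dict
# sources are merged, later sources override earlier ones for duplicated keys.
# This illustrates the expected behaviour only; load_all()'s actual
# implementation may differ.
def test_precedence_merge_sketch():
    """Sketch: json values override txt values for duplicated keys"""
    from_txt = {"FOO": 1, "BAR": 4}
    from_json = {"BAR": 5, "COM": 6}
    merged = {**from_txt, **from_json}  # json applied last, wins on "BAR"
    assert merged == {"FOO": 1, "BAR": 5, "COM": 6}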
def test_df2ecl_order():
    """Test that we can control the keyword order in generated strings
    by the list supplied in the keywords argument"""
    eclfiles = EclFiles(DATAFILE)
    satdf = satfunc.df(eclfiles.get_ecldeck())

    swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"])
    assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF")
    sgof_swof = satfunc.df2ecl(satdf, keywords=["SGOF", "SWOF"])
    assert sgof_swof.find("SGOF") < sgof_swof.find("SWOF")

    only_swof = satfunc.df2ecl(satdf, keywords=["SWOF"])
    assert "SGOF" not in only_swof
    only_sgof = satfunc.df2ecl(satdf, keywords="SGOF")
    assert "SWOF" not in only_sgof
def test_eightcells_dataset():
    """Test the Eightcells dataset"""
    eclfiles = EclFiles(EIGHTCELLS)
    gruptree_df = gruptree.df(eclfiles.get_ecldeck())

    expected_dframe = pd.DataFrame(
        [
            ["2000-01-01", "FIELD", "GRUPTREE", np.nan],
            ["2000-01-01", "OP1", "WELSPECS", "OPS"],
            ["2000-01-01", "OPS", "GRUPTREE", "FIELD"],
        ],
        columns=["DATE", "CHILD", "KEYWORD", "PARENT"],
    )
    expected_dframe["DATE"] = pd.to_datetime(expected_dframe["DATE"])
    pd.testing.assert_frame_equal(gruptree_df, expected_dframe, check_dtype=False)
def test_gridgeometry2df(mocker):
    """Test that dataframes are produced"""
    eclfiles = EclFiles(REEK)
    grid_geom = grid.gridgeometry2df(eclfiles)

    assert isinstance(grid_geom, pd.DataFrame)
    assert not grid_geom.empty

    assert "I" in grid_geom
    assert "J" in grid_geom
    assert "K" in grid_geom
    assert "X" in grid_geom
    assert "Y" in grid_geom
    assert "Z" in grid_geom
    assert "Z_MIN" in grid_geom
    assert "Z_MAX" in grid_geom
    assert "VOLUME" in grid_geom
    assert "ZONE" in grid_geom
    assert "GLOBAL_INDEX" in grid_geom

    # If at least one cell is inactive, this will hold:
    assert grid_geom["GLOBAL_INDEX"].max() > len(grid_geom)

    assert (grid_geom["Z_MAX"] > grid_geom["Z_MIN"]).all()

    with pytest.raises(TypeError, match="missing 1 required positional"):
        grid.gridgeometry2df()

    with pytest.raises(AttributeError):
        # This error situation we don't really try to handle:
        grid.gridgeometry2df(None)

    with pytest.raises(ValueError, match="No EGRID file supplied"):
        mocker.patch("ecl2df.eclfiles.EclFiles.get_egridfile", return_value=None)
        grid.gridgeometry2df(eclfiles)
def test_subvectors():
    """Test that we can ask for a few vectors only"""
    eclfiles = EclFiles(EIGHTCELLS)

    init_df = grid.init2df(eclfiles, "PORO")
    assert "PORO" in init_df
    assert "PERMX" not in init_df
    assert "PORV" not in init_df

    init_df = grid.init2df(eclfiles, "P*")
    assert "PORO" in init_df
    assert "PERMX" in init_df
    assert "PVTNUM" in init_df
    assert "SATNUM" not in init_df

    init_df = grid.init2df(eclfiles, ["P*"])
    assert "PORO" in init_df
    assert "PERMX" in init_df
    assert "PVTNUM" in init_df
    assert "SATNUM" not in init_df

    init_df = grid.init2df(eclfiles, ["P*", "*NUM"])
    assert "PORO" in init_df
    assert "PERMX" in init_df
    assert "PVTNUM" in init_df
    assert "SATNUM" in init_df
    assert "MULTZ" not in init_df
def test_summary2df_dates():
    """Test that we have some API possibilities with ISO dates"""
    eclfiles = EclFiles(DATAFILE)

    sumdf = summary.df(
        eclfiles,
        start_date=datetime.date(2002, 1, 2),
        end_date="2002-03-01",
        time_index="daily",
        datetime=True,
    )
    assert sumdf.index.name == "DATE"
    assert sumdf.index.dtype in ("datetime64[ns]", "datetime64")
    assert len(sumdf) == 59
    assert str(sumdf.index.values[0])[0:10] == "2002-01-02"
    assert sumdf.index.values[0] == np.datetime64("2002-01-02")
    assert sumdf.index.values[-1] == np.datetime64("2002-03-01")

    sumdf = summary.df(eclfiles, time_index="last", datetime=True)
    assert len(sumdf) == 1
    assert sumdf.index.values[0] == np.datetime64("2003-01-02")

    # Leave this test for the datetime=False behaviour:
    sumdf = summary.df(eclfiles, time_index="first")
    assert len(sumdf) == 1
    assert str(sumdf.index.values[0]) == "2000-01-01"
def test_reek_dataset():
    """Test the Reek dataset.

    It contains no CPI data and should return an empty dataframe.
    """
    eclfiles = EclFiles(REEK)
    wellconnstatus_df = wellconnstatus.df(eclfiles)
    assert wellconnstatus_df.empty
def test_pvtw():
    """Test that PVTW can be parsed from a string"""
    deck = """PVTW
      327.3 1.03 4.51E-005 0.25 0 /"""
    pvtw_df = pvt.pvtw_fromdeck(EclFiles.str2deck(deck))
    pd.testing.assert_frame_equal(
        pvtw_df,
        pd.DataFrame(
            columns=[
                "PRESSURE",
                "VOLUMEFACTOR",
                "COMPRESSIBILITY",
                "VISCOSITY",
                "VISCOSIBILITY",
                "PVTNUM",
            ],
            data=[[327.3, 1.03, 4.51e-005, 0.25, 0.0, 1]],
        ),
        check_like=True,
    )

    deck = """PVTW
      327.3 1.03 4.51E-005 0.25 0 /
      300 1 0.0001 0.2 /"""
    # Must give the string, not a parsed deck, for NTPVT guessing:
    pvtw_df = pvt.pvtw_fromdeck(deck)
    assert len(pvtw_df) == 2

    # Test empty data:
    inc = pvt.df2ecl_pvtw(pvt.df(""))
    assert "No data" in inc
    assert pvt.df(inc).empty
def test_pillars():
    """Test that we can build a dataframe of pillar statistics"""
    eclfiles = EclFiles(DATAFILE)
    pillars_df = pillars.df(eclfiles)

    assert "PILLAR" in pillars_df
    assert "VOLUME" in pillars_df
    assert "PORV" in pillars_df
    assert "PERMX" in pillars_df
    assert "X" in pillars_df
    assert "Y" in pillars_df
    assert "PORO" in pillars_df
    assert "OILVOL" not in pillars_df
    assert "FIPNUM" not in pillars_df
    assert "EQLNUM" not in pillars_df
    assert "OWC" not in pillars_df
    assert "GOC" not in pillars_df
    assert len(pillars_df) == 2560

    pillars_df = pillars.df(eclfiles, region="FIPNUM")
    assert "FIPNUM" in pillars_df
    assert len(pillars_df["FIPNUM"].unique()) == 6
    assert "OILVOL" not in pillars_df

    pillars_df = pillars.df(eclfiles, rstdates="first")
    firstdate = str(grid.dates2rstindices(eclfiles, "first")[1][0])
    assert "OILVOL@" + firstdate in pillars_df
    assert "GASVOL@" + firstdate in pillars_df
    assert "WATVOL@" + firstdate in pillars_df

    pillars_df = pillars.df(eclfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2)
    lastdate = str(grid.dates2rstindices(eclfiles, "last")[1][0])
    assert "OWC@" + lastdate in pillars_df
    assert "GOC@" + lastdate not in pillars_df  # because the dataset has no GAS
def test_datenormalization():
    """Test normalization of dates, where dates can be ensured to be
    on dategrid boundaries"""
    start = datetime.date(1997, 11, 5)
    end = datetime.date(2020, 3, 2)
    assert normalize_dates(start, end, "monthly") == (
        datetime.date(1997, 11, 1),
        datetime.date(2020, 4, 1),
    )
    assert normalize_dates(start, end, "yearly") == (
        datetime.date(1997, 1, 1),
        datetime.date(2021, 1, 1),
    )

    # Check it does not touch already aligned dates:
    assert normalize_dates(
        datetime.date(1997, 11, 1), datetime.date(2020, 4, 1), "monthly"
    ) == (datetime.date(1997, 11, 1), datetime.date(2020, 4, 1))
    assert normalize_dates(
        datetime.date(1997, 1, 1), datetime.date(2021, 1, 1), "yearly"
    ) == (datetime.date(1997, 1, 1), datetime.date(2021, 1, 1))

    # Check that we normalize correctly with get_smry():
    # realization-0 here has its last summary date at 2003-01-02
    eclfiles = EclFiles(DATAFILE)
    daily = summary.df(eclfiles, column_keys="FOPT", time_index="daily")
    assert str(daily.index[-1]) == "2003-01-02"
    monthly = summary.df(eclfiles, column_keys="FOPT", time_index="monthly")
    assert str(monthly.index[-1]) == "2003-02-01"
    yearly = summary.df(eclfiles, column_keys="FOPT", time_index="yearly")
    assert str(yearly.index[-1]) == "2004-01-01"
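# An illustrative sketch of the monthly rounding rule that the asserts above
# encode: floor the start date to the 1st of its month, ceil the end date to
# the 1st of the following month. This is not the library's normalize_dates
# implementation, only a re-derivation of the expected behaviour.
def test_monthly_normalization_sketch():
    """Sketch: re-derive monthly date normalization with the standard library"""

    def normalize_monthly(start, end):
        norm_start = start.replace(day=1)
        if end.day == 1:
            norm_end = end  # already aligned
        else:
            # Step past the end of the month, then snap to the 1st of the next
            norm_end = (end.replace(day=1) + datetime.timedelta(days=32)).replace(day=1)
        return norm_start, norm_end

    assert normalize_monthly(datetime.date(1997, 11, 5), datetime.date(2020, 3, 2)) == (
        datetime.date(1997, 11, 1),
        datetime.date(2020, 4, 1),
    )
    # Already aligned dates are untouched:
    assert normalize_monthly(datetime.date(1997, 11, 1), datetime.date(2020, 4, 1)) == (
        datetime.date(1997, 11, 1),
        datetime.date(2020, 4, 1),
    )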
def test_str2df():
    """Test when we send in a string directly"""
    schstr = """
GRUPTREE
 'OPWEST' 'OP' /
 'OP' 'FIELD' /
 'FIELD' 'AREA' /
 'AREA' 'NORTHSEA' /
/

WELSPECS
 'OP1' 'OPWEST' 41 125 1759.74 'OIL' 0.0 'STD' 'SHUT' 'YES' 0 'SEG' /
/
"""
    deck = EclFiles.str2deck(schstr)
    grupdf = gruptree.df(deck)
    assert grupdf.dropna().empty  # the DATE is empty

    # This is only available if GRUPNET is also there:
    assert "TERMINAL_PRESSURE" not in grupdf

    withstart = gruptree.gruptree2df(deck, startdate="2019-01-01")
    assert not withstart.dropna().empty
    assert len(withstart) == 5
def test_gruptree2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(DATAFILE)
    grupdf = gruptree.df(eclfiles.get_ecldeck())

    assert not grupdf.empty
    assert len(grupdf["DATE"].unique()) == 5
    assert len(grupdf["CHILD"].unique()) == 10
    assert len(grupdf["PARENT"].unique()) == 3
    assert set(grupdf["KEYWORD"].unique()) == {"GRUPTREE", "WELSPECS"}

    grupdfnowells = gruptree.df(eclfiles.get_ecldeck(), welspecs=False)
    assert len(grupdfnowells["KEYWORD"].unique()) == 1
    assert grupdf["PARENT"].unique()[0] == "FIELD"
    assert grupdf["KEYWORD"].unique()[0] == "GRUPTREE"
def test_merge_initvectors():
    """Test merging of INIT vectors into dataframes with I, J, K columns"""
    eclfiles = EclFiles(REEK)
    assert grid.merge_initvectors(eclfiles, pd.DataFrame(), []).empty

    foo_df = pd.DataFrame([{"FOO": 1}])
    pd.testing.assert_frame_equal(grid.merge_initvectors(eclfiles, foo_df, []), foo_df)

    with pytest.raises(ValueError, match="All of the columns"):
        grid.merge_initvectors(eclfiles, foo_df, ["NONEXISTING"])

    minimal_df = pd.DataFrame([{"I": 10, "J": 11, "K": 12}])

    with pytest.raises(KeyError):
        grid.merge_initvectors(eclfiles, minimal_df, ["NONEXISTING"])

    withporo = grid.merge_initvectors(eclfiles, minimal_df, ["PORO"])
    pd.testing.assert_frame_equal(
        withporo, minimal_df.assign(PORO=0.221848), check_dtype=False
    )

    with pytest.raises(ValueError):
        # ijknames must be of length 3:
        grid.merge_initvectors(
            eclfiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"]
        )
    with pytest.raises(ValueError):
        grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["I", "J"])
    with pytest.raises(ValueError, match="All of the columns"):
        grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"])
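# Conceptually, merge_initvectors joins per-cell INIT data onto the incoming
# dataframe by its (I, J, K) columns. A sketch of that idea with plain pandas;
# this is an illustration, not the library's implementation:
def test_ijk_merge_sketch():
    """Sketch: joining a per-cell property onto rows keyed by I, J, K"""
    rows = pd.DataFrame([{"I": 10, "J": 11, "K": 12}])
    initdata = pd.DataFrame([{"I": 10, "J": 11, "K": 12, "PORO": 0.221848}])
    merged = pd.merge(rows, initdata, on=["I", "J", "K"])
    assert len(merged) == 1
    assert np.isclose(merged["PORO"].iloc[0], 0.221848)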
def test_df2ecl_editnnc(tmpdir):
    """Test generation of the EDITNNC keyword"""
    eclfiles = EclFiles(DATAFILE)
    nncdf = nnc.df(eclfiles)
    tmpdir.chdir()

    nncdf["TRANM"] = 2
    editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc")
    with open("editnnc.inc") as fhandle:
        editnnc_fromfile = fhandle.read()
    assert editnnc == editnnc_fromfile
    assert "EDITNNC" in editnnc
    assert editnnc.count("/") == len(nncdf) + 1
    assert "avg multiplier" in editnnc

    # Fails when columns are missing:
    with pytest.raises((KeyError, ValueError)):
        nnc.df2ecl_editnnc(nncdf[["I1", "I2"]])

    editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True)
    assert "avg multiplier" not in editnnc

    # Test compatibility with the trans module:
    trans_df = trans.df(eclfiles, addnnc=True)
    editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3))
    assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc

    print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1)))
def test_smry_meta():
    """Test obtaining the metadata dictionary for summary vectors
    from an EclSum object"""
    meta = smry_meta(EclFiles(DATAFILE))

    assert isinstance(meta, dict)
    assert "FOPT" in meta
    assert "FOPTH" in meta
    assert meta["FOPT"]["unit"] == "SM3"
    assert meta["FOPR"]["unit"] == "SM3/DAY"
    assert meta["FOPT"]["is_total"]
    assert not meta["FOPR"]["is_total"]
    assert not meta["FOPT"]["is_rate"]
    assert meta["FOPR"]["is_rate"]
    assert not meta["FOPT"]["is_historical"]
    assert meta["FOPTH"]["is_historical"]

    assert meta["WOPR:OP_1"]["wgname"] == "OP_1"
    assert meta["WOPR:OP_1"]["keyword"] == "WOPR"

    if "wgname" in meta["FOPT"]:
        # It is not yet enforced that None fields are actually included:
        assert meta["FOPT"]["wgname"] is None

    # Can create dataframes like this:
    meta_df = pd.DataFrame.from_dict(meta, orient="index")
    hist_keys = meta_df[meta_df["is_historical"]].index
    assert all(key.split(":")[0].endswith("H") for key in hist_keys)
def test_wconinje():
    """Test WCONINJE parsing and column names"""
    wconstr = """
WCONINJE
  'FOO' 0 1 /
/
"""
    deck = EclFiles.str2deck(wconstr)
    wconinje_df = wcon.df(deck)
    pd.testing.assert_frame_equal(
        wconinje_df,
        pd.DataFrame(
            [
                {
                    "WELL": "FOO",
                    "TYPE": "0",
                    "STATUS": "1",
                    "CMODE": None,
                    "RATE": None,
                    "RESV": None,
                    "BHP": 6895,
                    "THP": None,
                    "VFP_TABLE": 0,
                    "VAPOIL_C": 0,
                    "GAS_STEAM_RATIO": 0,
                    "SURFACE_OIL_FRACTION": 0,
                    "SURFACE_WATER_FRACTION": 0,
                    "SURFACE_GAS_FRACTION": 0,
                    "OIL_STEAM_RATIO": 0,
                    "DATE": None,
                    "KEYWORD": "WCONINJE",
                }
            ]
        ),
    )
def test_wconhist():
    """Test WCONHIST parsing and column names"""
    wconstr = """
WCONHIST
  'FOO' 0 1 /
/
"""
    deck = EclFiles.str2deck(wconstr)
    wconhist_df = wcon.df(deck)
    pd.testing.assert_frame_equal(
        wconhist_df,
        pd.DataFrame(
            [
                {
                    "WELL": "FOO",
                    "STATUS": "0",
                    "CMODE": "1",
                    "ORAT": 0,
                    "WRAT": 0,
                    "GRAT": 0,
                    "VFP_TABLE": 0,
                    "ALQ": 0,
                    "THP": 0,
                    "BHP": 0,
                    "NGLRAT": 0,
                    "DATE": None,
                    "KEYWORD": "WCONHIST",
                }
            ]
        ),
    )
def test_tstep():
    """Test that we support the TSTEP keyword"""
    schstr = """
DATES
   1 MAY 2001 /
/

WCONHIST
 'OP1' 1000 /
/

TSTEP
  1 /

WCONHIST
 'OP1' 2000 /
/

TSTEP
  2 3 /

WCONHIST
 'OP1' 3000 /
/
"""
    deck = EclFiles.str2deck(schstr)
    wcondf = wcon.df(deck)
    dates = [str(x) for x in wcondf["DATE"].unique()]
    assert len(dates) == 3
    assert "2001-05-01" in dates
    assert "2001-05-02" in dates  # 2001-05-01 advanced 1 day by "TSTEP 1"
    assert "2001-05-07" in dates  # 2001-05-02 advanced 2 + 3 days by "TSTEP 2 3"
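# A standard-library cross-check of the TSTEP date arithmetic asserted above;
# this only re-derives the expected dates and does not involve the deck parser.
# (Assumes "import datetime" at module top, as used elsewhere in these tests.)
def test_tstep_arithmetic_sketch():
    """Sketch: TSTEP advances the current date by the given number of days"""
    start = datetime.date(2001, 5, 1)
    after_first = start + datetime.timedelta(days=1)  # TSTEP 1
    after_second = after_first + datetime.timedelta(days=2 + 3)  # TSTEP 2 3
    assert after_first == datetime.date(2001, 5, 2)
    assert after_second == datetime.date(2001, 5, 7)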
def test_rft2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(DATAFILE)
    rftdf = rft.rft2df(eclfiles)
    assert "ZONE" in rftdf
    assert not rftdf.empty
    assert not rftdf.columns.empty
def test_nnc2df_coords():
    """Test that we are able to add coordinates"""
    eclfiles = EclFiles(DATAFILE)
    gnncdf = nnc.df(eclfiles, coords=True)
    assert not gnncdf.empty
    assert "X" in gnncdf
    assert "Y" in gnncdf
    assert "Z" in gnncdf