コード例 #1
0
def test_df_column_keys():
    """Test that the summary dataframe can be sliced on columns."""
    dframe = summary.df(EclFiles(REEK), column_keys="FOPT")
    assert set(dframe.columns) == {"FOPT"}
    assert set(dframe.attrs["meta"].keys()) == {"FOPT"}

    expected_fop = {
        "FOPP",
        "FOPR",
        "FOPRF",
        "FOPRH",
        "FOPRS",
        "FOPT",
        "FOPTF",
        "FOPTH",
        "FOPTS",
    }
    # A bare wildcard string and the same wildcard wrapped in a list
    # must yield identical column sets:
    for keys in ["FOP*", ["FOP*"]]:
        dframe = summary.df(EclFiles(REEK), column_keys=keys)
        assert set(dframe.columns) == expected_fop
        assert set(dframe.attrs["meta"].keys()) == expected_fop

    dframe = summary.df(EclFiles(REEK), column_keys=["FOPR", "FOPT"])
    assert set(dframe.columns) == {"FOPR", "FOPT"}
    assert set(dframe.attrs["meta"].keys()) == {"FOPR", "FOPT"}

    # A key matching nothing gives no columns, but the index is intact:
    no_columns = summary.df(EclFiles(REEK), column_keys=["BOGUS"])
    assert no_columns.columns.empty
    assert all(no_columns.index == dframe.index)
コード例 #2
0
ファイル: test_grid.py プロジェクト: miroine/ecl2df
def test_rstdates():
    """Test extraction of restart dates and mapping of date specifiers
    ("all"/"first"/"last") to restart indices."""
    eclfiles = EclFiles(DATAFILE)

    alldates = grid.rstdates(eclfiles)
    assert len(alldates) == 4

    didx = grid.dates2rstindices(eclfiles, "all")
    idx_list, date_list = didx[0], didx[1]
    assert len(idx_list) == len(alldates)
    assert len(date_list) == len(alldates)
    assert isinstance(idx_list[0], int)
    assert isinstance(date_list[0], datetime.date)
    assert date_list[0] == alldates[0]
    assert date_list[-1] == alldates[-1]

    # "first" and "last" pick out the corresponding single date:
    assert grid.dates2rstindices(eclfiles, "first")[1][0] == alldates[0]
    assert grid.dates2rstindices(eclfiles, "last")[1][0] == alldates[-1]

    assert isinstance(grid.rstdates(eclfiles), list)

    # With a missing RST file, accessing it should raise:
    eclfiles = EclFiles("BOGUS.DATA")
    with pytest.raises(IOError):
        eclfiles.get_rstfile()
コード例 #3
0
def test_df_column_keys():
    """Test that the summary dataframe can be sliced on columns."""
    dframe = summary.df(EclFiles(DATAFILE), column_keys="FOPT")
    assert set(dframe.columns) == {"FOPT"}
    assert set(dframe.attrs["meta"].keys()) == {"FOPT"}

    expected_fop = {
        "FOPP",
        "FOPR",
        "FOPRF",
        "FOPRH",
        "FOPRS",
        "FOPT",
        "FOPTF",
        "FOPTH",
        "FOPTS",
    }
    # A bare wildcard string and the same wildcard wrapped in a list
    # must yield identical column sets:
    for keys in ["FOP*", ["FOP*"]]:
        dframe = summary.df(EclFiles(DATAFILE), column_keys=keys)
        assert set(dframe.columns) == expected_fop
        assert set(dframe.attrs["meta"].keys()) == expected_fop

    dframe = summary.df(EclFiles(DATAFILE), column_keys=["FOPR", "FOPT"])
    assert set(dframe.columns) == {"FOPR", "FOPT"}
    assert set(dframe.attrs["meta"].keys()) == {"FOPR", "FOPT"}

    # A key matching no summary vector is an error:
    with pytest.raises(ValueError, match="No valid key"):
        summary.df(EclFiles(DATAFILE), column_keys=["BOGUS"])
コード例 #4
0
def test_datenormalization():
    """Test normalization of dates, where
    dates can be ensured to be on dategrid boundaries"""

    start = datetime.date(1997, 11, 5)
    end = datetime.date(2020, 3, 2)

    assert normalize_dates(start, end, "monthly") == (
        datetime.date(1997, 11, 1),
        datetime.date(2020, 4, 1),
    )
    assert normalize_dates(start, end, "yearly") == (
        datetime.date(1997, 1, 1),
        datetime.date(2021, 1, 1),
    )

    # Already-aligned dates must pass through untouched:
    aligned_monthly = (datetime.date(1997, 11, 1), datetime.date(2020, 4, 1))
    assert normalize_dates(aligned_monthly[0], aligned_monthly[1], "monthly") == (
        aligned_monthly
    )
    aligned_yearly = (datetime.date(1997, 1, 1), datetime.date(2021, 1, 1))
    assert normalize_dates(aligned_yearly[0], aligned_yearly[1], "yearly") == (
        aligned_yearly
    )

    # Check that normalization is applied through summary.df():
    # realization-0 here has its last summary date at 2003-01-02
    eclfiles = EclFiles(DATAFILE)
    for time_index, expected_last in [
        ("daily", "2003-01-02"),
        ("monthly", "2003-02-01"),
        ("yearly", "2004-01-01"),
    ]:
        smry = summary.df(eclfiles, column_keys="FOPT", time_index=time_index)
        assert str(smry.index[-1]) == expected_last
コード例 #5
0
def test_summary2df_dates():
    """Test that we have some API possibilities with ISO dates"""
    eclfiles = EclFiles(DATAFILE)

    # start_date as a date object, end_date as an ISO-8601 string:
    daily = summary.df(
        eclfiles,
        start_date=datetime.date(2002, 1, 2),
        end_date="2002-03-01",
        time_index="daily",
        datetime=True,
    )
    assert daily.index.name == "DATE"
    assert daily.index.dtype in ("datetime64[ns]", "datetime64")

    assert len(daily) == 59
    assert str(daily.index.values[0])[0:10] == "2002-01-02"
    assert daily.index.values[0] == np.datetime64("2002-01-02")
    assert daily.index.values[-1] == np.datetime64("2002-03-01")

    lastframe = summary.df(eclfiles, time_index="last", datetime=True)
    assert len(lastframe) == 1
    assert lastframe.index.values[0] == np.datetime64("2003-01-02")

    # Keep this one for the datetime=False behaviour:
    firstframe = summary.df(eclfiles, time_index="first")
    assert len(firstframe) == 1
    assert str(firstframe.index.values[0]) == "2000-01-01"
コード例 #6
0
def test_density():
    """Test that DENSITY can be parsed from files and from strings"""
    eclfiles = EclFiles(DATAFILE)
    density_df = pvt.density_fromdeck(eclfiles.get_ecldeck())
    assert len(density_df) == 1
    for column in ["PVTNUM", "OILDENSITY", "WATERDENSITY", "GASDENSITY"]:
        assert column in density_df

    # Roundtrip through the include-file representation:
    pd.testing.assert_frame_equal(
        pvt.density_fromdeck(pvt.df2ecl_density(density_df)), density_df
    )

    two_pvtnum_deck = """DENSITY
        860      999.04       1.1427 /
        800      950     1.05
        /
        """
    # Via str2deck, the PVTNUM count cannot be guessed
    # (a warning will be printed):
    density_df = pvt.density_fromdeck(EclFiles.str2deck(two_pvtnum_deck))
    assert len(density_df) == 1
    # Parsing the raw string directly recovers both PVTNUMs:
    density_df = pvt.density_fromdeck(two_pvtnum_deck)
    assert "PVTNUM" in density_df
    assert density_df["PVTNUM"].max() == 2
    assert density_df["PVTNUM"].min() == 1
    assert "OILDENSITY" in density_df
    pd.testing.assert_frame_equal(
        pvt.density_fromdeck(pvt.df2ecl_density(density_df)), density_df
    )

    # Test empty data:
    inc = pvt.df2ecl_density(pvt.df(""))
    assert "No data" in inc
    assert pvt.df(inc).empty
コード例 #7
0
def test_df2ecl_editnnc(tmpdir):
    """Test generation of the EDITNNC keyword from an NNC dataframe.

    Verifies that the returned string matches what is written to file,
    that the keyword and comments are present, and that the trans module
    output is compatible with df2ecl_editnnc().
    """
    eclfiles = EclFiles(DATAFILE)
    nncdf = nnc.df(eclfiles)
    tmpdir.chdir()

    nncdf["TRANM"] = 2
    editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc")
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).readlines() leaked the handle):
    with open("editnnc.inc") as filehandle:
        editnnc_fromfile = filehandle.read()
    assert editnnc == editnnc_fromfile
    assert "EDITNNC" in editnnc
    # One record terminator per NNC row plus one for the keyword itself:
    assert editnnc.count("/") == len(nncdf) + 1
    assert "avg multiplier" in editnnc

    # Fails when columns are missing
    with pytest.raises((KeyError, ValueError)):
        nnc.df2ecl_editnnc(nncdf[["I1", "I2"]])

    editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True)
    assert "avg multiplier" not in editnnc

    # Test compatibility with trans module:
    trans_df = trans.df(eclfiles, addnnc=True)
    editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3))
    assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc

    print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1)))
コード例 #8
0
def test_pillars():
    """Test that we can build a dataframe of pillar statistics"""
    eclfiles = EclFiles(DATAFILE)
    pillars_df = pillars.df(eclfiles)
    for column in ["PILLAR", "VOLUME", "PORV", "PERMX", "X", "Y", "PORO"]:
        assert column in pillars_df
    # Region and fluid-contact columns only appear when requested:
    for column in ["OILVOL", "FIPNUM", "EQLNUM", "OWC", "GOC"]:
        assert column not in pillars_df
    assert len(pillars_df) == 2560

    pillars_df = pillars.df(eclfiles, region="FIPNUM")
    assert "FIPNUM" in pillars_df
    assert len(pillars_df["FIPNUM"].unique()) == 6
    assert "OILVOL" not in pillars_df

    # Volume columns are suffixed with the restart date:
    pillars_df = pillars.df(eclfiles, rstdates="first")
    firstdate = str(grid.dates2rstindices(eclfiles, "first")[1][0])
    for prefix in ["OILVOL@", "GASVOL@", "WATVOL@"]:
        assert prefix + firstdate in pillars_df

    pillars_df = pillars.df(eclfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2)
    lastdate = str(grid.dates2rstindices(eclfiles, "last")[1][0])
    assert "OWC@" + lastdate in pillars_df
    assert "GOC@" + lastdate not in pillars_df  # Because the dataset has no GAS...
コード例 #9
0
def test_ecldeck_to_satfunc_dframe():
    """Test that dataframes can be produced from a full Eclipse deck (the
    example Reek case)"""
    satdf = satfunc.df(EclFiles(DATAFILE).get_ecldeck())

    assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"}
    assert set(satdf["SATNUM"]) == {1}

    # Saturation and relperm columns have known extremes in Reek:
    for column, expected_min, expected_max in [
        ("SW", 0.32, 1.0),
        ("SG", 0.0, 1 - 0.32),
        ("KRW", 0.0, 1.0),
        ("KROW", 0.0, 1.0),
        ("KROG", 0.0, 1.0),
    ]:
        assert np.isclose(satdf[column].min(), expected_min)
        assert np.isclose(satdf[column].max(), expected_max)

    assert len(satdf) == 76
コード例 #10
0
ファイル: test_grid.py プロジェクト: lindjoha/ecl2df
def test_subvectors():
    """Test that we can ask for a few vectors only"""
    eclfiles = EclFiles(EIGHTCELLS)

    init_df = grid.init2df(eclfiles, "PORO")
    assert "PORO" in init_df
    assert "PERMX" not in init_df
    assert "PORV" not in init_df

    # A wildcard string and the same wildcard in a list behave identically:
    for vectors in ["P*", ["P*"]]:
        init_df = grid.init2df(eclfiles, vectors)
        assert "PORO" in init_df
        assert "PERMX" in init_df
        assert "PVTNUM" in init_df
        assert "SATNUM" not in init_df

    init_df = grid.init2df(eclfiles, ["P*", "*NUM"])
    for vector in ["PORO", "PERMX", "PVTNUM", "SATNUM"]:
        assert vector in init_df
    assert "MULTZ" not in init_df
コード例 #11
0
def test_smry_meta():
    """Test obtaining metadata dictionary for summary vectors from an EclSum object.

    Checks units, rate/total/historical flags, well association, and that
    the metadata dict can be turned into a dataframe.
    """
    meta = smry_meta(EclFiles(DATAFILE))

    assert isinstance(meta, dict)
    assert "FOPT" in meta
    assert "FOPTH" in meta
    assert meta["FOPT"]["unit"] == "SM3"
    assert meta["FOPR"]["unit"] == "SM3/DAY"
    assert meta["FOPT"]["is_total"]
    assert not meta["FOPR"]["is_total"]
    assert not meta["FOPT"]["is_rate"]
    assert meta["FOPR"]["is_rate"]
    assert not meta["FOPT"]["is_historical"]
    assert meta["FOPTH"]["is_historical"]

    assert meta["WOPR:OP_1"]["wgname"] == "OP_1"
    assert meta["WOPR:OP_1"]["keyword"] == "WOPR"
    if "wgname" in meta["FOPT"]:
        # Not enforced yet to have None fields actually included
        assert meta["FOPT"]["wgname"] is None

    # Can create dataframes like this:
    meta_df = pd.DataFrame.from_dict(meta, orient="index")
    hist_keys = meta_df[meta_df["is_historical"]].index
    # Generator expression instead of a temporary list inside all():
    assert all(key.split(":")[0].endswith("H") for key in hist_keys)
コード例 #12
0
ファイル: test_grid.py プロジェクト: lindjoha/ecl2df
def test_gridgeometry2df(mocker):
    """Test that grid geometry dataframes are produced"""
    eclfiles = EclFiles(REEK)
    grid_geom = grid.gridgeometry2df(eclfiles)

    assert isinstance(grid_geom, pd.DataFrame)
    assert not grid_geom.empty

    for column in [
        "I",
        "J",
        "K",
        "X",
        "Y",
        "Z",
        "Z_MIN",
        "Z_MAX",
        "VOLUME",
        "ZONE",
        "GLOBAL_INDEX",
    ]:
        assert column in grid_geom

    # If at least one inactive cell, this will hold:
    assert grid_geom["GLOBAL_INDEX"].max() > len(grid_geom)

    assert (grid_geom["Z_MAX"] > grid_geom["Z_MIN"]).all()

    with pytest.raises(TypeError, match="missing 1 required positional"):
        grid.gridgeometry2df()

    # Passing None is an error situation we don't really try to handle:
    with pytest.raises(AttributeError):
        grid.gridgeometry2df(None)

    with pytest.raises(ValueError, match="No EGRID file supplied"):
        mocker.patch("ecl2df.eclfiles.EclFiles.get_egridfile", return_value=None)
        grid.gridgeometry2df(eclfiles)
コード例 #13
0
ファイル: test_grid.py プロジェクト: lindjoha/ecl2df
def test_merge_initvectors():
    """Test merging of INIT vectors into dataframes indexed by I, J, K"""
    eclfiles = EclFiles(REEK)
    assert grid.merge_initvectors(eclfiles, pd.DataFrame(), []).empty

    foo_df = pd.DataFrame([{"FOO": 1}])
    pd.testing.assert_frame_equal(grid.merge_initvectors(eclfiles, foo_df, []), foo_df)
    with pytest.raises(ValueError, match="All of the columns"):
        grid.merge_initvectors(eclfiles, foo_df, ["NONEXISTING"])

    ijk_df = pd.DataFrame([{"I": 10, "J": 11, "K": 12}])

    with pytest.raises(KeyError):
        grid.merge_initvectors(eclfiles, ijk_df, ["NONEXISTING"])

    withporo = grid.merge_initvectors(eclfiles, ijk_df, ["PORO"])
    pd.testing.assert_frame_equal(
        withporo, ijk_df.assign(PORO=0.221848), check_dtype=False
    )

    # ijknames must always have length 3:
    with pytest.raises(ValueError):
        grid.merge_initvectors(
            eclfiles, ijk_df, ["PORO"], ijknames=["I", "J", "K", "L"]
        )
    with pytest.raises(ValueError):
        grid.merge_initvectors(eclfiles, ijk_df, ["PORO"], ijknames=["I", "J"])
    # The custom ijknames must exist as columns in the incoming frame:
    with pytest.raises(ValueError, match="All of the columns"):
        grid.merge_initvectors(eclfiles, ijk_df, ["PORO"], ijknames=["A", "B", "C"])
コード例 #14
0
ファイル: test_rft.py プロジェクト: dapson2real/ecl2df
def test_rft2df():
    """Test that RFT dataframes are produced"""
    rftdf = rft.rft2df(EclFiles(DATAFILE))
    assert "ZONE" in rftdf
    assert not rftdf.empty
    assert not rftdf.columns.empty
コード例 #15
0
def test_reek_dataset():
    """Test Reek dataset. It contains no CPI data and should return
    an empty dataframe.
    """
    dframe = wellconnstatus.df(EclFiles(REEK))
    assert dframe.empty
コード例 #16
0
def test_pvt_reek():
    """Test that the Reek PVT input can be parsed individually"""

    eclfiles = EclFiles(DATAFILE)

    pvto_df = pvt.pvto_fromdeck(eclfiles.get_ecldeck())
    for column in ["PVTNUM", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"]:
        assert column in pvto_df
    assert max(pvto_df["PVTNUM"]) == 1
    assert max(pvto_df["PRESSURE"]) == 700.1
    # Check count of undersaturated lines pr. RS:
    # (nb: double brackets in .loc to ensure dataframe is returned)
    rs_indexed = pvto_df.set_index("RS")
    assert len(rs_indexed.loc[[0]]) == 2
    assert len(rs_indexed.loc[[15.906]]) == 1
    assert len(rs_indexed.loc[[105.5]]) == 15
    assert len(pvto_df["RS"].unique()) == 20
    assert pvto_df["VOLUMEFACTOR"].max() == 2.851
    assert pvto_df["VISCOSITY"].max() == 1.0001

    # Roundtrip through the include-file representation:
    pd.testing.assert_frame_equal(
        pvt.pvto_fromdeck(pvt.df2ecl_pvto(pvto_df)), pvto_df
    )

    density_df = pvt.density_fromdeck(eclfiles.get_ecldeck())
    for column in ["PVTNUM", "OILDENSITY", "WATERDENSITY", "GASDENSITY"]:
        assert column in density_df
    assert len(density_df) == 1
    assert density_df["WATERDENSITY"].values[0] == 999.04
    pd.testing.assert_frame_equal(
        pvt.density_fromdeck(pvt.df2ecl_density(density_df)), density_df
    )

    rock_df = pvt.rock_fromdeck(eclfiles.get_ecldeck())
    assert "PVTNUM" in rock_df
    assert len(rock_df) == 1
    assert "PRESSURE" in rock_df
    assert "COMPRESSIBILITY" in rock_df
    assert rock_df["PRESSURE"].values[0] == 327.3

    pvtw_df = pvt.pvtw_fromdeck(eclfiles.get_ecldeck())
    assert "PVTNUM" in pvtw_df
    assert pvtw_df["PVTNUM"].values[0] == 1
    assert len(pvtw_df) == 1
    for column in [
        "PRESSURE",
        "VOLUMEFACTOR",
        "COMPRESSIBILITY",
        "VISCOSITY",
        "VISCOSIBILITY",
    ]:
        assert column in pvtw_df
    assert pvtw_df["VISCOSITY"].values[0] == 0.25

    pvdg_df = pvt.pvdg_fromdeck(eclfiles.get_ecldeck())
    for column in ["PVTNUM", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"]:
        assert column in pvdg_df
    assert len(pvdg_df["PVTNUM"].unique()) == 1
    assert pvdg_df["PVTNUM"].max() == 1
    assert len(pvdg_df) == 15
コード例 #17
0
def test_fipreports2df():
    """Test parsing of FIP reports from the Reek PRT file"""
    fipreport_df = fipreports.df(EclFiles(DATAFILE).get_prtfilename())
    assert len(fipreport_df["REGION"].unique()) == 6
    assert len(fipreport_df["DATE"].unique()) == 1
    assert fipreport_df["FIPNAME"].unique()[0] == "FIPNUM"
    assert len(fipreport_df["DATATYPE"].unique()) == 5
コード例 #18
0
ファイル: test_rft.py プロジェクト: miroine/ecl2df
def test_rftrecords2df():
    """Test that RFT records can be parsed into a dataframe."""
    eclfiles = EclFiles(DATAFILE)

    rftrecs = rft._rftrecords2df(eclfiles)
    # There should be exactly one TIME record per unique timestep:
    assert len(rftrecs[rftrecs["recordname"] == "TIME"]) == len(
        rftrecs["timeindex"].unique()
    )
    assert set(rftrecs["recordtype"].unique()) == {"REAL", "INTE", "CHAR"}
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; check for
    # any integer dtype instead of the removed alias:
    assert np.issubdtype(rftrecs["timeindex"].dtype, np.integer)
コード例 #19
0
def test_nnc2df_coords():
    """Test that we are able to add coordinates"""
    gnncdf = nnc.df(EclFiles(DATAFILE), coords=True)
    assert not gnncdf.empty
    for coordinate in ["X", "Y", "Z"]:
        assert coordinate in gnncdf
コード例 #20
0
def test_nx(tmp_path):
    """Test generation of a region graph from transmissibilities"""
    network = trans.make_nx_graph(EclFiles(REEK), region="FIPNUM")
    assert network.number_of_nodes() == 6
    gexffile = tmp_path / "reek-fipnum-trans.gxf"
    networkx.write_gexf(network, gexffile, prettyprint=True)
    assert gexffile.is_file()
コード例 #21
0
def test_ecl2df_errors(tmpdir):
    """Test error handling on bogus/corrupted summary files"""
    tmpdir.chdir()
    Path("FOO.UNSMRY").write_bytes(os.urandom(100))
    Path("FOO.SMSPEC").write_bytes(os.urandom(100))
    # libecl rejects bogus binary data outright:
    with pytest.raises(OSError, match="Failed to create summary instance"):
        ecl.summary.EclSum("FOO.UNSMRY")

    # EclFiles should be more tolerant, so that other data can still be
    # extracted when only the SMRY files are corrupted:
    Path("FOO.DATA").write_text("RUNSPEC")
    assert str(EclFiles("FOO").get_ecldeck()).strip() == "RUNSPEC"
    with pytest.raises(OSError):
        EclFiles("FOO").get_eclsum()

    # A dataframe built from the bogus data should come back empty:
    assert df(EclFiles("FOO")).empty
コード例 #22
0
ファイル: test_grid.py プロジェクト: lindjoha/ecl2df
def test_wrongfile():
    """Test the EclFiles object on nonexistent files"""
    # pylint: disable=invalid-name,redefined-builtin

    # Initialization with a bogus filename is allowed:
    eclfiles = EclFiles("FOO.DATA")
    # but actually using the object must fail:
    with pytest.raises(FileNotFoundError):
        grid.init2df(eclfiles)
コード例 #23
0
def wellconnstatus_main(args):
    """Entry-point for module, for command line utility"""
    logger = getLogger_ecl2csv(__name__, vars(args))
    dframe = df(EclFiles(args.DATAFILE))
    # Emit to stdout or to args.output, logging through the ecl2csv logger:
    write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger)
コード例 #24
0
ファイル: test_trans.py プロジェクト: anders-kiaer/ecl2df
def test_nx(tmpdir):
    """Test generation of a region graph from transmissibilities"""
    network = trans.make_nx_graph(EclFiles(DATAFILE), region="FIPNUM")
    assert network.number_of_nodes() == 6
    gexffile = str(tmpdir.join("reek-fipnum-trans.gxf"))
    networkx.write_gexf(network, gexffile, prettyprint=True)
    assert Path(tmpdir / "reek-fipnum-trans.gxf").is_file()
コード例 #25
0
ファイル: test_summary.py プロジェクト: miroine/ecl2df
def test_summary2df():
    """Test that summary dataframes are produced"""
    sumdf = summary.smry2df(EclFiles(DATAFILE))

    assert not sumdf.empty
    assert sumdf.index.name == "DATE"
    assert not sumdf.columns.empty
    assert "FOPT" in sumdf.columns
コード例 #26
0
ファイル: test_grid.py プロジェクト: miroine/ecl2df
def test_init2df():
    """Test that dataframe with INIT vectors can be produced"""
    init_df = grid.init2df(EclFiles(DATAFILE))

    assert isinstance(init_df, pd.DataFrame)
    assert not init_df.empty
    for vector in ["PERMX", "PORO", "PORV"]:
        assert vector in init_df
コード例 #27
0
ファイル: test_trans.py プロジェクト: anders-kiaer/ecl2df
def test_grouptrans():
    """Test grouping of transmissibilities"""
    trans_df = trans.df(EclFiles(DATAFILE), vectors="FIPNUM", group=True, coords=True)
    for column in ["FIPNUMPAIR", "FIPNUM1", "FIPNUM2"]:
        assert column in trans_df
    assert (trans_df["FIPNUM1"] < trans_df["FIPNUM2"]).all()
    assert len(trans_df) == 7
    assert "X" in trans_df  # (average X coord for that FIPNUM interface)
コード例 #28
0
def test_wcon2df():
    """Test that WCON dataframes are produced"""
    wcondf = wcon.df(EclFiles(DATAFILE).get_ecldeck())

    assert not wcondf.empty
    assert "DATE" in wcondf  # for all data
    assert "KEYWORD" in wcondf
    # Column names are always upper case:
    assert all(col == col.upper() for col in wcondf.columns)
コード例 #29
0
ファイル: test_rft.py プロジェクト: dapson2real/ecl2df
def test_rftrecords2df():
    """Test that RFT records can be converted to dataframes"""
    eclfiles = EclFiles(DATAFILE)

    rftrecs = rft._rftrecords2df(eclfiles)
    # There should be exactly one TIME record per unique timestep:
    assert len(rftrecs[rftrecs["recordname"] == "TIME"]) == len(
        rftrecs["timeindex"].unique()
    )
    assert set(rftrecs["recordtype"].unique()) == {"REAL", "INTE", "CHAR"}
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; check for
    # any integer dtype instead of the removed alias:
    assert np.issubdtype(rftrecs["timeindex"].dtype, np.integer)
コード例 #30
0
def test_satfunc_roundtrip():
    """Test that we can produce a SATNUM dataframe from the Reek case, convert
    it back to an include file, and then reinterpret it to the same"""
    satdf = satfunc.df(EclFiles(DATAFILE).get_ecldeck())
    # Roundtrip: dataframe -> include file string -> dataframe
    df_from_inc = satfunc.df(satfunc.df2ecl(satdf))
    pd.testing.assert_frame_equal(
        satdf.sort_values(["SATNUM", "KEYWORD"]),
        df_from_inc.sort_values(["SATNUM", "KEYWORD"]),
    )