Example #1
0
def test_errors(tmpdir, caplog):
    """Check that malformed lyr files are rejected with helpful log messages"""
    lyrfile = tmpdir / "formations.lyr"

    # A line with no layer interval at all:
    lyrfile.write_text("\nfoo\n", encoding="utf-8")
    assert ecl2df.common.parse_lyrfile(lyrfile) is None
    assert "Could not parse lyr file" in caplog.text
    assert "Failed on content: foo" in caplog.text

    # A line with too many tokens:
    lyrfile.write_text("\nvalid 1-2\nfoo 1 2 3\n", encoding="utf-8")
    assert ecl2df.common.parse_lyrfile(lyrfile) is None
    assert "Failed on content: foo 1 2 3" in caplog.text

    # A reversed layer interval:
    lyrfile.write_text("\nfoo 2-1\n", encoding="utf-8")
    assert ecl2df.EclFiles(DATAFILE).get_zonemap(str(lyrfile)) is None
    assert "From_layer higher than to_layer" in caplog.text

    # A colour code with non-hexadecimal characters:
    lyrfile.write_text(
        "\nvalid 1-2 #FFE5F7\nfoo   3- 4 #FFGGHH\n", encoding="utf-8"
    )
    assert ecl2df.EclFiles(DATAFILE).get_zonemap(str(lyrfile)) is None
    assert "Failed on content: foo   3- 4 #FFGGHH" in caplog.text

    # A colour name that is not recognized:
    lyrfile.write_text(
        "\nvalid 1-2 #FFE5F7\nfoo   3- 4 bluez\n", encoding="utf-8"
    )
    assert ecl2df.EclFiles(DATAFILE).get_zonemap(str(lyrfile)) is None
    assert "Failed on content: foo   3- 4 bluez" in caplog.text
Example #2
0
def pillars_main(args):
    """This is the command line API"""
    if args.verbose:
        logger.setLevel(logging.INFO)
    dframe = df(
        ecl2df.EclFiles(args.DATAFILE),
        region=args.region,
        rstdates=args.rstdates,
        soilcutoff=args.soilcutoff,
        sgascutoff=args.sgascutoff,
        swatcutoff=args.swatcutoff,
        stackdates=args.stackdates,
    )
    # Map each dataframe column to its aggregator, keyed on the part of
    # the column name before any "@<date>" suffix:
    aggregators = {}
    for column in dframe:
        stem = column.split("@")[0]
        if stem in AGGREGATORS:
            aggregators[column] = AGGREGATORS[stem]
    groupbies = []
    if args.group:
        if args.region:
            groupbies.append(args.region)
        if args.stackdates:
            groupbies.append("DATE")
    if groupbies:
        dframe = dframe.groupby(groupbies).agg(aggregators).reset_index()
    elif args.group:
        dframe = dframe.mean().to_frame().transpose()
    dframe["PORO"] = dframe["PORV"] / dframe["VOLUME"]
    ecl2df.common.write_dframe_stdout_file(
        dframe, args.output, index=False, caller_logger=logger
    )
Example #3
0
def test_nonexistingzones():
    """Test an Eclipse case with non-existing zonemap (i.e. no zonemap file
    in the standard location)"""
    zonemap = ecl2df.EclFiles(REEK).get_zonemap("foobar")
    # A warning is logged and an empty, falsy dict is returned:
    assert not zonemap
Example #4
0
def main() -> None:
    """Executed when called from the command line.

    Acts on command line arguments, loads data, performs qc and dumps to
    CSV if requested."""
    parser = get_parser()
    args = parser.parse_args()

    if args.DATAFILE.endswith(".csv"):
        qc_frame = pd.read_csv(args.DATAFILE)
    else:
        eclfiles = ecl2df.EclFiles(args.DATAFILE)

        # Fail hard if the deck is not suitable for this tool or
        # give warnings/hints to the user:
        check_applicability(eclfiles)

        qc_frame = make_qc_gridframe(eclfiles)

        if args.output != "":
            logger.info("Exporting CSV to %s", args.output)
            reorder_dframe_for_nonnans(qc_frame).to_csv(args.output,
                                                        index=False)

    if "SWATINIT" not in qc_frame:
        print("Model did not use SWATINIT")
        return
    qc_vols = qc_volumes(qc_frame)
    print(human_report_qc_vols(qc_vols))
    # The QC-flagged volumes should account for the difference between the
    # SWATINIT and SWAT water volumes; any residual is unexplained:
    qcsum = qc_vols["SWATINIT_WVOL"] + sum(
        qc_vols[qc_flag] for qc_flag in QC_FLAGS)
    diff = qc_vols["SWAT_WVOL"] - qcsum
    if not np.isclose(diff, 0, atol=1e-6):
        print(f"Unexplained difference: {diff} Rm3")

    print()
    print(human_report_pc_scaling(qc_frame))

    if args.volplot or args.volplotfile:
        plotter.wvol_waterfall(qc_vols)
    # BUGFIX: save the figure *before* showing it. pyplot.show() finishes
    # the current figure, so calling savefig() afterwards can write an
    # empty plot file when both options are given.
    if args.volplotfile:
        print(f"Dumping volume plot to {args.volplotfile}")
        pyplot.savefig(args.volplotfile)
    if args.volplot:
        pyplot.show()

    if (args.plotfile
            or args.plot) and args.eqlnum not in qc_frame["EQLNUM"].values:
        sys.exit(
            f"Error: EQLNUM {args.eqlnum} does not exist in grid. No plotting."
        )
    if args.plot or args.plotfile:
        plotter.plot_qc_panels(qc_frame[qc_frame["EQLNUM"] == args.eqlnum])
    # Same save-before-show ordering as for the volume plot above:
    if args.plotfile:
        print(f"Dumping plot to {args.plotfile}")
        pyplot.savefig(args.plotfile)
    if args.plot:
        pyplot.show()
Example #5
0
def get_yearly_summary(
    eclfile: str,
    oilvector: str = "FOPT",
    gasvector: str = "FGPT",
    gasinjvector: str = "FGIT",
) -> pd.DataFrame:
    """Obtain a yearly summary with three production vectors from
    an Eclipse output file.

    Only cumulative vectors can be used, which will be linearly interpolated
    to 1st of January for each year, and then yearly volumes are
    calculated from the cumulatives.

    Args:
        eclfile: Path to Eclipse DATA file
        oilvector: Name of cumulative summary vector with oil production
        gasvector: Name of cumulative summary vector with gas production
        gasinjvector: Name of cumulative summary vector with gas injection

    Returns:
        pd.DataFrame. Indexed by year. Contains the yearly volume columns
        OPR, GPR, GIR and GSR, alongside the (renamed) cumulative columns
        OPT, GPT and GIT they were computed from.

    Raises:
        ValueError: If any requested vector is not a cumulative vector
            (root name not ending in "T").
    """
    if not all(
        vec.split(":")[0].endswith("T")
        for vec in [oilvector, gasvector, gasinjvector]
    ):
        raise ValueError("Only cumulative Eclipse vectors can be used")
    eclfiles = ecl2df.EclFiles(eclfile)
    sum_df = ecl2df.summary.df(
        eclfiles,
        column_keys=[oilvector, gasvector, gasinjvector],
        time_index="yearly",
    ).rename(
        {oilvector: "OPT", gasvector: "GPT", gasinjvector: "GIT"},
        axis="columns",
    )
    sum_df = sum_df.reset_index()

    # Vectors absent from the summary files give missing columns;
    # treat those as zero cumulative production/injection:
    for cumcolumn in ["OPT", "GPT", "GIT"]:
        if cumcolumn not in sum_df:
            sum_df[cumcolumn] = 0
    sum_df["YEAR"] = pd.to_datetime(sum_df["DATE"]).dt.year

    # Yearly volume = cumulative at next new-year minus cumulative at this
    # new-year. shift(-1) leaves NaN on the last row, dropped by dropna():
    sum_df["OPR"] = sum_df["OPT"].shift(-1) - sum_df["OPT"]
    sum_df["GPR"] = sum_df["GPT"].shift(-1) - sum_df["GPT"]
    sum_df["GIR"] = sum_df["GIT"].shift(-1) - sum_df["GIT"]
    sum_df["GSR"] = sum_df["GPR"] - sum_df["GIR"]
    return sum_df.drop("DATE", axis="columns").set_index("YEAR").dropna()
    def parse_case(self, case_name):
        """Load an Eclipse case and extract grid, PVT and equilibration
        dataframes.

        Returns a tuple of the EclFiles object and a dict of dataframes."""
        eclfiles = ecl2df.EclFiles(case_name)
        dataframes = {
            'GRID': ecl2df.grid.df(eclfiles),
            'PVT': ecl2df.pvt.df(eclfiles),
        }
        for eq_keyword in ('EQUIL', 'RSVD', 'RVVD'):
            dataframes[eq_keyword] = ecl2df.equil.df(
                eclfiles, keywords=eq_keyword)

        return eclfiles, dataframes
Example #7
0
def test_userapi():
    """Exercise the public dataframe API as a human user would.

    The individual modules are tested thoroughly elsewhere; this test
    illustrates a typical user workflow and checks that it runs.

    To the user reading the source: Skip all 'assert' lines, read the rest.

    """
    eclfiles = ecl2df.EclFiles(REEK)

    grid_df = ecl2df.grid.df(eclfiles)
    grst_df = ecl2df.grid.df(eclfiles, rstdates="last")

    assert "PORV" in grid_df
    assert "SOIL" not in grid_df
    assert "SOIL" in grst_df
    assert "PORV" in grst_df

    # Make some HCPV calculations
    grst_df["OILPV"] = grst_df["SOIL"] * grst_df["PORV"]
    grst_df["HCPV"] = (1 - grst_df["SWAT"]) * grst_df["PORV"]

    hcpv_table = grst_df.groupby("FIPNUM").sum()[["OILPV", "HCPV"]]
    assert not hcpv_table.empty

    # Print the HCPV table by FIPNUM:
    print()
    print((hcpv_table / 1e6).round(2))

    # Every other extractor module should produce a non-empty dataframe
    # for this dataset:
    extractors = [
        ecl2df.compdat.df,
        ecl2df.equil.df,
        ecl2df.faults.df,
        ecl2df.fipreports.df,
        ecl2df.gruptree.df,
        ecl2df.nnc.df,
        ecl2df.pillars.df,
        ecl2df.rft.df,
        ecl2df.satfunc.df,
        ecl2df.trans.df,
        ecl2df.wcon.df,
    ]
    for extractor in extractors:
        assert not extractor(eclfiles).empty
    assert not ecl2df.summary.df(eclfiles, datetime=True).empty
Example #8
0
    def _parse_case(case_name):
        """Extract grid, PVT and equilibration-related dataframes
        from an Eclipse case."""
        eclfiles = ecl2df.EclFiles(case_name)
        dataframes = {
            "GRID": ecl2df.grid.df(eclfiles),
            "PVT": ecl2df.pvt.df(eclfiles),
        }
        for keyword in ("EQUIL", "RSVD", "RVVD", "PBVD", "PDVD"):
            dataframes[keyword] = ecl2df.equil.df(eclfiles, keywords=keyword)

        return dataframes
Example #9
0
def _load_smry_dataframe_using_ecl2df(
    ens_path: str, frequency: Optional[Frequency]
) -> pd.DataFrame:
    """Load summary data for all realizations under ens_path into a single
    dataframe with a REAL column, sorted on REAL and DATE."""

    time_index: str = frequency.value if frequency else "raw"

    print(f"## Loading data into DataFrame using ECL2DF  time_index={time_index}...")

    realidxregexp = re.compile(r"realization-(\d+)")
    globbedpaths = sorted(
        glob.glob(os.path.join(ens_path, "eclipse/model/*.UNSMRY"))
    )

    per_real_df_arr = []

    for smry_file in globbedpaths:
        # Walk path components from the leaf upwards until the
        # realization-N directory is found:
        real = None
        for path_comp in reversed(smry_file.split(os.path.sep)):
            realmatch = realidxregexp.match(path_comp)
            if realmatch:
                real = int(realmatch.group(1))
                break

        if real is None:
            raise ValueError(
                f"Unable to determine realization number for file: {smry_file}"
            )

        print(f"R={real}:  {smry_file}")

        eclfiles = ecl2df.EclFiles(smry_file.replace(".UNSMRY", ""))
        real_df = ecl2df.summary.df(eclfiles, time_index=time_index)
        real_df.insert(0, "REAL", real)
        real_df.index.name = "DATE"
        per_real_df_arr.append(real_df)

    df = _make_date_column_datetime_object(
        pd.concat(per_real_df_arr, sort=False).reset_index()
    )

    # Convert float columns to float32 and real column to int32
    floatcols = df.select_dtypes("float").columns
    df[floatcols] = df[floatcols].apply(pd.to_numeric, downcast="float")
    df["REAL"] = df["REAL"].astype("int32")

    # Sort on real, then date to align with provider
    df.sort_values(by=["REAL", "DATE"], inplace=True)
    df.reset_index(drop=True, inplace=True)

    return df
Example #10
0
def test_nonstandardzones(tmpdir):
    """Zones can be read from an explicitly supplied filename"""
    zonefile = tmpdir / "formations.lyr"
    zonefile.write(
        "\n"
        "-- foo\n"
        "# foo\n"
        "'Eiriksson'  1-10\n"
        "Raude    20-30\n"
        "# Difficult quote parsing above, might not run in ResInsight.\n"
    )
    zonemap = ecl2df.EclFiles(DATAFILE).get_zonemap(str(zonefile))
    assert zonemap[1] == "Eiriksson"
Example #11
0
def test_stdzoneslyr():
    """Zonemap in the standard file location is found and parsed"""
    zonemap = ecl2df.EclFiles(DATAFILE).get_zonemap()
    assert isinstance(zonemap, dict)
    assert len(zonemap) == 15

    for layer, zone in [(3, "UpperReek"), (10, "MidReek"), (11, "LowerReek")]:
        assert zonemap[layer] == zone

    # Lookups outside the mapped layer range must fail:
    for bad_key in (0, "foo", -10):
        with pytest.raises(KeyError):
            zonemap[bad_key]  # pylint: disable=pointless-statement
Example #12
0
    def _kw_from_files(case_name, kw_dict, ntequil):
        """Parse keyword include files into dataframes.

        Args:
            case_name: Path to an Eclipse DATA file; its deck is inspected
                for active phases, and it is the source for PVT data.
            kw_dict: Maps keyword name (EQUIL, RSVD, RVVD, PBVD, PDVD
                or PVT) to the include file holding that keyword.
            ntequil: Number of equilibration regions, forwarded to the
                equil parser as ntequl.

        Returns:
            dict: Maps each keyword in kw_dict to its parsed dataframe.

        Raises:
            KeyError: If kw_dict contains an unsupported keyword.
        """
        # BUGFIX: construct EclFiles once up front. Previously it was only
        # created inside the EQUIL branch, so the PVT branch raised
        # NameError whenever EQUIL had not been processed first.
        eclfiles = ecl2df.EclFiles(case_name)
        kw_dict_from_file = {}
        for key in kw_dict.keys():
            if key == "EQUIL":
                deck = eclfiles.get_ecldeck()
                # Phase keywords must be prepended so the equil parser
                # knows the EQUIL table layout:
                fake_data = ""
                if "OIL" in deck:
                    fake_data += """
                    OIL
                    """
                if "GAS" in deck:
                    fake_data += """
                    GAS
                    """
                if "WATER" in deck:
                    fake_data += """
                    WATER
                    """
                # Use a context manager so the file handle is closed:
                with open(kw_dict[key], "r") as f_handle:
                    file_content = f_handle.read()
                df = ecl2df.equil.df(
                    fake_data + file_content,
                    keywords="EQUIL",
                    ntequl=ntequil,
                )
            elif key in ["RSVD", "RVVD", "PBVD", "PDVD"]:
                with open(kw_dict[key], "r") as f_handle:
                    file_content = f_handle.read()
                df = ecl2df.equil.df(file_content, keywords=key, ntequl=ntequil)
            elif key == "PVT":
                # NOTE: PVT is read from the case deck, not from the file
                # given in kw_dict.
                df = ecl2df.pvt.df(eclfiles)
            else:
                raise KeyError("KW " + key + " not supported")
            assert "KEYWORD" in df, (
                "Unable to read " + key + " kw from file: " + str(kw_dict[key])
            )
            kw_dict_from_file[key] = df

        return kw_dict_from_file
def _convert_single_smry_file(smry_filename: str, arrow_filename: str) -> None:
    """Read summary data for single realization from disk and write it out to .arrow
    file using ecl2df.
    """
    # Strip any of the usual Eclipse file suffixes to obtain the case basename:
    eclbase = smry_filename
    for suffix in (".DATA", ".UNSMRY", ".SMSPEC"):
        eclbase = eclbase.replace(suffix, "")

    sum_df = ecl2df.summary.df(ecl2df.EclFiles(eclbase))

    # Slight hack here, using ecl2df protected function to gain access to conversion routine
    # pylint: disable=protected-access
    sum_table = ecl2df.summary._df2pyarrow(sum_df)

    ecl2df.summary.write_dframe_stdout_file(sum_table, arrow_filename)
Example #14
0
    def _parse_ecl_case(case_name):
        """Extract the grid, PVT and equilibration dataframes needed
        from an Eclipse case, exiting the process if no grid is found."""
        eclfiles = ecl2df.EclFiles(case_name)
        try:
            grid_frame = ecl2df.grid.df(eclfiles)
        except KeyError:
            print("No grid found, exiting")
            sys.exit()

        df_dict = {"GRID": grid_frame, "PVT": ecl2df.pvt.df(eclfiles)}
        for keyword in ("EQUIL", "RSVD", "RVVD", "PBVD", "PDVD"):
            df_dict[keyword] = ecl2df.equil.df(eclfiles, keywords=keyword)

        return df_dict
Example #15
0
def test_stdzoneslyr():
    """Verify zonemap retrieval when the lyr file is in its standard location.

    The eclfiles object defines what is the standard location for the file,
    while the actual parsing is done in ecl2df.common.parse_lyrfile() and
    converted to zonemap in common.convert_lyrlist_to_zonemap()
    """
    zonemap = ecl2df.EclFiles(REEK).get_zonemap()
    assert isinstance(zonemap, dict)
    assert len(zonemap) == 15

    for layer, zone in [(3, "UpperReek"), (10, "MidReek"), (11, "LowerReek")]:
        assert zonemap[layer] == zone

    # Lookups outside the mapped layers must raise:
    for invalid_key in (0, "foo", -10):
        with pytest.raises(KeyError):
            zonemap[invalid_key]  # pylint: disable=pointless-statement
Example #16
0
def pillars_main(args):
    """This is the command line API"""
    if args.verbose:
        logger.setLevel(logging.INFO)
    dframe = df(
        ecl2df.EclFiles(args.DATAFILE),
        region=args.region,
        rstdates=args.rstdates,
        soilcutoff=args.soilcutoff,
        sgascutoff=args.sgascutoff,
        swatcutoff=args.swatcutoff,
        stackdates=args.stackdates,
    )
    # Map each column to its aggregator, keyed on the part of the
    # column name before any "@<date>" suffix:
    aggregators = {}
    for column in dframe:
        stem = column.split("@")[0]
        if stem in AGGREGATORS:
            aggregators[column] = AGGREGATORS[stem]
    groupbies = []
    if args.group:
        if args.region:
            groupbies.append(args.region)
        if args.stackdates:
            groupbies.append("DATE")
    if groupbies:
        dframe = dframe.groupby(groupbies).agg(aggregators).reset_index()
    elif args.group:
        dframe = dframe.mean().to_frame().transpose()
    dframe["PORO"] = dframe["PORV"] / dframe["VOLUME"]
    if args.output == "-":
        # Ignore pipe errors when writing to stdout.
        from signal import signal, SIGPIPE, SIG_DFL

        signal(SIGPIPE, SIG_DFL)
        dframe.to_csv(sys.stdout, index=False)
    else:
        logger.info("Writing output to disk")
        dframe.to_csv(args.output, index=False)
        print("Wrote to " + args.output)
def run_reservoir_simulator(simulator, resmodel, perform_qc=True):
    """Run the given simulator (Eclipse100 or OPM-flow)
    on a dictionary representing a dynamical reservoir model

    After simulation, runs check_swatinit on the results and
    returns the dataframe with QC information.

    Will write to cwd. Caller is responsible for starting
    in a suitable directory.

    If the simulator fails, the stdout and stderr will be printed.

    Args:
        simulator (string): Path to a working reservoir simulator
            executable
        resmodel (PillarModel): A dynamical reservoir model
        perform_qc (bool): Whether a qc dataframe should be computed
            on the result.
    Returns:
        pd.DataFrame if perform_qc is True, else None

    Raises:
        AssertionError: If the simulator exits with a nonzero return code.
    """
    Path("FOO.DATA").write_text(str(resmodel), encoding="utf8")
    simulator_option = []
    if "runeclipse" in simulator:
        simulator_option = ["-i"]
    # BUGFIX: capture stderr as well. It was previously not piped, so
    # result.stderr was always None and the promised stderr printout on
    # failure was dead code.
    result = subprocess.run(  # pylint: disable=subprocess-run-check
        [simulator] + simulator_option + ["FOO.DATA"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if result.returncode != 0:
        if result.stdout:
            print(result.stdout.decode())
        if result.stderr:
            print(result.stderr.decode())
        raise AssertionError(f"reservoir simulator failed in {os.getcwd()}")

    if perform_qc:
        return make_qc_gridframe(ecl2df.EclFiles("FOO.DATA"))
    return None
Example #18
0
def _load_summary(datafile):
    """Load the summary dataframe for an Eclipse DATA file.

    Exits the process if the file does not exist."""
    if not datafile.is_file():
        sys.exit(f"{datafile} is not an existing file")
    eclfiles = ecl2df.EclFiles(datafile)
    # BUGFIX: the summary dataframe was previously computed twice, with the
    # first (identical) result silently discarded. Compute it once.
    return ecl2df.summary.df(eclfiles)
Example #19
0
def test_nonexistingzones():
    """Test with non-existing zonemap"""
    eclfiles = ecl2df.EclFiles(DATAFILE)
    # A warning is logged and an empty (falsy) dict is returned:
    assert not eclfiles.get_zonemap("foobar")