Code Example #1
def test_str_compdat():
    """Test compdat parsing directly on strings"""
    schstr = """
COMPDAT
 'OP1' 33 110 31 31 'OPEN' 1* 6467.31299 0.216 506642.25  0 1* 'Y' 7.18 /
-- comments.
/
"""
    deck = EclFiles.str2deck(schstr)
    compdfs = compdat.deck2dfs(deck)
    compdat_df = compdfs["COMPDAT"]
    assert compdat_df.loc[0, "SATN"] == 0
    assert not compdat_df.loc[0, "DFACT"]
    assert compdat_df.loc[0, "DIR"] == "Y"

    schstr = """
COMPDAT
 'FOO' 303 1010 031 39  /
/
"""
    compdat_df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"]
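    # K1=031 and K2=39 span 39 - 31 + 1 = 9 layers, unrolled to 9 rows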
    assert len(compdat_df) == 9
    assert not compdat_df["DFACT"].values[0]
    assert not compdat_df["TRAN"].values[0]
    assert compdat_df["I"].values[0] == 303
Code Example #2
File: check_swatinit.py Project: equinor/subscript
def check_applicability(eclfiles: ecl2df.EclFiles) -> None:
    """Check that the input is relevant for usage with check_swatinit. This
    function may raise exceptions, SystemExit or only give warnings"""

    deck = eclfiles.get_ecldeck()

    init = eclfiles.get_initfile()
    if (
        "SWATINIT" not in [initheader[0] for initheader in init.headers]
        and "SWATINIT" not in deck
    ):
        logger.warning(
            "INIT-file/deck does not have SWATINIT, this tool has limited use."
        )

    if "RPTRST" not in deck:
        logger.warning(
            "RPTRST not found in DATA-file, UNRST file is expected to be missing"
        )

    try:
        eclfiles.get_rstfile()
    except FileNotFoundError as exception:
        if "UNIFOUT" not in deck:
            sys.exit(
                "Only unified RESTARTs are supported. Add UNIFOUT to your DATA file."
            )
        logger.error(str(exception))
        sys.exit(
            "No UNRST file found. This is required to get the initial water saturation"
        )
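A usage sketch (the DATA-file path is hypothetical): since the function signals fatal problems via sys.exit, a caller that wants to continue must catch SystemExit:

import ecl2df

eclfiles = ecl2df.EclFiles("MYMODEL.DATA")  # hypothetical path
try:
    check_applicability(eclfiles)
except SystemExit as err:
    print(f"Not applicable: {err}")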
Code Example #3
def pvt_main(args) -> None:
    """Entry-point for module, for command line utility for Eclipse to CSV"""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    logger.info("Parsed %s", args.DATAFILE)
    deck = eclfiles.get_ecldeck()
    if "TABDIMS" in deck:
        # Things are easier when a full deck with correct TABDIMS
        # is supplied:
        pvt_df = df(deck, keywords=args.keywords)
    else:
        # When TABDIMS is not present, the code will try to infer
        # the number of PVT tables (NTPVT); this is necessarily
        # more error-prone, and it needs a string as input.
        stringdeck = Path(args.DATAFILE).read_text()
        pvt_df = df(stringdeck, keywords=args.keywords)
    if "PVTNUM" in pvt_df and "KEYWORD" in pvt_df:
        pvtnums = str(len(pvt_df["PVTNUM"].unique()))
        keywords = str(pvt_df["KEYWORD"].unique())
    else:
        pvtnums = "-"
        keywords = "-"
    common.write_dframe_stdout_file(
        pvt_df,
        args.output,
        index=False,
        caller_logger=logger,
        logstr=f"Unique PVTNUMs: {pvtnums}, PVT keywords: {keywords}",
    )
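pvt_main() takes an argparse-style namespace; a sketch of a programmatic call (the path and option values are hypothetical, the attribute names follow the code above):

import argparse

pvt_main(
    argparse.Namespace(
        DATAFILE="MYMODEL.DATA",  # hypothetical path
        keywords=None,
        verbose=True,
        output="pvt.csv",
    )
)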
Code Example #4
File: inferdims.py Project: lindjoha/ecl2df
def inject_xxxdims_ntxxx(
    xxxdims: str,
    ntxxx_name: str,
    deck: Union[str, "opm.libopmcommon_python.Deck"],
    ntxxx_value: Optional[int] = None,
) -> "opm.libopmcommon_python.Deck":
    """Ensures TABDIMS/EQLDIMS is present in a deck.

    If ntxxx_value=None and ntxxx_name not in the deck, ntxxx_name will
    be inferred through trial-and-error parsing of the deck, and then injected
    into the deck.

    Args:
        xxxdims: TABDIMS or EQLDIMS
        ntxxx_name: NTPVT, NTEQUL or NTSFUN
        deck: A data deck. If ntxxx_name is to be
            estimated this *must* be a string and not a fully parsed deck.
        ntxxx_value: Supply this if ntxxx_name is known but not present in the
            deck; it will override any guessing. If the deck already
            contains xxxdims, this value is ignored.

    Returns:
        opm.io Deck object
    """
    assert xxxdims in ["TABDIMS", "EQLDIMS"]
    assert ntxxx_name in ["NTPVT", "NTEQUL", "NTSFUN"]

    if xxxdims in deck and ntxxx_value is None:
        # Then we have nothing to do, but ensure we parse a potential string to a deck
        if isinstance(deck, str):
            deck = EclFiles.str2deck(deck)
        return deck

    if xxxdims in deck and ntxxx_value is not None:
        logger.warning("Ignoring %s argument, it is already in the deck",
                       str(ntxxx_name))
        return deck

    if not isinstance(deck, str):
        # The deck must be converted to a string deck in order
        # to estimate dimensions.
        deck = str(deck)

    # Estimate if ntxxx_value is not provided:
    if ntxxx_value is None:
        ntxxx_estimate = guess_dim(deck, xxxdims, DIMS_POS[ntxxx_name])
        logger.warning("Estimated %s=%s", ntxxx_name, str(ntxxx_estimate))
    else:
        ntxxx_estimate = ntxxx_value

    augmented_strdeck = inject_dimcount(str(deck),
                                        xxxdims,
                                        DIMS_POS[ntxxx_name],
                                        ntxxx_estimate,
                                        nowarn=True)
    # Overwrite the deck object
    deck = EclFiles.str2deck(augmented_strdeck)

    return deck
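A sketch of the inference path described in the docstring, assuming a string deck with two SWOF tables and no TABDIMS, so that NTSFUN must be guessed (the table values are illustrative only):

strdeck = """
SWOF
  0.1 0.0 1.0 0.0
  0.9 1.0 0.0 0.0 /
  0.2 0.0 1.0 0.0
  0.8 1.0 0.0 0.0 /
"""
# Trial-and-error parsing should estimate NTSFUN=2 and inject
# TABDIMS into the returned opm.io Deck object:
deck = inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", strdeck)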
Code Example #5
def test_comp2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(DATAFILE)
    compdfs = compdat.deck2dfs(eclfiles.get_ecldeck())

    assert not compdfs["COMPDAT"].empty
    assert compdfs["WELSEGS"].empty  # REEK demo does not include multisegment wells
    assert compdfs["COMPSEGS"].empty
    assert len(compdfs["COMPDAT"].columns)
Code Example #6
def test_comp2df():
    """Test that dataframes are produced"""
    eclfiles = EclFiles(EIGHTCELLS)
    compdfs = compdat.deck2dfs(eclfiles.get_ecldeck())

    assert not compdfs["COMPDAT"].empty
    assert not compdfs["WELSEGS"].empty
    assert not compdfs["COMPSEGS"].empty
    assert not compdfs["COMPDAT"].columns.empty
Code Example #7
def test_applywelopen():
    schstr = """
DATES
   1 MAY 2001 /
/

COMPDAT
 'OP1' 33 110 31 31 'OPEN'  /
/
WELOPEN
 'OP1' 'SHUT' /
/

TSTEP
  1 /

COMPDAT
 'OP2' 66 110 31 31 'OPEN'  /
/

WELOPEN
 'OP1' 'OPEN' /
/

TSTEP
  2 3 /

WELOPEN
 'OP1' 'POPN' /
 'OP2' 'SHUT' /
/
"""
    df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"]
    assert df.shape[0] == 5
    assert df["OP/SH"].nunique() == 2
    assert df["DATE"].nunique() == 3

    schstr = """
DATES
   1 MAY 2001 /
/

COMPDAT
 'OP1' 33 110 31 31 'OPEN'  /
/
WELOPEN
 'OP2' 'SHUT' /
/"""
    with pytest.raises(ValueError):
        compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"]
Code Example #8
def wcon_main(args) -> None:
    """Read from disk and write CSV back to disk"""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    deck = eclfiles.get_ecldeck()
    wcon_df = df(deck)
    write_dframe_stdout_file(
        wcon_df,
        args.output,
        index=False,
        caller_logger=logger,
        logstr="Wrote to {}".format(args.output),
    )
Code Example #9
def test_unrollwelsegs():
    """Test unrolling of welsegs."""
    schstr = """
WELSEGS
  -- seg_start to seg_end (the first two items in the second record) span
  -- 2 segments, which should be automatically unrolled to 2 rows.
  'OP1' 1689 1923 1.0E-5 'ABS' 'HFA' 'HO' / comment without -- identifier
   2 3 1 1 1923.9 1689.000 0.1172 0.000015  /
/
"""
    df = compdat.deck2dfs(EclFiles.str2deck(schstr))["WELSEGS"]
    assert len(df) == 2

    df = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["WELSEGS"]
    assert len(df) == 1
Code Example #10
File: faults.py Project: lindjoha/ecl2df
def faults_main(args) -> None:
    """Read from disk and write CSV back to disk"""
    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
        __name__, vars(args))
    eclfiles = EclFiles(args.DATAFILE)
    deck = eclfiles.get_ecldeck()
    faults_df = df(deck)
    write_dframe_stdout_file(
        faults_df,
        args.output,
        index=False,
        caller_logger=logger,
        logstr=f"Wrote to {args.output}",
    )
Code Example #11
def test_wsegvalv_max_default():
    """Test the WSEGVALV parser for column names and default values. NB: Default
    values are taken from other keywords in Eclipse except STATUS, which is OPEN
    by default. Combination of keywords is not tested here."""
    schstr = """
    WSEGVALV
    -- WELL    SEG         CV      AC
    WELL_A   31  0.0084252 0.00075  6* /
    /
    """
    deck = EclFiles.str2deck(schstr)
    wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"]
    pd.testing.assert_frame_equal(
        wsegvalv,
        pd.DataFrame(
            data=[
                {
                    "WELL": "WELL_A",
                    "SEGMENT_NUMBER": 31,
                    "CV": 0.0084252,
                    "AREA": 0.00075,
                    "EXTRA_LENGTH": None,
                    "PIPE_D": None,
                    "ROUGHNESS": None,
                    "PIPE_A": None,
                    "STATUS": "OPEN",
                    "MAX_A": None,
                    "DATE": None,
                }
            ]
        ),
    )
Code Example #12
def test_wsegsicd():
    """Test the WSEGSICD parser for column names and default values

    Proves bug 232 is fixed.
    """
    schstr = """WSEGSICD
        'WELL_A'              31    31    0.00178 0.57975861     1*   1*        0.7
        1*         1*     1         1*
                         OPEN /
            /
    """
    deck = EclFiles.str2deck(schstr)
    wsegsicd = compdat.deck2dfs(deck)["WSEGSICD"]
    pd.testing.assert_frame_equal(
        wsegsicd,
        pd.DataFrame(data=[{
            "WELL": "WELL_A",
            "SEGMENT1": 31,
            "SEGMENT2": 31,
            "STRENGTH": 0.00178,
            "LENGTH": 0.57975861,
            "DENSITY_CALI": 1000.25,
            "VISCOSITY_CALI": 1.45,
            "CRITICAL_VALUE": 0.7,
            "WIDTH_TRANS": 0.05,
            "MAX_VISC_RATIO": 5,
            "METHOD_SCALING_FACTOR": 1,
            "MAX_ABS_RATE": None,
            "STATUS": "OPEN",
            "DATE": None,
        }]),
    )
Code Example #13
def test_welopen(test_input, expected):
    """Test with WELOPEN present"""
    deck = EclFiles.str2deck(test_input)
    compdf = compdat.deck2dfs(deck)["COMPDAT"]

    columns_to_check = ["WELL", "I", "J", "K1", "K2", "OP/SH", "DATE"]
    assert (compdf[columns_to_check] == expected[columns_to_check]).all(axis=None)
Code Example #14
def get_completion_list(ECL_DATA_file_name):
    """
    Create a datafram of unrolled well completions

    Args:
    Input DATA file name

    Returns:
    Tuple:
    List of unique well names
    List of completions associated to well names
    """

    ECL_file = EclFiles(ECL_DATA_file_name)
    compdat_df = compdat.df(ECL_file)

    # Convert from ECL index
    compdat_df[["I", "J", "K1", "K2"]] = compdat_df[["I", "J", "K1", "K2"]] - 1

    # Create tuples
    compdat_df["IJK"] = compdat_df[["I", "J", "K1"]].apply(tuple, axis=1)

    well_list = compdat_df["WELL"].unique().tolist()
    completion_list = []
    for well in well_list:
        completion_list.append(
            compdat_df["IJK"].loc[compdat_df["WELL"] == well].to_list())

    return completion_list, well_list
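A usage sketch (the file name is hypothetical); note that the return order is completions first, then well names:

completions, wells = get_completion_list("MYMODEL.DATA")
for well, cells in zip(wells, completions):
    print(well, cells)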
Code Example #15
def pillars_main(args) -> None:
    """This is the command line API"""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    dframe = df(
        eclfiles,
        region=args.region,
        rstdates=args.rstdates,
        soilcutoff=args.soilcutoff,
        sgascutoff=args.sgascutoff,
        swatcutoff=args.swatcutoff,
        stackdates=args.stackdates,
    )
    groupbies = []
    aggregators = {
        key: AGGREGATORS[key.split("@")[0]]
        for key in dframe if key.split("@")[0] in AGGREGATORS
    }
    if args.region and args.group:
        groupbies.append(args.region)
    if args.stackdates and args.group:
        groupbies.append("DATE")
    if groupbies:
        dframe = dframe.groupby(groupbies).agg(aggregators).reset_index()
    elif args.group:
        dframe = dframe.mean().to_frame().transpose()
    dframe["PORO"] = dframe["PORV"] / dframe["VOLUME"]
    common.write_dframe_stdout_file(dframe,
                                    args.output,
                                    index=False,
                                    caller_logger=logger)
Code Example #16
File: pillars.py Project: lindjoha/ecl2df
def pillars_main(args) -> None:
    """This is the command line API"""
    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
        __name__, vars(args))

    eclfiles = EclFiles(args.DATAFILE)
    dframe = df(
        eclfiles,
        region=args.region,
        rstdates=args.rstdates,
        soilcutoff=args.soilcutoff,
        sgascutoff=args.sgascutoff,
        swatcutoff=args.swatcutoff,
        stackdates=args.stackdates,
    )
    groupbies = []
    aggregators = {
        key: AGGREGATORS[key.split("@")[0]]
        for key in dframe if key.split("@")[0] in AGGREGATORS
    }
    if args.region and args.group:
        groupbies.append(args.region)
    if args.stackdates and args.group:
        groupbies.append("DATE")
    if groupbies:
        dframe = dframe.groupby(groupbies).agg(aggregators).reset_index()
    elif args.group:
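        # PILLAR holds non-numeric pillar labels; drop it before mean()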
        dframe = dframe.drop("PILLAR", axis=1).mean().to_frame().transpose()
    dframe["PORO"] = dframe["PORV"] / dframe["VOLUME"]
    common.write_dframe_stdout_file(dframe,
                                    args.output,
                                    index=False,
                                    caller_logger=logger)
Code Example #17
def test_msw_schfile2df():
    """Test that we can process individual files with AICD and ICD MSW"""
    deck = EclFiles.file2deck(SCHFILE_AICD)
    compdfs = compdat.deck2dfs(deck)
    assert not compdfs["WSEGAICD"].empty
    assert not compdfs["WSEGAICD"].columns.empty

    deck = EclFiles.file2deck(SCHFILE_ICD)
    compdfs = compdat.deck2dfs(deck)
    assert not compdfs["WSEGSICD"].empty
    assert not compdfs["WSEGSICD"].columns.empty

    deck = EclFiles.file2deck(SCHFILE_VALV)
    compdfs = compdat.deck2dfs(deck)
    assert not compdfs["WSEGVALV"].empty
    assert not compdfs["WSEGVALV"].columns.empty
Code Example #18
def test_tstep():
    """Test with TSTEP present"""
    schstr = """
DATES
   1 MAY 2001 /
/

COMPDAT
 'OP1' 33 110 31 31 'OPEN'  /
/

TSTEP
  1 /

COMPDAT
 'OP1' 34 111 32 32 'OPEN' /
/

TSTEP
  2 3 /

COMPDAT
  'OP1' 35 111 33 33 'SHUT' /
/
"""
    deck = EclFiles.str2deck(schstr)
    compdf = compdat.deck2dfs(deck)["COMPDAT"]
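    # TSTEP 1 advances 2001-05-01 by one day to 2001-05-02;
    # TSTEP "2 3" then advances it by 2 + 3 days to 2001-05-07.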
    dates = [str(x) for x in compdf["DATE"].unique()]
    assert len(dates) == 3
    assert "2001-05-01" in dates
    assert "2001-05-02" in dates
    assert "2001-05-07" in dates
Code Example #19
def test_wsegvalv():
    """Test the WSEGVALV parser for column names and default values"""
    schstr = """
    WSEGVALV
    -- WELL    SEG         CV      AC  Lex     Dp    ROUGH      Ap  STATUS     A_MAX
    WELL_A   31  0.0084252 0.00075  0.5  0.216   0.0005  0.0366    SHUT    0.0008 /
    /
    """
    deck = EclFiles.str2deck(schstr)
    wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"]
    pd.testing.assert_frame_equal(
        wsegvalv,
        pd.DataFrame(
            data=[
                {
                    "WELL": "WELL_A",
                    "SEGMENT_NUMBER": 31,
                    "CV": 0.0084252,
                    "AREA": 0.00075,
                    "EXTRA_LENGTH": 0.5,
                    "PIPE_D": 0.216,
                    "ROUGHNESS": 0.0005,
                    "PIPE_A": 0.0366,
                    "STATUS": "SHUT",
                    "MAX_A": 0.0008,
                    "DATE": None,
                }
            ]
        ),
    )
Code Example #20
def test_df():
    """Test main dataframe API, only testing that something comes out"""
    eclfiles = EclFiles(EIGHTCELLS)
    compdat_df = compdat.df(eclfiles)
    assert not compdat_df.empty
    assert "ZONE" in compdat_df
    assert "K1" in compdat_df
    assert "WELL" in compdat_df
Code Example #21
File: gruptree.py Project: lindjoha/ecl2df
def gruptree_main(args) -> None:
    """Entry-point for module, for command line utility."""
    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
        __name__, vars(args)
    )
    if not args.output and not args.prettyprint:
        print("Nothing to do. Set --output or --prettyprint")
        sys.exit(0)
    eclfiles = EclFiles(args.DATAFILE)
    dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate)
    if args.prettyprint:
        if "DATE" in dframe:
            print(prettyprint(dframe))
        else:
            logger.warning("No tree data to prettyprint")
    elif args.output:
        write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger)
Code Example #22
def test_unrollcompdatk1k2():
    """Test unrolling of k1-k2 ranges in COMPDAT"""
    schstr = """
COMPDAT
  -- K1 to K2 is a range of 11 layers, which should be automatically
  -- unrolled to 11 rows.
  'OP1' 33 44 10 20  /
/
"""
    df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"]
    assert df["I"].unique() == 33
    assert df["J"].unique() == 44
    assert (df["K1"].values == range(10, 20 + 1)).all()
    assert (df["K2"].values == range(10, 20 + 1)).all()

    # Check that we can read without unrolling:
    df_noroll = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["COMPDAT"]
    assert len(df_noroll) == 1
Code Example #23
def faults_main(args) -> None:
    """Read from disk and write CSV back to disk"""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    deck = eclfiles.get_ecldeck()
    faults_df = df(deck)
    if faults_df.empty:
        logger.warning("Empty FAULT data, not written to disk!")
        return
    write_dframe_stdout_file(
        faults_df,
        args.output,
        index=False,
        caller_logger=logger,
        logstr="Wrote to {}".format(args.output),
    )
Code Example #24
def gruptree_main(args) -> None:
    """Entry-point for module, for command line utility."""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    if not args.output and not args.prettyprint:
        print("Nothing to do. Set --output or --prettyprint")
        sys.exit(0)
    eclfiles = EclFiles(args.DATAFILE)
    dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate)
    if args.prettyprint:
        if "DATE" in dframe:
            print(prettyprint(dframe))
        else:
            logger.warning("No tree data to prettyprint")
    elif args.output:
        write_dframe_stdout_file(dframe,
                                 args.output,
                                 index=False,
                                 caller_logger=logger)
Code Example #25
def nnc_main(args):
    """Command line access point from main() or from ecl2csv via subparser"""
    if args.verbose:
        logger.setLevel(logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars)
    if nncdf.empty:
        logger.warning("Empty NNC dataframe being written to disk!")
    nncdf.to_csv(args.output, index=False)
    print("Wrote to " + args.output)
Code Example #26
def test_samecellperf():
    """Test that we allow multiple perforations in the same cell"""
    schstr = """
COMPDAT
  'OP1' 1 1 1 1 /
  'OP2' 1 1 1 1 /
/
"""
    df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"]
    assert len(df) == 2
Code Example #27
File: gruptree.py Project: anders-kiaer/ecl2df
def gruptree_main(args) -> None:
    """Entry-point for module, for command line utility."""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    if not args.output and not args.prettyprint:
        print("Nothing to do. Set --output or --prettyprint")
        sys.exit(0)
    eclfiles = EclFiles(args.DATAFILE)
    dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate)
    if args.prettyprint:
        if "DATE" in dframe:
            for date in dframe["DATE"].dropna().unique():
                print("Date: " + str(date.astype("M8[D]")))
                for tree in edge_dataframe2dict(dframe[dframe["DATE"] == date]):
                    print(tree_from_dict(tree))
                print("")
        else:
            logger.warning("No tree data to prettyprint")
    if dframe.empty:
        logger.error("Empty GRUPTREE dataframe, not written to disk!")
    elif args.output:
        write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger)
Code Example #28
def nnc_main(args) -> None:
    """Command line access point from main() or from ecl2csv via subparser"""
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    eclfiles = EclFiles(args.DATAFILE)
    nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars)
    write_dframe_stdout_file(
        nncdf,
        args.output,
        index=False,
        caller_logger=logger,
        logstr="Wrote to {}".format(args.output),
    )
Code Example #29
def nnc_main(args) -> None:
    """Command line access point from main() or from ecl2csv via subparser"""
    logger = getLogger_ecl2csv(  # pylint: disable=redefined-outer-name
        __name__, vars(args))
    eclfiles = EclFiles(args.DATAFILE)
    nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars)
    write_dframe_stdout_file(
        nncdf,
        args.output,
        index=False,
        caller_logger=logger,
        logstr=f"Wrote to {args.output}",
    )
Code Example #30
File: fipreports.py Project: berland/ecl2df
def fipreports_main(args) -> None:
    """Command line API"""
    # logging.basicConfig() only takes effect on its first call, so
    # check the more verbose option first:
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    elif args.verbose:
        logging.basicConfig(level=logging.INFO)
    if args.PRTFILE.endswith(".PRT"):
        prtfile = args.PRTFILE
    else:
        prtfile = EclFiles(args.PRTFILE).get_prtfilename()
    dframe = df(prtfile, args.fipname)
    write_dframe_stdout_file(dframe,
                             args.output,
                             index=False,
                             caller_logger=logger)