Example #1
def test_default_logger_levels(capsys):
    """Verify that the intended usage of this logger have expected results"""

    # Scripts should start with this:
    logger = subscript.getLogger("test_levels")

    logger.debug("This DEBUG is not to be seen")
    captured = capsys.readouterr()
    assert "DEBUG" not in captured.out
    assert "DEBUG" not in captured.err

    logger.info("This INFO is not to be seen by default")
    captured = capsys.readouterr()
    assert "INFO" not in captured.out
    assert "INFO" not in captured.err

    logger.warning("This WARNING is to be seen")
    captured = capsys.readouterr()
    assert "WARNING" in captured.out
    assert "WARNING" not in captured.err

    logger.error("This ERROR should only be in stderr")
    captured = capsys.readouterr()
    assert "ERROR" not in captured.out
    assert "ERROR" in captured.err
Example #2
def test_script_debug_mode(capsys):
    """Some scripts accept a --verbose option, which usually
    mean that logging should be at INFO level"""
    logger = subscript.getLogger("test_debug")
    logger.setLevel(logging.DEBUG)

    logger.info("This DEBUG is to be seen")
    captured = capsys.readouterr()
    assert "DEBUG" in captured.out
Example #3
def test_subscriptlogger_name():
    """Test that the subscript logger can compute a correct name for itself"""
    assert subscript.getLogger().name == "subscript"
    assert subscript.getLogger("").name == "subscript"
    assert subscript.getLogger("subscript.eclcompress").name == "subscript.eclcompress"
    assert (
        subscript.getLogger("subscript.eclcompress.eclcompress").name
        == "subscript.eclcompress"
    )
    assert (
        subscript.getLogger("subscript.eclcompress.eclcompress.eclcompress").name
        == "subscript.eclcompress"
    )
    assert (
        subscript.getLogger("subscript.eclcompress.eclcompress.somesubmodule").name
        == "subscript.eclcompress.somesubmodule"
    )

    assert subscript.getLogger("subscript_internal").name == "subscript"
    assert (
        subscript.getLogger("subscript_internal.completor").name
        == "subscript.completor"
    )
    assert (
        subscript.getLogger("subscript_internal.completor.sub").name
        == "subscript.completor.sub"
    )
Example #4
def fmuobs(
    inputfile: str,
    ertobs: Optional[str] = None,
    yml: Optional[str] = None,
    resinsight: Optional[str] = None,
    csv: Optional[str] = None,
    verbose: bool = False,
    debug: bool = False,
    starttime: Optional[str] = None,
    includedir: Optional[str] = None,
):
    # pylint: disable=too-many-arguments
    """Alternative to main() with named arguments"""
    if verbose or debug:
        if __MAGIC_STDOUT__ in (csv, yml, ertobs):
            raise SystemExit("Don't use verbose/debug when writing to stdout")
        loglevel = logging.INFO
        if debug:
            loglevel = logging.DEBUG
        logger.setLevel(loglevel)
        getLogger("subscript.fmuobs.parsers").setLevel(loglevel)
        getLogger("subscript.fmuobs.writers").setLevel(loglevel)
        getLogger("subscript.fmuobs.util").setLevel(loglevel)

    (filetype, dframe) = autoparse_file(inputfile)

    # For ERT files, there is the problem of include-file-path. If not-found
    # include filepaths are present, the filetype is ert, but dframe is empty.
    if filetype == "ert" and pd.DataFrame.empty:
        with open(inputfile) as f_handle:
            input_str = f_handle.read()
        if not includedir:
            # Try and error for the location of include files, first in current
            # dir, then in the directory of the input file. The proper default
            # for cwd is the location of the ert config file, which is not
            # available in this parser, and must be supplied on command line.
            try:
                dframe = ertobs2df(input_str, cwd=".", starttime=starttime)
            except FileNotFoundError:
                dframe = ertobs2df(
                    input_str,
                    cwd=os.path.dirname(inputfile),
                    starttime=starttime,
                )
        else:
            dframe = ertobs2df(input_str, cwd=includedir)

    if starttime:
        dframe = compute_date_from_days(dframe)

    if not validate_internal_dframe(dframe):
        logger.error("Observation dataframe is invalid!")

    dump_results(dframe, csv, yml, resinsight, ertobs)
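A usage sketch for the function above; the file names are hypothetical and only illustrate calling fmuobs() with named arguments instead of going through main().

# Hypothetical file names; parses an ERT observation file and dumps YAML and CSV.
fmuobs(
    "observations.obs",
    yml="observations.yml",
    csv="observations.csv",
    verbose=True,
)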
Example #5
from multiprocessing import Process
from pathlib import Path
from typing import Optional

import matplotlib.pyplot
import numpy as np
from ecl.eclfile import EclFile  # type: ignore
from ecl.grid import EclGrid  # type: ignore
from ecl.summary import EclSum  # type: ignore

# Get rid of FutureWarning from pandas/plotting.py
from pandas.plotting import register_matplotlib_converters

import subscript

logger = subscript.getLogger(__name__)

register_matplotlib_converters()

DESCRIPTION = """
Summaryplot will plot summary vectors from your Eclipse output files.

To list summary vectors for a specific Eclipse output set, try::

  summary.x --list ECLFILE.DATA

Command line arguments in VECTORSDATAFILES are assumed to be Eclipse DATA files as
long as the argument is an existing file. If not, the argument is assumed
to be a vector to plot. Thus, vectors and data files can be mixed.
"""
Example #6
import os
import logging
import shutil
import subprocess

import pytest

from subscript.ecldiff2roff import ecldiff2roff

from subscript import getLogger

logger = getLogger("subscript.ecldiff2roff.ecldiff2roff")
logger.setLevel(logging.INFO)


@pytest.mark.parametrize(
    "datetxt, expected",
    [
        ("20000101 20010101", [("20000101", "20010101")]),
        ("2000-01-01 2001-01-01", [("20000101", "20010101")]),
        ("20000101 2001-01-01", [("20000101", "20010101")]),
        ("20000101         20010101", [("20000101", "20010101")]),
        ("", []),
        ("    ", []),
        ("\n\n", []),
        ("# a comment", []),
        ("-- a comment", []),
        (
            """

            # foo
Example #7
import getpass
import json
import logging
import os
import pathlib
import platform
import shutil
import subprocess
import sys
import time

import yaml

from subscript import getLogger

logger = getLogger(__name__)

DESCRIPTION = """
Script to run an RMS project from the command line, which will in turn use the
'rms...' command OR will look at /prog/roxar/site. Note that not all
options valid for 'rms' are covered.

* It should understand the current RMS version in the project and launch the correct RMS executable
* It should be able to run test versions of RMS
* It should be able to set the correct Equinor valid PYTHONPATH.
* Company wide plugin path

Example of usage::

    runrms newreek.rms10.1.3 (if new project: warn and just start rms default)
    runrms reek.rms10.1.3  (automatically detect version from .master)
Example #8
import logging
import os
import shutil
from pathlib import Path

import pytest

from subscript import getLogger
from subscript.merge_rft_ertobs.merge_rft_ertobs import (
    get_observations,
    merge_rft_ertobs,
    split_wellname_reportstep,
)

# pylint: disable=unused-argument  # false positive on fixtures

try:
    # pylint: disable=unused-import
    import ert_shared  # noqa

    HAVE_ERT = True
except ImportError:
    HAVE_ERT = False

logger = getLogger("subscript.merge_rft_ertobs.merge_rft_ertobs")
logger.setLevel(logging.INFO)


@pytest.fixture(name="drogondata")
def fixture_drogondata(tmp_path):
    """Prepare a directory with Drogon testdata"""
    drogondir = Path(__file__).absolute().parent / "testdata_merge_rft_ertobs/drogon"
    drogondest = tmp_path / "drogondata"
    shutil.copytree(drogondir, drogondest)
    cwd = os.getcwd()
    os.chdir(drogondest)

    try:
        yield