Example #1
0
def test_non_existing_file():
    """Loading a missing trajectory file must raise ArgumentTypeError
    carrying an explanatory warning message."""
    expected_message = "Warning: Trajectory file non_existing not found!"
    with pytest.raises(argparse.ArgumentTypeError) as err_context:
        Trajectory.load_from_file("non_existing")
    assert err_context.value.args[0] == expected_message
Example #2
0
def test_update_simdata_from_rft_reek(reek_data):
    """Test data extraction from the binary Eclipse files
    for a single well point using TrajectoryPoint.update_simdata_from_rft()"""

    grid = EclGrid(ECL_BASE_REEK + ".EGRID")
    rft = EclRFTFile(ECL_BASE_REEK + ".RFT")
    rft_well_date = rft.get("OP_1", datetime.date(2000, 2, 1))

    # A trajectory point for an active cell in reek:
    point = TrajectoryPoint(462608.57, 5934210.96, 1624.38, 1624.38)

    # No simulation data is attached before set_ijk()/update calls:
    for value in (point.grid_ijk, point.pressure, point.swat,
                  point.sgas, point.soil):
        assert value is None

    point.set_ijk(
        grid.find_cell(point.utm_x, point.utm_y, point.true_vertical_depth))
    assert point.grid_ijk == (28, 27, 7)

    point.update_simdata_from_rft(rft_well_date)
    expected_simdata = {
        "pressure": 304.37,
        "swat": 0.151044,
        "soil": 1 - 0.151044,
        "sgas": 0.0,
    }
    for attribute, expected in expected_simdata.items():
        assert np.isclose(getattr(point, attribute), expected)

    # Construct a Trajectory from the point
    traj = Trajectory([])
    traj.trajectory_points = [point]  # (can't initialize from list of points)
    dframe = traj.to_dataframe()
    assert {"i", "j", "k", "pressure", "soil", "sgas",
            "swat"}.issubset(set(dframe))
Example #3
0
def test_update_simdata_from_rft_norne(norne_data):
    """Similar test as the reek version, but the Norne RFT file
    does not contain saturations, and in libecl terms contains EclPLTCell
    as opposed to EclRFTCell as in Reek"""

    grid = EclGrid(ECL_BASE_NORNE + ".EGRID")
    rft = EclRFTFile(ECL_BASE_NORNE + ".RFT")
    rft_well_date = rft.get("C-3H", datetime.date(1999, 5, 4))

    # A trajectory point for an active cell in Norne, picked
    # from a line in gendata_rft_input_files/C-3H.txt
    point = TrajectoryPoint(455752.59771598293, 7321015.949386452,
                            2785.78173828125, 2785.78173828125)

    # No simulation data is attached before set_ijk()/update calls:
    for value in (point.grid_ijk, point.pressure, point.swat,
                  point.sgas, point.soil):
        assert value is None

    point.set_ijk(
        grid.find_cell(point.utm_x, point.utm_y, point.true_vertical_depth))
    assert point.grid_ijk == (8, 12, 20)  # Zero-indexed integers.

    point.update_simdata_from_rft(rft_well_date)
    # There is no saturation data in the Norne binary output, then these
    # should be None
    for saturation in (point.swat, point.sgas, point.soil):
        assert saturation is None

    # Construct a Trajectory from the point
    traj = Trajectory([])
    traj.trajectory_points = [point]  # (can't initialize from list of points)
    dframe = traj.to_dataframe()
    assert {"i", "j", "k", "pressure"}.issubset(set(dframe))
    assert "swat" not in dframe
Example #4
0
def main_entry_point(args=None):
    """Load well trajectories and extract RFT data from Eclipse output.

    Parses command line options (``args`` defaults to ``sys.argv``),
    loads one trajectory file per well, and runs the extraction.
    Writes a GENDATA_RFT.OK marker file on success.

    Exits with a nonzero status (via SystemExit) when a trajectory file
    cannot be loaded or the extraction itself fails — previously load
    errors surfaced as raw tracebacks and run failures were only logged,
    leaving a success exit code.
    """
    arg_parser = _build_parser()
    options = arg_parser.parse_args(args)
    logger.setLevel(options.log_level)

    well_names = [w_info[0] for w_info in options.well_and_time_file]

    # Collect all load errors before aborting so the user sees every
    # missing/malformed trajectory file in a single run.
    load_errors = []
    trajectories = {}
    for wname in well_names:
        try:
            trajectories[wname] = Trajectory.load_from_file(
                filepath=os.path.join(options.trajectory_path, wname + ".txt"))
        except (IOError, ValueError) as err:
            load_errors.append(str(err))
    if load_errors:
        raise SystemExit("\n".join(load_errors))

    logger.info("All files loaded\nRetrieving RFT data...")

    try:
        gendata_rft.run(
            well_times=options.well_and_time_file,
            trajectories=trajectories,
            ecl_grid=options.eclbase[0],
            ecl_rft=options.eclbase[1],
            zonemap=options.zonemap,
            csvfile=options.csvfile,
            outputdirectory=options.outputdirectory,
        )
        # The OK file signals successful completion to e.g. ERT.
        with open("GENDATA_RFT.OK", "w") as fh:
            fh.write("GENDATA RFT completed OK")
        logger.info("Completed!")
    except ValueError as exception:
        logger.error("Failed with error message: {}".format(exception))
        # Propagate a failure exit code instead of exiting 0.
        raise SystemExit(1)
Example #5
0
def main_entry_point(args=None):
    """Parse options, load one trajectory file per well and run the
    RFT data extraction (``args`` defaults to ``sys.argv``)."""
    options = _build_parser().parse_args(args)
    logger.setLevel(options.log_level)

    trajectories = {}
    for w_info in options.well_and_time_file:
        wname = w_info[0]
        filepath = os.path.join(options.trajectory_path, wname + ".txt")
        trajectories[wname] = Trajectory.load_from_file(filepath=filepath)

    logger.info("All files loaded\nRetrieving RFT data...")

    gendata_rft.run(
        well_times=options.well_and_time_file,
        trajectories=trajectories,
        ecl_grid=options.eclbase[0],
        ecl_rft=options.eclbase[1],
        zonemap=options.zonemap,
        csvfile=options.csvfile,
    )

    logger.info("Completed!")
Example #6
0
def test_load(initdir, fname):
    """A trajectory loaded from file has points with the expected
    utm_x coordinates, in order."""
    trajectory = Trajectory.load_from_file(fname)
    for point, expected_utmx in zip(trajectory.trajectory_points, [0, 4]):
        assert point.utm_x == expected_utmx
Example #7
0
def test_load_from_line(line, expected_zone):
    """A parsed trajectory line yields a TrajectoryPoint with the
    expected coordinate fields and zone."""
    point = TrajectoryPoint(*Trajectory.parse_trajectory_line(line))
    expectations = {
        "utm_x": 0,
        "utm_y": 1,
        "measured_depth": 2.2,
        "true_vertical_depth": 3,
        "zone": expected_zone,
    }
    for attribute, expected in expectations.items():
        assert getattr(point, attribute) == expected
Example #8
0
def test_update_simdata_outside_grid(tmpdir):
    """A point outside the grid gets no ijk cell and no simulation
    data attached, and none of the result columns appear."""
    grid = EclGrid(ECL_BASE_REEK + ".EGRID")
    rft = EclRFTFile(ECL_BASE_REEK + ".RFT")
    rft_well_date = rft.get("OP_1", datetime.date(2000, 2, 1))

    # A point outside the grid:
    point = TrajectoryPoint(45000, 60000000, 1, 1)
    cell = grid.find_cell(point.utm_x, point.utm_y, point.true_vertical_depth)
    point.set_ijk(cell)
    assert point.grid_ijk is None  # There is no Exception raised by set_ijk()

    point.update_simdata_from_rft(rft_well_date)
    assert point.pressure is None  # Since we are outside the grid.

    # Construct a Trajectory from the point
    traj = Trajectory([])
    traj.trajectory_points = [point]  # (can't initialize from list of points)
    dframe = traj.to_dataframe()
    assert not {"i", "j", "k", "pressure", "swat"}.intersection(set(dframe))
Example #9
0
def test_update_simdata_outside_well(tmpdir):
    """A point inside the grid but not along the well gets an ijk cell,
    but no pressure or saturation data."""
    grid = EclGrid(ECL_BASE_REEK + ".EGRID")
    rft = EclRFTFile(ECL_BASE_REEK + ".RFT")
    rft_well_date = rft.get("OP_1", datetime.date(2000, 2, 1))

    # A point in the grid, but not related to the well
    point = TrajectoryPoint(462825.55, 5934025.52, 1623.19, 1623.19)
    cell = grid.find_cell(point.utm_x, point.utm_y, point.true_vertical_depth)
    point.set_ijk(cell)
    # NB: grid_ijk ints start at zero, ResInsight and ecl2df report this as (29, 29, 7)
    assert point.grid_ijk == (28, 28, 6)

    point.update_simdata_from_rft(rft_well_date)
    for value in (point.pressure, point.swat, point.sgas, point.soil):
        assert value is None

    # Construct a Trajectory from the point
    traj = Trajectory([])
    traj.trajectory_points = [point]  # (can't initialize from list of points)
    dframe = traj.to_dataframe()
    assert {"i", "j", "k"}.issubset(set(dframe))
    assert not {"pressure", "swat", "soil", "sgas"}.intersection(set(dframe))
Example #10
0
def test_tuple_column_splitter(dframe, tuplecolumn, components):
    """split_tuple_column() must leave its input untouched, preserve
    row count and values, and expand the tuple column into one column
    per component."""
    splitdf = Trajectory.split_tuple_column(
        dframe, tuplecolumn=tuplecolumn, components=components)

    assert tuplecolumn in dframe  # Ensure we have not touched the input
    assert tuplecolumn not in splitdf
    assert len(splitdf) == len(dframe)

    # The multiset of values is flattened to a set; it must survive the split:
    values_before = {val for tup in dframe[tuplecolumn] for val in tup}
    values_after = {val for tup in splitdf[components].values for val in tup}
    assert values_before == values_after

    for comp in components:
        assert comp in splitdf
    # One column removed, len(components) columns added:
    assert len(splitdf.columns) == len(dframe.columns) - 1 + len(components)
Example #11
0
def test_dframe_trajectory(initdir, fname):
    """Test dataframe representation of a trajectory not having
    any attached Eclipse simulation results"""
    dframe = Trajectory.load_from_file(fname).to_dataframe()

    assert isinstance(dframe, pd.DataFrame)

    # Dataframe lengths should be the same as number of non-empty lines
    # in txt input. Use a context manager so the file handle is closed
    # deterministically (the previous bare open() leaked the handle):
    with open(fname) as fhandle:
        data_lines = [
            line for line in fhandle
            if line.strip() and not line.strip().startswith("--")
        ]
    assert len(dframe) == len(data_lines)

    # Check that we have the input columns which defines the trajectory:
    assert {"utm_x", "utm_y", "measured_depth", "true_vertical_depth",
            "zone"}.issubset(set(dframe.columns))

    # grid_ijk is a temp column, never to be present in output
    assert "grid_ijk" not in dframe
    # and since there is no Eclipse results attached, we can't have these either:
    assert "i" not in dframe
    assert "j" not in dframe
    assert "k" not in dframe

    # Check casing for column names, ensuring consistency in this particular
    # dataframe:
    assert list(
        dframe.columns) == [colname.lower() for colname in dframe.columns]

    # pressure should not be there when there is no data for it
    # (no Eclipse simulation results attached in this particular test function)
    assert "pressure" not in dframe

    # These columns should be in place, to signify there is not data for them.
    # (Eclipse simulation results would be needed for any of these to be True)
    assert set(dframe["valid_zone"]) == {False}
    assert set(dframe["is_active"]) == {False}

    # Check dataframe sorting:
    assert (dframe.sort_values("measured_depth")["measured_depth"] ==
            dframe["measured_depth"]).all()
Example #12
0
def main_entry_point():
    """Load well trajectories and extract RFT data from Eclipse output.

    Aborts via SystemExit listing every trajectory file that failed to
    load; writes a GENDATA_RFT.OK marker on success and exits nonzero
    if the extraction fails.
    """
    options = _build_parser().parse_args()
    logger.setLevel(options.log_level)

    # Collect all load errors before aborting, so every problem is
    # reported in a single run:
    trajectories = {}
    load_errors = []
    for well_name, *_ in options.well_and_time_file:
        filepath = os.path.join(options.trajectory_path, well_name + ".txt")
        try:
            trajectories[well_name] = Trajectory.load_from_file(
                filepath=filepath)
        except (IOError, ValueError) as err:
            load_errors.append(str(err))

    if load_errors:
        raise SystemExit("\n".join(load_errors))

    logger.info("All files loaded\nRetrieving RFT data...")

    try:
        gendata_rft.run(
            well_times=options.well_and_time_file,
            trajectories=trajectories,
            ecl_grid=options.eclbase[0],
            ecl_rft=options.eclbase[1],
            zonemap=options.zonemap,
            csvfile=options.csvfile,
            outputdirectory=options.outputdirectory,
        )
        # The OK file signals successful completion to e.g. ERT:
        with open("GENDATA_RFT.OK", "w") as fh:
            fh.write("GENDATA RFT completed OK")
        logger.info("Completed!")
    except ValueError as exception:
        logger.error(str(exception))
        sys.exit(1)
Example #13
0
def test_invalid_load_from_line(line, expected_error):
    """Malformed trajectory lines must raise ValueError with a message
    matching expected_error."""
    with pytest.raises(ValueError, match=expected_error):
        fields = Trajectory.parse_trajectory_line(line)
        TrajectoryPoint(*fields)
Example #14
0
def test_non_existing_file():
    """Loading a missing trajectory file raises IOError with an
    explanatory message."""
    filename = "non_existing"
    with pytest.raises(IOError,
                       match="Trajectory file non_existing not found!"):
        Trajectory.load_from_file(filename)
Example #15
0
def test_tuple_column_splitter_explicit():
    """None-rows are handled, and the default parameters of
    split_tuple_column() apply (column "grid_ijk" -> "i", "j", "k")."""
    input_frame = pd.DataFrame(columns=["grid_ijk"],
                               data=[[None], [(1, 2, 3)]])
    dframe = Trajectory.split_tuple_column(input_frame)
    assert len(dframe) == 2
    assert set(dframe.columns).issuperset({"i", "j", "k"})
Example #16
0
def test_tuple_column_splitter_errors(raises, dframe, tuplecolumn, components):
    """Invalid input to split_tuple_column() raises the parametrized
    exception type."""
    with pytest.raises(raises):
        Trajectory.split_tuple_column(
            dframe, tuplecolumn=tuplecolumn, components=components)
Example #17
0
def test_invalid_load_from_line(line, expected_error):
    """Malformed trajectory lines raise ArgumentTypeError whose message
    contains expected_error."""
    with pytest.raises(argparse.ArgumentTypeError) as err_context:
        fields = Trajectory.parse_trajectory_line(line)
        TrajectoryPoint(*fields)
    message = err_context.value.args[0]
    assert expected_error in message