Example 1
import os

import numpy as np
import pandas as pd
from ecl.summary import EclSum


def create_result(name, keys, indicator):
    summary = EclSum(f'model_folder/{name}.DATA')
    dates = summary.dates
    results = []
    all_keys = []

    # Default to oil production rate for all wells.
    if keys is None:
        keys = ["WOPR:*"]

    # Expand each wildcard pattern into the matching summary keys.
    for key in keys:
        key_all_wells = summary.keys(key)
        all_keys = all_keys + list(key_all_wells)

    for key in all_keys:
        results.append(list(summary.numpy_vector(key)))

    if len(results) == 0:
        print('Results could not be loaded from the model. '
              'No results file was created.')
        return

    result_df = pd.DataFrame(data=np.array(results).T,
                             index=dates,
                             columns=all_keys)
    result_df.index.name = 'Time'
    if indicator is not None:
        os.makedirs(f'csv_folder/{indicator}', exist_ok=True)
        result_df.to_csv(f'csv_folder/{indicator}/{name}.csv')
    else:
        result_df.to_csv(f'csv_folder/{name}.csv')
    print(f'{name}.csv is created')
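A minimal usage sketch for the function above; the model name, key patterns, and indicator folder are placeholders:

# Hypothetical call: reads model_folder/SPE1.DATA and writes csv_folder/run_01/SPE1.csv.
create_result("SPE1", keys=["WOPR:*", "WBHP:*"], indicator="run_01")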
Example 2
from pathlib import Path
from typing import Dict, List

from ecl.summary import EclSum
from ert.data import NumericalRecord  # import path may differ between ert versions


def _sync_eclsum_to_record(location: Path,
                           smry_keys: List[str]) -> Dict[str, NumericalRecord]:
    eclsum = EclSum(str(location))
    record_dict = {}
    for key in smry_keys:
        # Map each report date (stringified) to the float value at that date.
        record_dict[key] = NumericalRecord(data=dict(
            zip(map(str, eclsum.dates), map(float, eclsum.numpy_vector(key)))))
    return record_dict
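A hedged usage sketch; the case path and summary keys are placeholders, and the printed mapping is only illustrative:

records = _sync_eclsum_to_record(Path("model/SPE1.DATA"), ["FOPR", "FWPT"])
print(records["FOPR"].data)  # e.g. {'2020-01-01 00:00:00': 1234.5, ...}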
Example 3
from ecl.summary import EclSum
from res.enkf import EnKFMain, ResConfig  # libres bindings; paths may vary by version
from res.enkf.export import SummaryObservationCollector


def test_summary_collector():
    res_config = ResConfig("snake_oil.ert")
    ert = EnKFMain(res_config)
    summary = EclSum("refcase/SNAKE_OIL_FIELD.UNSMRY")
    data = SummaryObservationCollector.loadObservationData(ert, "default_0")

    # The loaded FOPR observations must match the FOPRH history vector of the
    # reference case, sampled at report steps only.
    assert (data["FOPR"].values.tolist() == summary.numpy_vector(
        "FOPRH", report_only=True).tolist())
Example 4
    def test_load_case(self):
        path = os.path.join(self.TESTDATA_ROOT,
                            "local/ECLIPSE/cp_simple3/SIMPLE_SUMMARY3")
        case = EclSum(path)
        self.assertFloatEqual(case.sim_length, 545.0)

        # Iterating the vector must agree with indexed access into it.
        fopr = case.numpy_vector("FOPR")
        for time_index, value in enumerate(fopr):
            self.assertEqual(fopr[time_index], value)
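sim_length reports the simulated time span of the case. A sketch pairing each FOPR value with its date, assuming the same case object; dates and the vector have one entry per ministep:

for date, value in zip(case.dates, case.numpy_vector("FOPR")):
    print(date, value)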
Example 5
import json
import logging
from typing import List

import pyarrow as pa
from ecl.summary import EclSum, EclSumKeyWordVector

logger = logging.getLogger(__name__)

# _is_cpi_column() and _create_smry_meta_dict() are module helpers defined
# elsewhere in the original source.


def _load_smry_into_table(smry_filename: str) -> pa.Table:
    """
    Reads data from an SMRY file into a PyArrow Table.
    The DATE column is stored as an Arrow timestamp with ms resolution, timestamp[ms].
    All numeric columns are stored as 32-bit floats.
    Summary metadata is attached per field/column of the table's schema under
    the 'smry_meta' key.
    """

    eclsum = EclSum(smry_filename, include_restart=False, lazy_load=False)

    # For now, we go via a set to prune out duplicate entries returned by
    # EclSumKeyWordVector, see:
    # https://github.com/equinor/ecl/issues/816#issuecomment-865881283
    column_names: List[str] = list(
        set(EclSumKeyWordVector(eclsum, add_keywords=True)))

    # Exclude CPI columns from export
    org_col_count = len(column_names)
    column_names = [
        colname for colname in column_names if not _is_cpi_column(colname)
    ]
    if len(column_names) != org_col_count:
        logger.info(
            f"Excluding {org_col_count - len(column_names)} CPI columns from export"
        )

    # Fetch the dates as a numpy array with ms resolution
    np_dates_ms = eclsum.numpy_dates

    smry_meta_dict = _create_smry_meta_dict(eclsum, column_names)

    # Datatypes to use for DATE column and all the numeric columns
    dt_timestamp_ms = pa.timestamp("ms")
    dt_float32 = pa.float32()

    # Build schema for the table
    field_list: List[pa.Field] = []
    field_list.append(pa.field("DATE", dt_timestamp_ms))
    for colname in column_names:
        field_metadata = {b"smry_meta": json.dumps(smry_meta_dict[colname])}
        field_list.append(
            pa.field(colname, dt_float32, metadata=field_metadata))

    schema = pa.schema(field_list)

    # Now extract all the summary vectors one by one
    # We do this through EclSum.numpy_vector() instead of EclSum.pandas_frame() since
    # the latter throws an exception if the SMRY data has timestamps beyond 2262,
    # see: https://github.com/equinor/ecl/issues/802
    column_arrays = [np_dates_ms]

    for colname in column_names:
        colvector = eclsum.numpy_vector(colname)
        column_arrays.append(colvector)

    table = pa.table(column_arrays, schema=schema)

    return table
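The resulting table can be written to disk with stock PyArrow; a minimal sketch using the Feather v2 writer (input and output paths are placeholders):

import pyarrow.feather as feather

table = _load_smry_into_table("CASE.SMSPEC")
feather.write_feather(table, "smry.arrow")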
Example 6
import re
from pathlib import Path
from typing import Set

import pandas as pd
from ecl.summary import EclSum, EclSumKeyWordVector


def _extract_well_connection_status(filename: Path) -> pd.DataFrame:
    # pylint: disable=too-many-locals
    """Extracts the well connection status history for each compdat connection
    that is included in the summary data in the form CPI:WELL:I,J,K.

    From the CPI time series it is possible to extract the status of the
    connection, because it is 0 when the connection is SHUT and >0 when the
    connection is open.

    The output from this function is one row for every time a connection
    changes status. The earliest date for any connection will be OPEN, i.e. a
    cell cannot be SHUT before it has been OPEN. This means that any cells
    that are always SHUT will not be included in the export.
    """

    eclsum = EclSum(str(filename), include_restart=False, lazy_load=False)
    column_names: Set[str] = set(
        EclSumKeyWordVector(eclsum, add_keywords=True))
    np_dates_ms = eclsum.numpy_dates

    cpi_columns = [
        col for col in column_names
        if re.match("^CPI:[A-Z0-9_-]{1,8}:[0-9]+,[0-9]+,[0-9]+$", col)
    ]
    df = pd.DataFrame(columns=["DATE", "WELL", "I", "J", "K", "OP/SH"])

    for col in cpi_columns:
        colsplit = col.split(":")
        well = colsplit[1]
        i, j, k = colsplit[2].split(",")

        vector = eclsum.numpy_vector(col)

        status_changes = _get_status_changes(np_dates_ms, vector)
        for date, status in status_changes:
            df.loc[df.shape[0]] = [date, well, i, j, k, status]

    return df
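_get_status_changes() is not shown in this example. A minimal sketch of what such a helper could look like, derived only from the docstring's contract (OPEN when CPI > 0, leading SHUT periods skipped); this is an assumption, not the original implementation:

def _get_status_changes(dates, cpi_vector):
    # Hypothetical helper: yields (date, status) each time the CPI value
    # crosses zero. Leading SHUT entries produce nothing, so the first
    # reported status for any connection is always OPEN.
    changes = []
    currently_open = False
    for date, value in zip(dates, cpi_vector):
        is_open = value > 0
        if is_open and not currently_open:
            changes.append((date, "OPEN"))
        elif currently_open and not is_open:
            changes.append((date, "SHUT"))
        currently_open = is_open
    return changes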
Example 7
import pandas as pd
from ecl.summary import EclSum

model_data_file_name = "spe1.DATA"

# path_to_sim_dir is assumed to be defined earlier in the original script.
file_name = path_to_sim_dir + model_data_file_name
summary = EclSum(file_name)
dates = summary.dates

# Seed the frame with a dummy column so the joins align on row count;
# the column is dropped again before export.
n_steps = len(dates)
result_df = pd.DataFrame({"test": list(range(n_steps))})

needed_keys = [
    "WOPR:*", "WWPR:*", "WLPR:*", "WGPR:*", "WWIR:*", "WGOR:*", "WBHP:*",
    "WOPT:*", "WWPT:*", "WLPT:*", "WGPT:*", "WWIT:*", "FOPT", "FWPT", "FLPT",
    "FGPT", "FWIT"
]

# all_keys = summary.keys("*")
# print(all_keys)
for pattern in needed_keys:
    keys_by_wells = summary.keys(pattern)
    for key in keys_by_wells:
        this_parameter_values = summary.numpy_vector(key)
        one_parameter_df = pd.DataFrame({key: this_parameter_values})
        result_df = result_df.join(one_parameter_df)

time_parameter_column = pd.DataFrame({"time": dates})
result_df = result_df.join(time_parameter_column)
result_df = result_df.set_index("time")
del result_df['test']

result_df.to_csv("sim_result.csv")
Example 8
from ecl.summary import EclSum


def writeDiff(filename, vector1, vector2):
    """Write the element-wise difference of two equal-length vectors."""
    with open(filename, "w") as f:
        for node1, node2 in zip(vector1, vector2):
            diff = node1 - node2
            f.write("%f\n" % diff)


if __name__ == "__main__":
    ecl_sum = EclSum("SNAKE_OIL_FIELD")

    report_step = 199
    writeDiff(
        "snake_oil_opr_diff_%d.txt" % report_step,
        ecl_sum.numpy_vector("WOPR:OP1"),
        ecl_sum.numpy_vector("WOPR:OP2"),
    )
    writeDiff(
        "snake_oil_wpr_diff_%d.txt" % report_step,
        ecl_sum.numpy_vector("WWPR:OP1"),
        ecl_sum.numpy_vector("WWPR:OP2"),
    )
    writeDiff(
        "snake_oil_gpr_diff_%d.txt" % report_step,
        ecl_sum.numpy_vector("WGPR:OP1"),
        ecl_sum.numpy_vector("WGPR:OP2"),
    )
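An equivalent vectorized variant of writeDiff, since numpy_vector() already returns NumPy arrays; a sketch, not part of the original script:

import numpy as np

def write_diff_np(filename, vector1, vector2):
    # Element-wise difference written one value per line, '%f'-formatted.
    np.savetxt(filename, np.asarray(vector1) - np.asarray(vector2), fmt="%f")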